From 934992a461098b54b1d905b3844d4f2678cdca17 Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Thu, 25 Feb 2021 17:40:31 -0800 Subject: [PATCH 01/10] Drop sha256sums and README.md Signed-off-by: Arvind Iyengar --- README.md | 7 ------- sha256sum/rke2-canal/rke2-canal.sum | 9 --------- sha256sum/rke2-coredns/rke2-coredns.sum | 2 -- sha256sum/rke2-ingress-nginx/rke2-ingress-nginx.sum | 2 -- sha256sum/rke2-kube-proxy/rke2-kube-proxy.sum | 8 -------- sha256sum/rke2-metrics-server/rke2-metrics-server.sum | 2 -- 6 files changed, 30 deletions(-) delete mode 100644 README.md delete mode 100644 sha256sum/rke2-canal/rke2-canal.sum delete mode 100644 sha256sum/rke2-coredns/rke2-coredns.sum delete mode 100644 sha256sum/rke2-ingress-nginx/rke2-ingress-nginx.sum delete mode 100644 sha256sum/rke2-kube-proxy/rke2-kube-proxy.sum delete mode 100644 sha256sum/rke2-metrics-server/rke2-metrics-server.sum diff --git a/README.md b/README.md deleted file mode 100644 index ac1fda3..0000000 --- a/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Asset branch - -This branch is auto-generated from main-source branch, please open PRs to main-source. - -[asset](./assets) Folder contains all the helm chart artifacts. - -[charts](./charts) Folder contains all the helm chart content of the latest version for browsing purpose. 
\ No newline at end of file diff --git a/sha256sum/rke2-canal/rke2-canal.sum b/sha256sum/rke2-canal/rke2-canal.sum deleted file mode 100644 index 9545488..0000000 --- a/sha256sum/rke2-canal/rke2-canal.sum +++ /dev/null @@ -1,9 +0,0 @@ -df6f4924e4f542dba5947e64b9528260b681db8475167b88977464dccabb3651 packages/rke2-canal/charts/values.yaml -04a3caab7086b0fef6894bf8f9c29d7eecefad1cf3eaed4742e9305cfc9f6b8a packages/rke2-canal/charts/templates/serviceaccount.yaml -ac1baa8b6d7b02c713b80bc38a853d7a8c70f67847f08f1c5e81d4157ccf85e4 packages/rke2-canal/charts/templates/rbac.yaml -707744e262fccd7ff3e9e138be55aebedba32a29206deb0948d97b0ea9f0f08d packages/rke2-canal/charts/templates/crd.yaml -ebbce05e2f4f53227016183e3638bbd576897598b91639ce3942bc738e7ec15c packages/rke2-canal/charts/templates/config.yaml -bf4bfa1018dda5895521ac50a4a6102d6f3a1659e9c52a37b70b4b8001452ea1 packages/rke2-canal/charts/templates/NOTES.txt -90f1d8a3a944e4b0fde9f2842b2a3531a95102d5ce3e3a893652e200c26b947f packages/rke2-canal/charts/templates/_helpers.tpl -7878a574ad9f673266ce585d3688428796e3e182a7f672026258ef6b92393dd0 packages/rke2-canal/charts/templates/daemonset.yaml -f9520bfc03135b7293e8e7fc13933331fd2e71e7f23077a0bdd814d17d2f2dc3 packages/rke2-canal/charts/Chart.yaml diff --git a/sha256sum/rke2-coredns/rke2-coredns.sum b/sha256sum/rke2-coredns/rke2-coredns.sum deleted file mode 100644 index 95bb05f..0000000 --- a/sha256sum/rke2-coredns/rke2-coredns.sum +++ /dev/null @@ -1,2 +0,0 @@ -771f526366bc9f1758d13e7807a51512fc03ad6d4ed48c162218a776292c11da packages/rke2-coredns/package.yaml -52156705d3c82cd638e0a00aa3d836ba90cecf293dec11a48ce7fac08466692f packages/rke2-coredns/rke2-coredns.patch diff --git a/sha256sum/rke2-ingress-nginx/rke2-ingress-nginx.sum b/sha256sum/rke2-ingress-nginx/rke2-ingress-nginx.sum deleted file mode 100644 index f0354a5..0000000 --- a/sha256sum/rke2-ingress-nginx/rke2-ingress-nginx.sum +++ /dev/null @@ -1,2 +0,0 @@ 
-3ab8fe9454ce25221d16fc687c3942744c1ddb73e1dd0ada9ddf7bc732935e3f packages/rke2-ingress-nginx/rke2-ingress-nginx.patch -816cccb4bd7850999be977f709b2ed8fb16396e3e89baeefa3237069be6a2e85 packages/rke2-ingress-nginx/package.yaml diff --git a/sha256sum/rke2-kube-proxy/rke2-kube-proxy.sum b/sha256sum/rke2-kube-proxy/rke2-kube-proxy.sum deleted file mode 100644 index 0bcb7eb..0000000 --- a/sha256sum/rke2-kube-proxy/rke2-kube-proxy.sum +++ /dev/null @@ -1,8 +0,0 @@ -134f693f1882219122df41796027b464f724089d4ab07a0af589fc9cc98083be packages/rke2-kube-proxy/charts/values.yaml -d1de4d515c8b9d215a9410a4c7ce04b0593583f9edaa16ea3a5b939ed6d44560 packages/rke2-kube-proxy/charts/templates/serviceaccount.yaml -3ea61d8092a4c6841e1a28d9b5db86110a263b693fb2845679cd2fc1891b5763 packages/rke2-kube-proxy/charts/templates/rbac.yaml -b75c849a1d9d324710abf0972e3d64392b45a7fca8a09dbe056390699eecf659 packages/rke2-kube-proxy/charts/templates/config.yaml -41b28ddbe82a714e88b3e3f62bd1f96b794f02384a560c7d21b4a0d680400451 packages/rke2-kube-proxy/charts/templates/NOTES.txt -00e5a2a05f04e3a96f0d7b68516ca830bc35dae1ce8a61bbe78adc3b53f02c6d packages/rke2-kube-proxy/charts/templates/_helpers.tpl -6a1dcaed632df8e1c4478255d9fba792f4ee7a10aa5fe88861d0eed4357ca392 packages/rke2-kube-proxy/charts/templates/daemonset.yaml -eb338f6abc3affcbe1eccbbfca0aa89c8a2a9374bb5b368a529e40515fa1e36e packages/rke2-kube-proxy/charts/Chart.yaml diff --git a/sha256sum/rke2-metrics-server/rke2-metrics-server.sum b/sha256sum/rke2-metrics-server/rke2-metrics-server.sum deleted file mode 100644 index 2841321..0000000 --- a/sha256sum/rke2-metrics-server/rke2-metrics-server.sum +++ /dev/null @@ -1,2 +0,0 @@ -d70eeb3eed4cbf768778e64cb74fe0341b0ad05ced3685b82d4e62af25fe150a packages/rke2-metrics-server/rke2-metrics-server.patch -87c58bd17bd974b3956e43df568d87fc6ea7b690c68e88d9fa8d238702030062 packages/rke2-metrics-server/package.yaml From 74c4feb591dac0b0c80717c9b8faf40458c7e225 Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: 
Thu, 25 Feb 2021 17:44:35 -0800 Subject: [PATCH 02/10] Initialize template, fix, and re-run make docs 1. Ran the following script to initialize ``` arvindiyengar: ~/Rancher/rke2-charts/src/github.com/rancher/rke2-charts $ curl -s https://raw.githubusercontent.com/aiyengar2/charts-build-scripts/add_init_steps/init.sh > /dev/null | sh Pulling in charts-build-scripts version v0.0.4 charts-build-scripts version v0.0.4 (567c991) INFO[0000] Pulling rancher/charts-build-scripts[path=templates] from upstream into templates436707958 INFO[0002] Successfully pulled new updated docs into working directory. Pulled in basic template for live into configuration.yaml and constructed charts directory Next Steps: 1. Modify the configuration.yaml with your expected setup and re-run make docs to automatically update the repository. arvindiyengar: ~/Rancher/rke2-charts/src/github.com/rancher/rke2-charts $ make docs ./bin/charts-build-scripts docs INFO[0000] Pulling rancher/charts-build-scripts[path=templates] from upstream into templates174667530 INFO[0002] Successfully pulled new updated docs into working directory. ``` 2. Updated the configuration.yaml to point at rke2-charts manually 3. 
Ran `make docs` Signed-off-by: Arvind Iyengar --- .gitignore | 2 ++ Makefile | 10 ++++++ README.md | 62 +++++++++++++++++++++++++++++++++++++ _config.yml | 1 + assets/README.md | 3 ++ charts/README.md | 3 ++ configuration.yaml | 8 +++++ scripts/pull-scripts | 23 ++++++++++++++ scripts/regenerate-assets | 65 +++++++++++++++++++++++++++++++++++++++ scripts/version | 4 +++ 10 files changed, 181 insertions(+) create mode 100755 .gitignore create mode 100755 Makefile create mode 100755 README.md create mode 100755 _config.yml create mode 100755 assets/README.md create mode 100755 charts/README.md create mode 100644 configuration.yaml create mode 100755 scripts/pull-scripts create mode 100755 scripts/regenerate-assets create mode 100755 scripts/version diff --git a/.gitignore b/.gitignore new file mode 100755 index 0000000..5f44260 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +bin +*.DS_Store \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100755 index 0000000..2eb544b --- /dev/null +++ b/Makefile @@ -0,0 +1,10 @@ +pull-scripts: + ./scripts/pull-scripts + +TARGETS := prepare patch charts clean sync validate rebase docs + +$(TARGETS): + @ls ./bin/charts-build-scripts 1>/dev/null 2>/dev/null || ./scripts/pull-scripts + ./bin/charts-build-scripts $@ + +.PHONY: $(TARGETS) \ No newline at end of file diff --git a/README.md b/README.md new file mode 100755 index 0000000..8fc2db3 --- /dev/null +++ b/README.md @@ -0,0 +1,62 @@ +## Live Branch + +This branch contains generated assets that have been officially released on rke2-charts.rancher.io. + +The following directory structure is expected: +```text +assets/ + / + -.tgz + ... +charts/ + + + + # Unarchived Helm chart +``` + +### Configuration + +This repository branch contains a `configuration.yaml` file that is used to specify how it interacts with other repository branches. 
+ +#### Sync + +This branch syncs with the generated assets from the following branches: +- main-source at https://github.com/rancher/rke2-charts.git (only latest assets) + +To release a new version of a chart, please open the relevant PRs to one of these branches. + +Merging should trigger a sync workflow on pushing to these branches. + +### Cutting a Release + +In the Live branch, cutting a release requires you to run the `make sync` command. + +This command will automatically get the latest charts / resources merged into the the branches you sync with (as indicated in this branch's `configuration.yaml`) and will fail if any of those branches try to modify already released assets. + +If the `make sync` command fails, you might have to manually make changes to the contents of the Staging Branch to resolve any issues. + +Once you successfully run the `make sync` command, the logs outputted will itemize the releaseCandidateVersions picked out from the Staging branch and make exactly two changes: + +1. It will update the `Chart.yaml`'s version for each chart to drop the `-rcXX` from it + +2. It will update the `Chart.yaml`'s annotations for each chart to drop the `-rcXX` from it only for some special annotations (note: currently, the only special annotation we track is `catalog.cattle.io/auto-install`). + +Once you successfully run the `make release` command, ensure the following is true: +- The `assets/` and `charts/` directories each only have a single file contained within them: `README.md` +- The `released/assets/` directory has a .tgz file for each releaseCandidateVersion of a Chart that was created during this release. +- The `index.yaml` and `released/assets/index.yaml` both are identical and the `index.yaml`'s diff shows only two types of changes: a timestamp update or a modification of an existing URL from `assets/*` to `released/assets/*`. + +No other changes are expected. 
+ +### Makefile + +#### Basic Commands + +`make pull-scripts`: Pulls in the version of the `charts-build-scripts` indicated in scripts. + +`make sync`: Syncs the assets in your current repository with the merged contents of all of the repository branches indicated in your configuration.yaml + +`make validate`: Validates your current repository branch against all the repository branches indicated in your configuration.yaml + +`make docs`: Pulls in the latest docs, scripts, etc. from the charts-build-scripts repository \ No newline at end of file diff --git a/_config.yml b/_config.yml new file mode 100755 index 0000000..1888c5a --- /dev/null +++ b/_config.yml @@ -0,0 +1 @@ +exclude: [charts] diff --git a/assets/README.md b/assets/README.md new file mode 100755 index 0000000..9fb85d2 --- /dev/null +++ b/assets/README.md @@ -0,0 +1,3 @@ +## Assets + +This folder contains Helm chart archives that are served from rke2-charts.rancher.io. \ No newline at end of file diff --git a/charts/README.md b/charts/README.md new file mode 100755 index 0000000..c6a14ae --- /dev/null +++ b/charts/README.md @@ -0,0 +1,3 @@ +## Charts + +This folder contains the unarchived Helm charts that are currently being served at rke2-charts.rancher.io. \ No newline at end of file diff --git a/configuration.yaml b/configuration.yaml new file mode 100644 index 0000000..56d6667 --- /dev/null +++ b/configuration.yaml @@ -0,0 +1,8 @@ +template: live + +sync: +- url: https://github.com/rancher/rke2-charts.git + branch: main-source + dropReleaseCandidates: true +helmRepo: + cname: rke2-charts.rancher.io \ No newline at end of file diff --git a/scripts/pull-scripts b/scripts/pull-scripts new file mode 100755 index 0000000..0b5835f --- /dev/null +++ b/scripts/pull-scripts @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +cd $(dirname $0) + +source ./version + +echo "Pulling in charts-build-scripts version ${CHARTS_BUILD_SCRIPT_VERSION}" + +rm -rf ../bin +cd .. 
+ +rm -rf charts-build-scripts +git clone --depth 1 --branch $CHARTS_BUILD_SCRIPT_VERSION https://github.com/rancher/charts-build-scripts.git 2>/dev/null + +cd charts-build-scripts +./scripts/build +mv bin ../bin +cd .. + +rm -rf charts-build-scripts +chmod +x ./bin/charts-build-scripts +./bin/charts-build-scripts --version \ No newline at end of file diff --git a/scripts/regenerate-assets b/scripts/regenerate-assets new file mode 100755 index 0000000..f6472ca --- /dev/null +++ b/scripts/regenerate-assets @@ -0,0 +1,65 @@ +#!/bin/bash +set -e + +cd $(dirname $0) + +if [[ -z ${REPOSITORY} ]]; then + echo "Need to provide REPOSITORY as environment variable" + exit 1 +fi + +cd .. + +# Setup +rm -rf ./repository +mkdir -p ./repository +cd repository + +# Pull in branch +branch=$(git rev-parse --abbrev-ref HEAD) +echo "Pulling in ${REPOSITORY}@${branch}" +git clone --depth 1 --branch ${branch} ${REPOSITORY} . > /dev/null 2>&1 + +if ! (test -d assets && test -d charts); then + echo "There are no charts or assets in this repository" + cd .. + rm -rf ./repository + exit 1 +fi + +# Copy assets and charts into the new format +for package_assets in assets/*; do + cp -R ${package_assets} ../assets + package_name=$(basename -- ${package_assets}) + for asset in ${package_assets}/*; do + if [[ ${asset} =~ .*\.tgz ]]; then + # Parse structure + asset_name=$(basename -- ${asset%.*}) + chart_name=$(echo ${asset_name} | rev | cut -d- -f2- | rev) + chart_name=$(echo ${chart_name} | sed -r 's/-[[:digit:]\.]+$//') + chart_version=${asset_name#${chart_name}-} + + # Fix chart version for rc version + # e.g. 
0.0.0-rc100 -> 0.0.000-rc1 to keep the drop release candidate version logic simple + if [[ ${chart_version} =~ [0-9]{2}$ ]] && [[ ${chart_version} =~ -rc ]]; then + actual_version=${chart_version%-*} + package_version=${chart_version: -2} + chart_version_without_package_version=${chart_version%${package_version}} + rc_version=${chart_version_without_package_version#${actual_version}} + chart_version=${actual_version}${package_version}${rc_version} + fi + + # Dump archives as charts + chart_path=../charts/${package_name}/${chart_name}/${chart_version} + echo "Unarchiving ${asset} to ${chart_path}" + mkdir -p ${chart_path} + tar xvzf ${asset} -C ${chart_path} --strip-components=1 > /dev/null 2>&1 + fi + done +done + +# Go back +cd .. +helm repo index --merge ./assets/index.yaml --url assets assets +mv ./assets/index.yaml ./index.yaml +rm -rf ./repository \ No newline at end of file diff --git a/scripts/version b/scripts/version new file mode 100755 index 0000000..3ca1e4d --- /dev/null +++ b/scripts/version @@ -0,0 +1,4 @@ +#!/bin/bash +set -e + +CHARTS_BUILD_SCRIPT_VERSION=v0.0.4 From cecc18cf67e7b3d5c0a3b6a549c6c03270510adb Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Thu, 25 Feb 2021 17:48:01 -0800 Subject: [PATCH 03/10] Fixup bug with regenerate-assets Will be tracked in https://github.com/rancher/charts-build-scripts/pull/19 Signed-off-by: Arvind Iyengar --- scripts/regenerate-assets | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/scripts/regenerate-assets b/scripts/regenerate-assets index f6472ca..cfcb16d 100755 --- a/scripts/regenerate-assets +++ b/scripts/regenerate-assets @@ -3,6 +3,13 @@ set -e cd $(dirname $0) +if [[ -z ${BRANCH} ]]; then + branch=$(git rev-parse --abbrev-ref HEAD) +else + echo "Using branch ${BRANCH}" + branch=${BRANCH} +fi + if [[ -z ${REPOSITORY} ]]; then echo "Need to provide REPOSITORY as environment variable" exit 1 @@ -16,7 +23,6 @@ mkdir -p ./repository cd repository # Pull in branch -branch=$(git 
rev-parse --abbrev-ref HEAD) echo "Pulling in ${REPOSITORY}@${branch}" git clone --depth 1 --branch ${branch} ${REPOSITORY} . > /dev/null 2>&1 From cd9d2ae6ab7605560a9ffc3b625fbdc33a5fd5f9 Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Thu, 25 Feb 2021 18:03:02 -0800 Subject: [PATCH 04/10] Migrate all assets Just needed to run: ```bash $ find assets -maxdepth 1 -mindepth 1 -type d -exec rm -rf {} \; $ find charts -maxdepth 1 -mindepth 1 -type d -exec rm -rf {} \; $ REPOSITORY=https://github.com/rancher/rke2-charts BRANCH=main ./scripts/regenerate-assets ``` Signed-off-by: Arvind Iyengar --- assets/index.yaml | 381 ------------ .../build20210223}/Chart.yaml | 16 +- .../build20210223}/templates/NOTES.txt | 0 .../build20210223}/templates/_helpers.tpl | 0 .../build20210223}/templates/config.yaml | 0 .../build20210223}/templates/crd.yaml | 0 .../build20210223}/templates/daemonset.yaml | 0 .../build20210223}/templates/rbac.yaml | 0 .../templates/serviceaccount.yaml | 0 .../build20210223}/values.yaml | 0 .../rke2-canal/rke2-canal/v3.13.3/Chart.yaml | 13 + .../rke2-canal/v3.13.3/templates/NOTES.txt | 3 + .../rke2-canal/v3.13.3/templates/_helpers.tpl | 7 + .../rke2-canal/v3.13.3/templates/config.yaml | 67 ++ .../rke2-canal/v3.13.3/templates/crd.yaml | 197 ++++++ .../v3.13.3/templates/daemonset.yaml | 262 ++++++++ .../rke2-canal/v3.13.3/templates/rbac.yaml | 163 +++++ .../v3.13.3/templates/serviceaccount.yaml | 6 + .../rke2-canal/rke2-canal/v3.13.3/values.yaml | 74 +++ .../1.10.101-build2021022301/.helmignore | 22 + .../1.10.101-build2021022301}/Chart.yaml | 3 +- .../1.10.101-build2021022301}/README.md | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 .../templates/clusterrole-autoscaler.yaml | 0 .../templates/clusterrole.yaml | 0 .../clusterrolebinding-autoscaler.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../templates/configmap-autoscaler.yaml | 0 .../templates/configmap.yaml | 0 .../templates/deployment-autoscaler.yaml | 0 
.../templates/deployment.yaml | 0 .../templates/poddisruptionbudget.yaml | 0 .../templates/podsecuritypolicy.yaml | 0 .../templates/service-metrics.yaml | 0 .../templates/service.yaml | 0 .../templates/serviceaccount-autoscaler.yaml | 0 .../templates/serviceaccount.yaml | 0 .../templates/servicemonitor.yaml | 0 .../1.10.101-build2021022301}/values.yaml | 0 .../rke2-coredns/1.10.101/.helmignore | 22 + .../rke2-coredns/1.10.101/Chart.yaml | 23 + .../rke2-coredns/1.10.101/README.md | 138 +++++ .../rke2-coredns/1.10.101/templates/NOTES.txt | 30 + .../1.10.101/templates/_helpers.tpl | 158 +++++ .../templates/clusterrole-autoscaler.yaml | 35 ++ .../1.10.101/templates/clusterrole.yaml | 38 ++ .../clusterrolebinding-autoscaler.yaml | 28 + .../templates/clusterrolebinding.yaml | 24 + .../templates/configmap-autoscaler.yaml | 34 ++ .../1.10.101/templates/configmap.yaml | 30 + .../templates/deployment-autoscaler.yaml | 77 +++ .../1.10.101/templates/deployment.yaml | 127 ++++ .../templates/poddisruptionbudget.yaml | 28 + .../1.10.101/templates/podsecuritypolicy.yaml | 57 ++ .../1.10.101/templates/service-metrics.yaml | 33 + .../1.10.101/templates/service.yaml | 40 ++ .../templates/serviceaccount-autoscaler.yaml | 21 + .../1.10.101/templates/serviceaccount.yaml | 16 + .../1.10.101/templates/servicemonitor.yaml | 33 + .../rke2-coredns/1.10.101/values.yaml | 202 ++++++ .../rke2-coredns/1.13.800/.helmignore | 22 + .../rke2-coredns/1.13.800/Chart.yaml | 14 + .../rke2-coredns/1.13.800/README.md | 169 +++++ .../rke2-coredns/1.13.800/templates/NOTES.txt | 30 + .../1.13.800/templates/_helpers.tpl | 158 +++++ .../templates/clusterrole-autoscaler.yaml | 35 ++ .../1.13.800/templates/clusterrole.yaml | 38 ++ .../clusterrolebinding-autoscaler.yaml | 28 + .../templates/clusterrolebinding.yaml | 24 + .../templates/configmap-autoscaler.yaml | 37 ++ .../1.13.800/templates/configmap.yaml | 30 + .../templates/deployment-autoscaler.yaml | 77 +++ .../1.13.800/templates/deployment.yaml} | 8 +- 
.../1.13.800}/templates/hpa.yaml | 0 .../templates/poddisruptionbudget.yaml | 28 + .../1.13.800/templates/podsecuritypolicy.yaml | 57 ++ .../1.13.800/templates/service-metrics.yaml | 33 + .../1.13.800/templates/service.yaml} | 6 +- .../templates/serviceaccount-autoscaler.yaml | 21 + .../1.13.800/templates/serviceaccount.yaml | 16 + .../1.13.800/templates/servicemonitor.yaml | 33 + .../rke2-coredns/1.13.800/values.yaml | 259 ++++++++ .../rke2-ingress-nginx/1.36.300/.helmignore | 21 + .../rke2-ingress-nginx/1.36.300/Chart.yaml | 17 + .../rke2-ingress-nginx/1.36.300/OWNERS | 6 + .../rke2-ingress-nginx/1.36.300/README.md | 361 +++++++++++ .../ci/daemonset-customconfig-values.yaml | 4 + .../ci/daemonset-customnodeport-values.yaml | 15 + .../1.36.300/ci/daemonset-headers-values.yaml | 6 + .../ci/daemonset-nodeport-values.yaml | 4 + ...set-tcp-udp-configMapNamespace-values.yaml | 14 + .../1.36.300/ci/daemonset-tcp-udp-values.yaml | 10 + .../1.36.300/ci/daemonset-tcp-values.yaml | 6 + .../1.36.300/ci/deamonset-default-values.yaml | 2 + .../1.36.300/ci/deamonset-metrics-values.yaml | 4 + .../1.36.300/ci/deamonset-psp-values.yaml | 5 + .../ci/deamonset-webhook-and-psp-values.yaml | 7 + .../1.36.300/ci/deamonset-webhook-values.yaml | 4 + .../ci/deployment-autoscaling-values.yaml | 3 + .../ci/deployment-customconfig-values.yaml | 3 + .../ci/deployment-customnodeport-values.yaml | 14 + .../ci/deployment-default-values.yaml | 1 + .../ci/deployment-headers-values.yaml | 5 + .../ci/deployment-metrics-values.yaml | 3 + .../ci/deployment-nodeport-values.yaml | 3 + .../1.36.300/ci/deployment-psp-values.yaml | 2 + ...ent-tcp-udp-configMapNamespace-values.yaml | 13 + .../ci/deployment-tcp-udp-values.yaml | 9 + .../1.36.300/ci/deployment-tcp-values.yaml | 3 + .../ci/deployment-webhook-and-psp-values.yaml | 6 + .../ci/deployment-webhook-values.yaml | 3 + .../1.36.300/templates/NOTES.txt | 71 +++ .../1.36.300/templates/_helpers.tpl | 134 ++++ .../templates/addheaders-configmap.yaml | 0 
.../job-patch/clusterrole.yaml | 30 + .../job-patch/clusterrolebinding.yaml | 23 + .../job-patch/job-createSecret.yaml | 55 ++ .../job-patch/job-patchWebhook.yaml | 57 ++ .../admission-webhooks/job-patch/psp.yaml | 39 ++ .../admission-webhooks/job-patch/role.yaml | 23 + .../job-patch/rolebinding.yaml | 23 + .../job-patch/serviceaccount.yaml | 15 + .../validating-webhook.yaml | 31 + .../1.36.300/templates/clusterrole.yaml | 71 +++ .../templates/clusterrolebinding.yaml | 19 + .../templates/controller-configmap.yaml | 22 + .../templates/controller-daemonset.yaml | 257 ++++++++ .../templates/controller-deployment.yaml | 255 ++++++++ .../1.36.300/templates/controller-hpa.yaml | 34 ++ .../templates/controller-metrics-service.yaml | 0 .../controller-poddisruptionbudget.yaml | 19 + .../templates/controller-prometheusrules.yaml | 24 + .../1.36.300/templates/controller-psp.yaml | 80 +++ .../1.36.300/templates/controller-role.yaml | 91 +++ .../templates/controller-rolebinding.yaml | 19 + .../templates/controller-service.yaml | 94 +++ .../templates/controller-serviceaccount.yaml | 11 + .../templates/controller-servicemonitor.yaml | 38 ++ .../templates/controller-webhook-service.yaml | 0 .../templates/default-backend-deployment.yaml | 110 ++++ .../default-backend-poddisruptionbudget.yaml | 19 + .../templates/default-backend-psp.yaml | 35 ++ .../templates/default-backend-role.yaml | 16 + .../default-backend-rolebinding.yaml | 19 + .../templates/default-backend-service.yaml | 45 ++ .../default-backend-serviceaccount.yaml | 11 + .../templates/proxyheaders-configmap.yaml | 0 .../1.36.300}/templates/tcp-configmap.yaml | 0 .../1.36.300}/templates/udp-configmap.yaml | 0 .../rke2-ingress-nginx/1.36.300/values.yaml | 578 ++++++++++++++++++ .../rke2-ingress-nginx/3.3.000/.helmignore | 22 + .../3.3.000}/Chart.yaml | 3 +- .../{ => rke2-ingress-nginx/3.3.000}/OWNERS | 0 .../3.3.000}/README.md | 0 .../ci/daemonset-customconfig-values.yaml | 0 .../ci/daemonset-customnodeport-values.yaml | 0 
.../3.3.000}/ci/daemonset-headers-values.yaml | 0 .../ci/daemonset-internal-lb-values.yaml | 0 .../ci/daemonset-nodeport-values.yaml | 0 ...set-tcp-udp-configMapNamespace-values.yaml | 0 .../3.3.000}/ci/daemonset-tcp-udp-values.yaml | 0 .../3.3.000}/ci/daemonset-tcp-values.yaml | 0 .../3.3.000}/ci/deamonset-default-values.yaml | 0 .../3.3.000}/ci/deamonset-metrics-values.yaml | 0 .../3.3.000}/ci/deamonset-psp-values.yaml | 0 .../ci/deamonset-webhook-and-psp-values.yaml | 0 .../3.3.000}/ci/deamonset-webhook-values.yaml | 0 .../ci/deployment-autoscaling-values.yaml | 0 .../ci/deployment-customconfig-values.yaml | 0 .../ci/deployment-customnodeport-values.yaml | 0 .../ci/deployment-default-values.yaml | 0 .../ci/deployment-headers-values.yaml | 0 .../ci/deployment-internal-lb-values.yaml | 0 .../ci/deployment-metrics-values.yaml | 0 .../ci/deployment-nodeport-values.yaml | 0 .../3.3.000}/ci/deployment-psp-values.yaml | 0 ...ent-tcp-udp-configMapNamespace-values.yaml | 0 .../ci/deployment-tcp-udp-values.yaml | 0 .../3.3.000}/ci/deployment-tcp-values.yaml | 0 .../ci/deployment-webhook-and-psp-values.yaml | 0 .../ci/deployment-webhook-values.yaml | 0 .../3.3.000}/templates/NOTES.txt | 0 .../3.3.000}/templates/_helpers.tpl | 0 .../job-patch/clusterrole.yaml | 0 .../job-patch/clusterrolebinding.yaml | 0 .../job-patch/job-createSecret.yaml | 0 .../job-patch/job-patchWebhook.yaml | 0 .../admission-webhooks/job-patch/psp.yaml | 0 .../admission-webhooks/job-patch/role.yaml | 0 .../job-patch/rolebinding.yaml | 0 .../job-patch/serviceaccount.yaml | 0 .../validating-webhook.yaml | 0 .../3.3.000}/templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../controller-configmap-addheaders.yaml | 0 .../controller-configmap-proxyheaders.yaml | 0 .../templates/controller-configmap-tcp.yaml | 0 .../templates/controller-configmap-udp.yaml | 0 .../templates/controller-configmap.yaml | 0 .../templates/controller-daemonset.yaml | 0 .../templates/controller-deployment.yaml | 
0 .../3.3.000}/templates/controller-hpa.yaml | 0 .../controller-poddisruptionbudget.yaml | 0 .../templates/controller-prometheusrules.yaml | 0 .../3.3.000}/templates/controller-psp.yaml | 0 .../3.3.000}/templates/controller-role.yaml | 0 .../templates/controller-rolebinding.yaml | 0 .../controller-service-internal.yaml | 0 .../templates/controller-service-metrics.yaml | 0 .../templates/controller-service-webhook.yaml | 0 .../templates/controller-service.yaml | 0 .../templates/controller-serviceaccount.yaml | 0 .../templates/controller-servicemonitor.yaml | 0 .../templates/default-backend-deployment.yaml | 0 .../default-backend-poddisruptionbudget.yaml | 0 .../templates/default-backend-psp.yaml | 0 .../templates/default-backend-role.yaml | 0 .../default-backend-rolebinding.yaml | 0 .../templates/default-backend-service.yaml | 0 .../default-backend-serviceaccount.yaml | 0 .../3.3.000}/values.yaml | 0 .../rke2-kube-proxy/v1.18.10/Chart.yaml | 12 + .../v1.18.10}/templates/NOTES.txt | 0 .../v1.18.10/templates/_helpers.tpl | 7 + .../v1.18.10/templates/config.yaml | 69 +++ .../v1.18.10/templates/daemonset.yaml | 78 +++ .../v1.18.10}/templates/rbac.yaml | 0 .../v1.18.10}/templates/serviceaccount.yaml | 0 .../rke2-kube-proxy/v1.18.10/values.yaml | 223 +++++++ .../rke2-kube-proxy/v1.18.12/Chart.yaml | 12 + .../v1.18.12/templates/NOTES.txt | 2 + .../v1.18.12/templates/_helpers.tpl | 7 + .../v1.18.12/templates/config.yaml | 69 +++ .../v1.18.12/templates/daemonset.yaml | 78 +++ .../v1.18.12/templates/rbac.yaml | 12 + .../v1.18.12/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.18.12/values.yaml | 223 +++++++ .../rke2-kube-proxy/v1.18.13/Chart.yaml | 12 + .../v1.18.13/templates/NOTES.txt | 2 + .../v1.18.13/templates/_helpers.tpl | 7 + .../v1.18.13/templates/config.yaml | 69 +++ .../v1.18.13/templates/daemonset.yaml | 78 +++ .../v1.18.13/templates/rbac.yaml | 12 + .../v1.18.13/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.18.13/values.yaml | 223 +++++++ 
.../rke2-kube-proxy/v1.18.15/Chart.yaml | 12 + .../v1.18.15/templates/NOTES.txt | 2 + .../v1.18.15}/templates/_helpers.tpl | 0 .../v1.18.15/templates/config.yaml | 69 +++ .../v1.18.15}/templates/daemonset.yaml | 0 .../v1.18.15/templates/rbac.yaml | 12 + .../v1.18.15/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.18.15/values.yaml | 221 +++++++ .../rke2-kube-proxy/v1.18.16/Chart.yaml | 12 + .../v1.18.16/templates/NOTES.txt | 2 + .../v1.18.16/templates/_helpers.tpl | 21 + .../v1.18.16}/templates/config.yaml | 0 .../v1.18.16/templates/daemonset.yaml | 78 +++ .../v1.18.16/templates/rbac.yaml | 12 + .../v1.18.16/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.18.16/values.yaml | 142 +++++ .../rke2-kube-proxy/v1.18.4/Chart.yaml | 12 + .../v1.18.4/templates/NOTES.txt | 2 + .../v1.18.4/templates/_helpers.tpl | 7 + .../v1.18.4/templates/config.yaml | 69 +++ .../v1.18.4/templates/daemonset.yaml | 78 +++ .../v1.18.4/templates/rbac.yaml | 12 + .../v1.18.4/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.18.4/values.yaml | 223 +++++++ .../rke2-kube-proxy/v1.18.8/Chart.yaml | 12 + .../v1.18.8/templates/NOTES.txt | 2 + .../v1.18.8/templates/_helpers.tpl | 7 + .../v1.18.8/templates/config.yaml | 69 +++ .../v1.18.8/templates/daemonset.yaml | 78 +++ .../v1.18.8/templates/rbac.yaml | 12 + .../v1.18.8/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.18.8/values.yaml | 223 +++++++ .../rke2-kube-proxy/v1.18.9/Chart.yaml | 12 + .../v1.18.9/templates/NOTES.txt | 2 + .../v1.18.9/templates/_helpers.tpl | 7 + .../v1.18.9/templates/config.yaml | 69 +++ .../v1.18.9/templates/daemonset.yaml | 78 +++ .../v1.18.9/templates/rbac.yaml | 12 + .../v1.18.9/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.18.9/values.yaml | 223 +++++++ .../rke2-kube-proxy/v1.19.5/Chart.yaml | 12 + .../v1.19.5/templates/NOTES.txt | 2 + .../v1.19.5/templates/_helpers.tpl | 21 + .../v1.19.5/templates/config.yaml | 69 +++ .../v1.19.5/templates/daemonset.yaml | 78 +++ 
.../v1.19.5/templates/rbac.yaml | 12 + .../v1.19.5/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.19.5/values.yaml | 221 +++++++ .../rke2-kube-proxy/v1.19.7/Chart.yaml | 12 + .../v1.19.7/templates/NOTES.txt | 2 + .../v1.19.7/templates/_helpers.tpl | 21 + .../v1.19.7/templates/config.yaml | 69 +++ .../v1.19.7/templates/daemonset.yaml | 78 +++ .../v1.19.7/templates/rbac.yaml | 12 + .../v1.19.7/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.19.7/values.yaml | 221 +++++++ .../{ => rke2-kube-proxy/v1.19.8}/Chart.yaml | 16 +- .../v1.19.8/templates/NOTES.txt | 2 + .../v1.19.8/templates/_helpers.tpl | 21 + .../v1.19.8/templates/config.yaml | 69 +++ .../v1.19.8/templates/daemonset.yaml | 78 +++ .../v1.19.8/templates/rbac.yaml | 12 + .../v1.19.8/templates/serviceaccount.yaml | 5 + .../{ => rke2-kube-proxy/v1.19.8}/values.yaml | 0 .../rke2-kube-proxy/v1.20.2/Chart.yaml | 12 + .../v1.20.2/templates/NOTES.txt | 2 + .../v1.20.2/templates/_helpers.tpl | 21 + .../v1.20.2/templates/config.yaml | 69 +++ .../v1.20.2/templates/daemonset.yaml | 78 +++ .../v1.20.2/templates/rbac.yaml | 12 + .../v1.20.2/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.20.2/values.yaml | 142 +++++ .../2.11.100-build2021022300/.helmignore | 22 + .../2.11.100-build2021022300}/Chart.yaml | 0 .../2.11.100-build2021022300}/README.md | 0 .../ci/ci-values.yaml | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 ...ggregated-metrics-reader-cluster-role.yaml | 0 .../templates/auth-delegator-crb.yaml | 0 .../templates/cluster-role.yaml | 0 .../templates/metric-server-service.yaml | 0 .../templates/metrics-api-service.yaml | 0 .../templates/metrics-server-crb.yaml | 0 .../templates/metrics-server-deployment.yaml | 0 .../metrics-server-serviceaccount.yaml | 0 .../templates/pdb.yaml | 0 .../templates/psp.yaml | 0 .../templates/role-binding.yaml | 0 .../templates/tests/test-version.yaml | 0 .../2.11.100-build2021022300}/values.yaml | 0 
.../rke2-metrics-server/2.11.100/.helmignore | 22 + .../rke2-metrics-server/2.11.100/Chart.yaml | 15 + .../rke2-metrics-server/2.11.100/README.md | 39 ++ .../2.11.100/ci/ci-values.yaml | 5 + .../2.11.100/templates/NOTES.txt | 11 + .../2.11.100/templates/_helpers.tpl | 59 ++ ...ggregated-metrics-reader-cluster-role.yaml | 18 + .../templates/auth-delegator-crb.yaml | 19 + .../2.11.100/templates/cluster-role.yaml | 34 ++ .../templates/metric-server-service.yaml | 25 + .../templates/metrics-api-service.yaml | 20 + .../templates/metrics-server-crb.yaml | 19 + .../templates/metrics-server-deployment.yaml | 88 +++ .../metrics-server-serviceaccount.yaml | 12 + .../2.11.100/templates/pdb.yaml | 23 + .../2.11.100/templates/psp.yaml | 26 + .../2.11.100/templates/role-binding.yaml | 20 + .../templates/tests/test-version.yaml | 21 + .../rke2-metrics-server/2.11.100/values.yaml | 113 ++++ index.yaml | 59 +- 357 files changed, 11120 insertions(+), 434 deletions(-) delete mode 100644 assets/index.yaml rename charts/rke2-canal/{ => rke2-canal-v3.13.300/build20210223}/Chart.yaml (65%) rename charts/rke2-canal/{ => rke2-canal-v3.13.300/build20210223}/templates/NOTES.txt (100%) rename charts/rke2-canal/{ => rke2-canal-v3.13.300/build20210223}/templates/_helpers.tpl (100%) mode change 100755 => 100644 rename charts/rke2-canal/{ => rke2-canal-v3.13.300/build20210223}/templates/config.yaml (100%) rename charts/rke2-canal/{ => rke2-canal-v3.13.300/build20210223}/templates/crd.yaml (100%) rename charts/rke2-canal/{ => rke2-canal-v3.13.300/build20210223}/templates/daemonset.yaml (100%) rename charts/rke2-canal/{ => rke2-canal-v3.13.300/build20210223}/templates/rbac.yaml (100%) rename charts/rke2-canal/{ => rke2-canal-v3.13.300/build20210223}/templates/serviceaccount.yaml (100%) rename charts/rke2-canal/{ => rke2-canal-v3.13.300/build20210223}/values.yaml (100%) create mode 100644 charts/rke2-canal/rke2-canal/v3.13.3/Chart.yaml create mode 100644 
charts/rke2-canal/rke2-canal/v3.13.3/templates/NOTES.txt create mode 100644 charts/rke2-canal/rke2-canal/v3.13.3/templates/_helpers.tpl create mode 100644 charts/rke2-canal/rke2-canal/v3.13.3/templates/config.yaml create mode 100644 charts/rke2-canal/rke2-canal/v3.13.3/templates/crd.yaml create mode 100644 charts/rke2-canal/rke2-canal/v3.13.3/templates/daemonset.yaml create mode 100644 charts/rke2-canal/rke2-canal/v3.13.3/templates/rbac.yaml create mode 100644 charts/rke2-canal/rke2-canal/v3.13.3/templates/serviceaccount.yaml create mode 100644 charts/rke2-canal/rke2-canal/v3.13.3/values.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/.helmignore rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/Chart.yaml (94%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/README.md (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/NOTES.txt (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/_helpers.tpl (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/clusterrole-autoscaler.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/clusterrole.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/clusterrolebinding-autoscaler.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/clusterrolebinding.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/configmap-autoscaler.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/configmap.yaml (100%) 
mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/deployment-autoscaler.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/deployment.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/poddisruptionbudget.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/podsecuritypolicy.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/service-metrics.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/service.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/serviceaccount-autoscaler.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/serviceaccount.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/templates/servicemonitor.yaml (100%) mode change 100755 => 100644 rename charts/rke2-coredns/{ => rke2-coredns/1.10.101-build2021022301}/values.yaml (100%) mode change 100755 => 100644 create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/.helmignore create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/Chart.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/README.md create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/NOTES.txt create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/_helpers.tpl create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole-autoscaler.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole.yaml create mode 100644 
charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding-autoscaler.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap-autoscaler.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment-autoscaler.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/poddisruptionbudget.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/podsecuritypolicy.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/service-metrics.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/service.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount-autoscaler.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/templates/servicemonitor.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.10.101/values.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/.helmignore create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/Chart.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/README.md create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/NOTES.txt create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/_helpers.tpl create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole-autoscaler.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding-autoscaler.yaml create mode 100644 
charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap-autoscaler.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment-autoscaler.yaml rename charts/rke2-coredns/{templates/deployment.yaml.orig => rke2-coredns/1.13.800/templates/deployment.yaml} (93%) rename charts/rke2-coredns/{ => rke2-coredns/1.13.800}/templates/hpa.yaml (100%) mode change 100755 => 100644 create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/poddisruptionbudget.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/podsecuritypolicy.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/service-metrics.yaml rename charts/rke2-coredns/{templates/service.yaml.orig => rke2-coredns/1.13.800/templates/service.yaml} (85%) create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount-autoscaler.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/templates/servicemonitor.yaml create mode 100644 charts/rke2-coredns/rke2-coredns/1.13.800/values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/.helmignore create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/Chart.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/OWNERS create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/README.md create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customconfig-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customnodeport-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-headers-values.yaml 
create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-nodeport-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-configMapNamespace-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-default-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-metrics-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-psp-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-and-psp-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-autoscaling-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customconfig-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customnodeport-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-default-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-headers-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-metrics-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-nodeport-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-psp-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-configMapNamespace-values.yaml create mode 100644 
charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-and-psp-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/NOTES.txt create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/_helpers.tpl rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/1.36.300}/templates/addheaders-configmap.yaml (100%) mode change 100755 => 100644 create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrole.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-createSecret.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/psp.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/role.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/rolebinding.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/serviceaccount.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/validating-webhook.yaml create mode 100644 
charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrole.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrolebinding.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-configmap.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-daemonset.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-deployment.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-hpa.yaml rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/1.36.300}/templates/controller-metrics-service.yaml (100%) mode change 100755 => 100644 create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-poddisruptionbudget.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-prometheusrules.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-psp.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-role.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-rolebinding.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-service.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-serviceaccount.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-servicemonitor.yaml rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/1.36.300}/templates/controller-webhook-service.yaml (100%) mode change 100755 => 100644 create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-deployment.yaml create mode 100644 
charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-poddisruptionbudget.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-psp.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-role.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-rolebinding.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-service.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-serviceaccount.yaml rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/1.36.300}/templates/proxyheaders-configmap.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/1.36.300}/templates/tcp-configmap.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/1.36.300}/templates/udp-configmap.yaml (100%) mode change 100755 => 100644 create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/values.yaml create mode 100644 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/.helmignore rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/Chart.yaml (92%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/OWNERS (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/README.md (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/daemonset-customconfig-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/daemonset-customnodeport-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/daemonset-headers-values.yaml (100%) mode change 100755 => 100644 rename 
charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/daemonset-internal-lb-values.yaml (100%) rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/daemonset-nodeport-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/daemonset-tcp-udp-configMapNamespace-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/daemonset-tcp-udp-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/daemonset-tcp-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deamonset-default-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deamonset-metrics-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deamonset-psp-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deamonset-webhook-and-psp-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deamonset-webhook-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-autoscaling-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-customconfig-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-customnodeport-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-default-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-headers-values.yaml 
(100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-internal-lb-values.yaml (100%) rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-metrics-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-nodeport-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-psp-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-tcp-udp-configMapNamespace-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-tcp-udp-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-tcp-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-webhook-and-psp-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/ci/deployment-webhook-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/NOTES.txt (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/_helpers.tpl (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/admission-webhooks/job-patch/clusterrole.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/admission-webhooks/job-patch/clusterrolebinding.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/admission-webhooks/job-patch/job-createSecret.yaml (100%) mode change 100755 => 100644 rename 
charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/admission-webhooks/job-patch/job-patchWebhook.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/admission-webhooks/job-patch/psp.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/admission-webhooks/job-patch/role.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/admission-webhooks/job-patch/rolebinding.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/admission-webhooks/job-patch/serviceaccount.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/admission-webhooks/validating-webhook.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/clusterrole.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/clusterrolebinding.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-configmap-addheaders.yaml (100%) rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-configmap-proxyheaders.yaml (100%) rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-configmap-tcp.yaml (100%) rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-configmap-udp.yaml (100%) rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-configmap.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-daemonset.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => 
rke2-ingress-nginx/3.3.000}/templates/controller-deployment.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-hpa.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-poddisruptionbudget.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-prometheusrules.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-psp.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-role.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-rolebinding.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-service-internal.yaml (100%) rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-service-metrics.yaml (100%) rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-service-webhook.yaml (100%) rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-service.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-serviceaccount.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/controller-servicemonitor.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/default-backend-deployment.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/default-backend-poddisruptionbudget.yaml (100%) mode change 100755 => 100644 
rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/default-backend-psp.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/default-backend-role.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/default-backend-rolebinding.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/default-backend-service.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/templates/default-backend-serviceaccount.yaml (100%) mode change 100755 => 100644 rename charts/rke2-ingress-nginx/{ => rke2-ingress-nginx/3.3.000}/values.yaml (100%) mode change 100755 => 100644 create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/Chart.yaml rename charts/rke2-kube-proxy/{ => rke2-kube-proxy/v1.18.10}/templates/NOTES.txt (100%) create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/_helpers.tpl create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/config.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/daemonset.yaml rename charts/rke2-kube-proxy/{ => rke2-kube-proxy/v1.18.10}/templates/rbac.yaml (100%) rename charts/rke2-kube-proxy/{ => rke2-kube-proxy/v1.18.10}/templates/serviceaccount.yaml (100%) create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/values.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/Chart.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/NOTES.txt create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/_helpers.tpl create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/config.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/daemonset.yaml create mode 100644 
charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/serviceaccount.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/values.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/Chart.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/NOTES.txt create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/_helpers.tpl create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/config.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/daemonset.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/serviceaccount.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/values.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/Chart.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/NOTES.txt rename charts/rke2-kube-proxy/{ => rke2-kube-proxy/v1.18.15}/templates/_helpers.tpl (100%) mode change 100755 => 100644 create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/config.yaml rename charts/rke2-kube-proxy/{ => rke2-kube-proxy/v1.18.15}/templates/daemonset.yaml (100%) create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/serviceaccount.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/values.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/Chart.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/NOTES.txt create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/_helpers.tpl rename charts/rke2-kube-proxy/{ => 
rke2-kube-proxy/v1.18.16}/templates/config.yaml (100%) create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/daemonset.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/serviceaccount.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/values.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/Chart.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/NOTES.txt create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/_helpers.tpl create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/config.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/daemonset.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/serviceaccount.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/values.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/Chart.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/NOTES.txt create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/_helpers.tpl create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/config.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/daemonset.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/serviceaccount.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/values.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/Chart.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/NOTES.txt create mode 100644 
charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/_helpers.tpl create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/config.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/daemonset.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/serviceaccount.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/values.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/Chart.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/NOTES.txt create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/_helpers.tpl create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/config.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/daemonset.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/serviceaccount.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/values.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/Chart.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/NOTES.txt create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/_helpers.tpl create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/config.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/daemonset.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/serviceaccount.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/values.yaml rename charts/rke2-kube-proxy/{ => rke2-kube-proxy/v1.19.8}/Chart.yaml (55%) create mode 100644 
charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/NOTES.txt create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/_helpers.tpl create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/config.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/daemonset.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/serviceaccount.yaml rename charts/rke2-kube-proxy/{ => rke2-kube-proxy/v1.19.8}/values.yaml (100%) create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/Chart.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/NOTES.txt create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/_helpers.tpl create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/config.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/daemonset.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/rbac.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/serviceaccount.yaml create mode 100644 charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/values.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/.helmignore rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/Chart.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/README.md (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/ci/ci-values.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/NOTES.txt (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => 
rke2-metrics-server/2.11.100-build2021022300}/templates/_helpers.tpl (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/aggregated-metrics-reader-cluster-role.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/auth-delegator-crb.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/cluster-role.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/metric-server-service.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/metrics-api-service.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/metrics-server-crb.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/metrics-server-deployment.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/metrics-server-serviceaccount.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/pdb.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/psp.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/role-binding.yaml (100%) mode change 100755 => 100644 rename charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/templates/tests/test-version.yaml (100%) mode change 100755 => 100644 rename 
charts/rke2-metrics-server/{ => rke2-metrics-server/2.11.100-build2021022300}/values.yaml (100%) mode change 100755 => 100644 create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/.helmignore create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/Chart.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/README.md create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/ci/ci-values.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/NOTES.txt create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/_helpers.tpl create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/aggregated-metrics-reader-cluster-role.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/auth-delegator-crb.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/cluster-role.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metric-server-service.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-api-service.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-crb.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-deployment.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-serviceaccount.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/pdb.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/psp.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/role-binding.yaml create mode 100644 charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/tests/test-version.yaml create mode 100644 
charts/rke2-metrics-server/rke2-metrics-server/2.11.100/values.yaml diff --git a/assets/index.yaml b/assets/index.yaml deleted file mode 100644 index 215b65c..0000000 --- a/assets/index.yaml +++ /dev/null @@ -1,381 +0,0 @@ -apiVersion: v1 -entries: - rke2-canal: - - apiVersion: v1 - appVersion: v3.13.3 - created: "2021-02-24T21:41:48.737080031Z" - description: Install Canal Network Plugin. - digest: 4b6ac74aec73a70d12186701660c1f221fdbcb582571029a6c8fbc2738065742 - home: https://www.projectcalico.org/ - keywords: - - canal - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-canal - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-canal/rke2-canal-v3.13.300-build20210223.tgz - version: v3.13.300-build20210223 - - apiVersion: v1 - appVersion: v3.13.3 - created: "2021-02-19T16:11:27.472930693Z" - description: Install Canal Network Plugin. - digest: 2396b0aca28a6d4a373a251b02e4efa12bbfedf29e37e45904b860176d0c80f8 - home: https://www.projectcalico.org/ - keywords: - - canal - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-canal - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-canal/rke2-canal-v3.13.3.tgz - version: v3.13.3 - rke2-coredns: - - apiVersion: v1 - appVersion: 1.7.1 - created: "2021-01-08T18:12:00.296423364Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services - digest: 335099356a98589e09f1bb940913b0ed6abb8d2c4db91720f87d1cf7697a5cf7 - home: https://coredns.io - icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png - keywords: - - coredns - - dns - - kubedns - name: rke2-coredns - sources: - - https://github.com/coredns/coredns - urls: - - assets/rke2-coredns/rke2-coredns-1.13.800.tgz - version: 1.13.800 - - apiVersion: v1 - appVersion: 1.6.9 - created: "2021-01-22T21:35:45.403680219Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services - digest: 
be60a62ec184cf6ca7b0ed917e6962e8a2578fa1eeef6a835e82d2b7709933d5 - home: https://coredns.io - icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png - keywords: - - coredns - - dns - - kubedns - maintainers: - - email: hello@acale.ph - name: Acaleph - - email: shashidhara.huawei@gmail.com - name: shashidharatd - - email: andor44@gmail.com - name: andor44 - - email: manuel@rueg.eu - name: mrueg - name: rke2-coredns - sources: - - https://github.com/coredns/coredns - urls: - - assets/rke2-coredns/rke2-coredns-1.10.101.tgz - version: 1.10.101 - - apiVersion: v1 - appVersion: 1.6.9 - created: "2021-02-24T21:41:48.738290233Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services - digest: 869cb592cac545f579b6de6b35de82de4904566fd91826bc16546fddc48fe1c4 - home: https://coredns.io - icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png - keywords: - - coredns - - dns - - kubedns - maintainers: - - email: hello@acale.ph - name: Acaleph - - email: shashidhara.huawei@gmail.com - name: shashidharatd - - email: andor44@gmail.com - name: andor44 - - email: manuel@rueg.eu - name: mrueg - name: rke2-coredns - sources: - - https://github.com/coredns/coredns - urls: - - assets/rke2-coredns/rke2-coredns-1.10.101-build2021022301.tgz - version: 1.10.101-build2021022301 - rke2-ingress-nginx: - - apiVersion: v1 - appVersion: 0.35.0 - created: "2021-02-24T21:42:02.60663315Z" - description: Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer - digest: 2480ed0be9032f8f839913e12f0528128a15483ced57c851baed605156532782 - home: https://github.com/kubernetes/ingress-nginx - icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png - keywords: - - ingress - - nginx - kubeVersion: '>=1.16.0-0' - maintainers: - - name: ChiefAlexander - name: rke2-ingress-nginx - sources: - - https://github.com/kubernetes/ingress-nginx - urls: - - 
assets/rke2-ingress-nginx/rke2-ingress-nginx-3.3.000.tgz - version: 3.3.000 - - apiVersion: v1 - appVersion: 0.30.0 - created: "2021-02-19T16:11:27.47593126Z" - description: An nginx Ingress controller that uses ConfigMap to store the nginx configuration. - digest: 768ce303918a97a2d0f9a333f4eb0f2ebb3b7f54b849e83c6bdd52f8b513af9b - home: https://github.com/kubernetes/ingress-nginx - icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png - keywords: - - ingress - - nginx - kubeVersion: '>=1.10.0-0' - maintainers: - - name: ChiefAlexander - - email: Trevor.G.Wood@gmail.com - name: taharah - name: rke2-ingress-nginx - sources: - - https://github.com/kubernetes/ingress-nginx - urls: - - assets/rke2-ingress-nginx/rke2-ingress-nginx-1.36.300.tgz - version: 1.36.300 - rke2-kube-proxy: - - apiVersion: v1 - appVersion: v1.20.2 - created: "2021-01-25T23:01:11.589999085Z" - description: Install Kube Proxy. - digest: 68f08c49c302bfe23e9c6f8074a21a6a3e0c90fdb16f5e6fb32a5a3ee3f7c717 - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.20.2.tgz - version: v1.20.2 - - apiVersion: v1 - appVersion: v1.19.8 - created: "2021-02-24T21:41:48.739048333Z" - description: Install Kube Proxy. - digest: f2bace51d33062e3ac713ebbedd48dd4df56c821dfa52da9fdf71891d601bcde - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.8.tgz - version: v1.19.8 - - apiVersion: v1 - appVersion: v1.19.7 - created: "2021-01-22T21:35:45.405178128Z" - description: Install Kube Proxy. 
- digest: def9baa9bc5c12267d3575a03a2e5f2eccc907a6058202ed09a6cd39967790ca - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.7.tgz - version: v1.19.7 - - apiVersion: v1 - appVersion: v1.19.5 - created: "2020-12-17T19:20:49.383692056Z" - description: Install Kube Proxy. - digest: f74f820857b79601f3b8e498e701297d71f3b37bbf94dc3ae96dfcca50fb80df - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.5.tgz - version: v1.19.5 - - apiVersion: v1 - appVersion: v1.18.16 - created: "2021-02-19T17:03:49.957724823Z" - description: Install Kube Proxy. - digest: a57acde11e30a9a15330ffec38686b605325b145f21935e79843b28652d46a21 - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.16.tgz - version: v1.18.16 - - apiVersion: v1 - appVersion: v1.18.15 - created: "2021-01-14T18:05:30.822746229Z" - description: Install Kube Proxy. - digest: 3a6429d05a3d22e3959ceac27db15f922f1033553e8e6b5da2eb7cd18ed9309f - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.15.tgz - version: v1.18.15 - - apiVersion: v1 - appVersion: v1.18.13 - created: "2020-12-10T22:07:42.184767459Z" - description: Install Kube Proxy. 
- digest: 15d192f5016b8573d2c6f17ab55fa6f14fa1352fcdef2c391a6a477b199867ec - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.13.tgz - version: v1.18.13 - - apiVersion: v1 - appVersion: v1.18.12 - created: "2020-12-07T21:17:34.244857883Z" - description: Install Kube Proxy. - digest: e1da2b245da23aaa526cb94c04ed48cd3e730b848c0d33e420dcfd5b15374f5e - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.12.tgz - version: v1.18.12 - - apiVersion: v1 - appVersion: v1.18.10 - created: "2020-10-15T22:21:23.252729387Z" - description: Install Kube Proxy. - digest: 1ae84231365f19d82a4ea7c6b069ce90308147ba77bef072290ef7464ff1694e - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.10.tgz - version: v1.18.10 - - apiVersion: v1 - appVersion: v1.18.9 - created: "2020-10-14T23:04:28.48143194Z" - description: Install Kube Proxy. - digest: e1e5b6f98c535fa5d90469bd3f731d331bdaa3f9154157d7625b367a7023f399 - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.9.tgz - version: v1.18.9 - - apiVersion: v1 - appVersion: v1.18.8 - created: "2020-09-29T00:14:59.633896455Z" - description: Install Kube Proxy. 
- digest: 7765237ddc39c416178242e7a6798d679a50f466ac18d3a412207606cd0d66ed - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.8.tgz - version: v1.18.8 - - apiVersion: v1 - appVersion: v1.18.4 - created: "2020-09-29T00:14:59.632610835Z" - description: Install Kube Proxy. - digest: b859363c5ecab8c46b53efa34d866b9c27840737ad1afec0eb9729b8968304fb - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.4.tgz - version: v1.18.4 - rke2-metrics-server: - - apiVersion: v1 - appVersion: 0.3.6 - created: "2021-02-19T16:11:27.477610954Z" - description: Metrics Server is a cluster-wide aggregator of resource usage data. - digest: 295435f65cc6c0c5ed8fd6b028cac5614b761789c5e09c0483170c3fd46f6e59 - home: https://github.com/kubernetes-incubator/metrics-server - keywords: - - metrics-server - maintainers: - - email: o.with@sportradar.com - name: olemarkus - - email: k.aasan@sportradar.com - name: kennethaasan - name: rke2-metrics-server - sources: - - https://github.com/kubernetes-incubator/metrics-server - urls: - - assets/rke2-metrics-server/rke2-metrics-server-2.11.100.tgz - version: 2.11.100 - - apiVersion: v1 - appVersion: 0.3.6 - created: "2021-02-24T21:41:48.739850734Z" - description: Metrics Server is a cluster-wide aggregator of resource usage data. 
- digest: a7cbec2f4764c99db298fb4e1f5297246253a3228daf2747281c953059160fc9 - home: https://github.com/kubernetes-incubator/metrics-server - keywords: - - metrics-server - maintainers: - - email: o.with@sportradar.com - name: olemarkus - - email: k.aasan@sportradar.com - name: kennethaasan - name: rke2-metrics-server - sources: - - https://github.com/kubernetes-incubator/metrics-server - urls: - - assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022300.tgz - version: 2.11.100-build2021022300 -generated: "2021-02-24T21:42:02.60300284Z" diff --git a/charts/rke2-canal/Chart.yaml b/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/Chart.yaml similarity index 65% rename from charts/rke2-canal/Chart.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300/build20210223/Chart.yaml index 69e3d3f..8520cb8 100644 --- a/charts/rke2-canal/Chart.yaml +++ b/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/Chart.yaml @@ -1,13 +1,13 @@ apiVersion: v1 -name: rke2-canal -description: Install Canal Network Plugin. -version: v3.13.300-build20210223 appVersion: v3.13.3 +description: Install Canal Network Plugin. 
home: https://www.projectcalico.org/ keywords: - - canal -sources: - - https://github.com/rancher/rke2-charts +- canal maintainers: - - name: Rancher Labs - email: charts@rancher.com +- email: charts@rancher.com + name: Rancher Labs +name: rke2-canal +sources: +- https://github.com/rancher/rke2-charts +version: v3.13.300-build20210223 diff --git a/charts/rke2-canal/templates/NOTES.txt b/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/NOTES.txt similarity index 100% rename from charts/rke2-canal/templates/NOTES.txt rename to charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/NOTES.txt diff --git a/charts/rke2-canal/templates/_helpers.tpl b/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/_helpers.tpl old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-canal/templates/_helpers.tpl rename to charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/_helpers.tpl diff --git a/charts/rke2-canal/templates/config.yaml b/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/config.yaml similarity index 100% rename from charts/rke2-canal/templates/config.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/config.yaml diff --git a/charts/rke2-canal/templates/crd.yaml b/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/crd.yaml similarity index 100% rename from charts/rke2-canal/templates/crd.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/crd.yaml diff --git a/charts/rke2-canal/templates/daemonset.yaml b/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/daemonset.yaml similarity index 100% rename from charts/rke2-canal/templates/daemonset.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/daemonset.yaml diff --git a/charts/rke2-canal/templates/rbac.yaml b/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/rbac.yaml similarity index 100% rename from 
charts/rke2-canal/templates/rbac.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/rbac.yaml diff --git a/charts/rke2-canal/templates/serviceaccount.yaml b/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/serviceaccount.yaml similarity index 100% rename from charts/rke2-canal/templates/serviceaccount.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/serviceaccount.yaml diff --git a/charts/rke2-canal/values.yaml b/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/values.yaml similarity index 100% rename from charts/rke2-canal/values.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300/build20210223/values.yaml diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/Chart.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/Chart.yaml new file mode 100644 index 0000000..52bdce9 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +appVersion: v3.13.3 +description: Install Canal Network Plugin. +home: https://www.projectcalico.org/ +keywords: +- canal +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-canal +sources: +- https://github.com/rancher/rke2-charts +version: v3.13.3 diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/NOTES.txt b/charts/rke2-canal/rke2-canal/v3.13.3/templates/NOTES.txt new file mode 100644 index 0000000..12a30ff --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/NOTES.txt @@ -0,0 +1,3 @@ +Canal network plugin has been installed. + +NOTE: It may take few minutes until Canal image install CNI files and node become in ready state. 
diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/_helpers.tpl b/charts/rke2-canal/rke2-canal/v3.13.3/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/config.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/templates/config.yaml new file mode 100644 index 0000000..37f28ef --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/config.yaml @@ -0,0 +1,67 @@ +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Canal installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ .Release.Name }}-config + namespace: kube-system +data: + # Typha is disabled. + typha_service_name: {{ .Values.calico.typhaServiceName | quote }} + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + canal_iface: {{ .Values.flannel.iface | quote }} + + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: {{ .Values.calico.masquerade | quote }} + + # Configure the MTU to use + veth_mtu: {{ .Values.calico.vethuMTU | quote }} + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. 
+ cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + + # Flannel network configuration. Mounted into the flannel container. + net-conf.json: | + { + "Network": {{ .Values.podCidr | quote }}, + "Backend": { + "Type": {{ .Values.flannel.backend | quote }} + } + } diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/crd.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/templates/crd.yaml new file mode 100644 index 0000000..0351759 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/crd.yaml @@ -0,0 +1,197 @@ +--- +# Source: calico/templates/kdd-crds.yaml + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BlockAffinity + plural: blockaffinities + singular: blockaffinity + +--- +apiVersion: apiextensions.k8s.io/v1beta1 
+kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMConfig + plural: ipamconfigs + 
singular: ipamconfig + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMHandle + plural: ipamhandles + singular: ipamhandle + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkSet + plural: networksets + singular: networkset diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/daemonset.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/templates/daemonset.yaml new file mode 100644 index 0000000..1431df8 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/daemonset.yaml @@ -0,0 +1,262 @@ +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the canal container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. 
+kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Release.Name | quote }} + namespace: kube-system + labels: + k8s-app: canal +spec: + selector: + matchLabels: + k8s-app: canal + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: canal + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure canal gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: canal + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {{ template "system_default_registry" . }}{{ .Values.calico.cniImage.repository }}:{{ .Values.calico.cniImage.tag }} + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-canal.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: cni_network_config + # Set the hostname based on the k8s node name. 
+ - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: {{ template "system_default_registry" . }}{{ .Values.calico.flexvolImage.repository }}:{{ .Values.calico.flexvolImage.tag }} + command: ['/usr/local/bin/flexvol.sh', '-s', '/usr/local/bin/flexvol', '-i', 'flexvoldriver'] + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true + containers: + # Runs canal container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + command: + - "start_runit" + image: {{ template "system_default_registry" . }}{{ .Values.calico.nodeImage.repository }}:{{ .Values.calico.nodeImage.tag }} + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: {{ .Values.calico.datastoreType | quote }} + # Configure route aggregation based on pod CIDR. + - name: USE_POD_CIDR + value: {{ .Values.calico.usePodCIDR | quote }} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: {{ .Values.calico.waitForDatastore | quote }} + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Don't enable BGP. 
+ - name: CALICO_NETWORKING_BACKEND + value: {{ .Values.calico.networkingBackend | quote }} + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: {{ .Values.calico.clusterType | quote}} + # Period, in seconds, at which felix re-applies all iptables state + - name: FELIX_IPTABLESREFRESHINTERVAL + value: {{ .Values.calico.felixIptablesRefreshInterval | quote}} + - name: FELIX_IPTABLESBACKEND + value: {{ .Values.calico.felixIptablesBackend | quote}} + # No IP address needed. + - name: IP + value: "" + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: {{ .Values.calico.felixDefaultEndpointToHostAction | quote }} + # Disable IPv6 on Kubernetes. 
+ - name: FELIX_IPV6SUPPORT + value: {{ .Values.calico.felixIpv6Support | quote }} + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: {{ .Values.calico.felixLogSeverityScreen | quote }} + - name: FELIX_HEALTHENABLED + value: {{ .Values.calico.felixHealthEnabled | quote }} + # enable promentheus metrics + - name: FELIX_PROMETHEUSMETRICSENABLED + value: {{ .Values.calico.felixPrometheusMetricsEnabled | quote }} + - name: FELIX_XDPENABLED + value: {{ .Values.calico.felixXDPEnabled | quote }} + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + host: localhost + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # This container runs flannel using the kube-subnet-mgr backend + # for allocating subnets. + - name: kube-flannel + image: {{ template "system_default_registry" . }}{{ .Values.flannel.image.repository }}:{{ .Values.flannel.image.tag }} + command: + - "/opt/bin/flanneld" + {{- range .Values.flannel.args }} + - {{ . 
| quote }} + {{- end }} + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: FLANNELD_IFACE + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: canal_iface + - name: FLANNELD_IP_MASQ + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: masquerade + volumeMounts: + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + # Used by canal. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used by flannel. + - name: flannel-cfg + configMap: + name: {{ .Release.Name }}-config + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/rbac.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/templates/rbac.yaml new file mode 100644 index 0000000..cd39730 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/rbac.yaml @@ -0,0 +1,163 @@ +--- +# Source: calico/templates/rbac.yaml + +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. 
+ - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + +--- +# Flannel ClusterRole +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: + - apiGroups: [""] + resources: + - pods + verbs: + - get + - apiGroups: [""] + resources: + - nodes + verbs: + - list + - watch + - apiGroups: [""] + resources: + - nodes/status + verbs: + - patch +--- +# Bind the flannel ClusterRole to the canal ServiceAccount. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: canal-flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: canal-calico +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/serviceaccount.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/templates/serviceaccount.yaml new file mode 100644 index 0000000..582d55b --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: canal + namespace: kube-system diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/values.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/values.yaml new file mode 100644 index 0000000..1bb70a0 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/values.yaml @@ -0,0 +1,74 @@ +--- + +# The IPv4 cidr pool to create on startup if none exists. Pod IPs will be +# chosen from this range. 
+podCidr: "10.42.0.0/16" + +flannel: + # kube-flannel image + image: + repository: rancher/hardened-flannel + tag: v0.13.0-rancher1 + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + iface: "" + # kube-flannel command arguments + args: + - "--ip-masq" + - "--kube-subnet-mgr" + # Backend for kube-flannel. Backend should not be changed + # at runtime. + backend: "vxlan" + +calico: + # CNI installation image. + cniImage: + repository: rancher/hardened-calico + tag: v3.13.3 + # Canal node image. + nodeImage: + repository: rancher/hardened-calico + tag: v3.13.3 + # Flexvol Image. + flexvolImage: + repository: rancher/hardened-calico + tag: v3.13.3 + # Datastore type for canal. It can be either kubernetes or etcd. + datastoreType: kubernetes + # Wait for datastore to initialize. + waitForDatastore: true + # Configure route aggregation based on pod CIDR. + usePodCIDR: true + # Disable BGP routing. + networkingBackend: none + # Cluster type to identify the deployment type. + clusterType: "k8s,canal" + # Disable file logging so `kubectl logs` works. + disableFileLogging: true + # Disable IPv6 on Kubernetes. + felixIpv6Support: false + # Period, in seconds, at which felix re-applies all iptables state + felixIptablesRefreshInterval: 60 + # iptables backend to use for felix, defaults to auto but can also be set to nft or legacy + felixIptablesBackend: auto + # Set Felix logging to "info". + felixLogSeverityScreen: info + # Enable felix healthcheck. + felixHealthEnabled: true + # Enable prometheus metrics + felixPrometheusMetricsEnabled: true + # Disable XDP Acceleration as we do not support it with our ubi7 base image + felixXDPEnabled: false + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: true + # Set Felix endpoint to host default action to ACCEPT. + felixDefaultEndpointToHostAction: ACCEPT + # Configure the MTU to use. 
+ vethuMTU: 1450 + # Typha is disabled. + typhaServiceName: none + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/.helmignore b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/.helmignore new file mode 100644 index 0000000..7c04072 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS diff --git a/charts/rke2-coredns/Chart.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/Chart.yaml old mode 100755 new mode 100644 similarity index 94% rename from charts/rke2-coredns/Chart.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/Chart.yaml index 4606ef8..c4ddcf4 --- a/charts/rke2-coredns/Chart.yaml +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/Chart.yaml @@ -1,7 +1,6 @@ apiVersion: v1 appVersion: 1.6.9 -description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS - Services +description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services home: https://coredns.io icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png keywords: diff --git a/charts/rke2-coredns/README.md b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/README.md old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/README.md rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/README.md diff --git a/charts/rke2-coredns/templates/NOTES.txt b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/NOTES.txt old mode 100755 new mode 100644 similarity 
index 100% rename from charts/rke2-coredns/templates/NOTES.txt rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/NOTES.txt diff --git a/charts/rke2-coredns/templates/_helpers.tpl b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/_helpers.tpl old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/_helpers.tpl rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/_helpers.tpl diff --git a/charts/rke2-coredns/templates/clusterrole-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrole-autoscaler.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/clusterrole-autoscaler.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrole-autoscaler.yaml diff --git a/charts/rke2-coredns/templates/clusterrole.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrole.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/clusterrole.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrole.yaml diff --git a/charts/rke2-coredns/templates/clusterrolebinding-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrolebinding-autoscaler.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/clusterrolebinding-autoscaler.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrolebinding-autoscaler.yaml diff --git a/charts/rke2-coredns/templates/clusterrolebinding.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/clusterrolebinding.yaml rename to 
charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrolebinding.yaml diff --git a/charts/rke2-coredns/templates/configmap-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/configmap-autoscaler.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/configmap-autoscaler.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/configmap-autoscaler.yaml diff --git a/charts/rke2-coredns/templates/configmap.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/configmap.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/configmap.yaml diff --git a/charts/rke2-coredns/templates/deployment-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/deployment-autoscaler.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/deployment-autoscaler.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/deployment-autoscaler.yaml diff --git a/charts/rke2-coredns/templates/deployment.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/deployment.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/deployment.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/deployment.yaml diff --git a/charts/rke2-coredns/templates/poddisruptionbudget.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/poddisruptionbudget.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/poddisruptionbudget.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/poddisruptionbudget.yaml diff --git 
a/charts/rke2-coredns/templates/podsecuritypolicy.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/podsecuritypolicy.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/podsecuritypolicy.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/podsecuritypolicy.yaml diff --git a/charts/rke2-coredns/templates/service-metrics.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/service-metrics.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/service-metrics.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/service-metrics.yaml diff --git a/charts/rke2-coredns/templates/service.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/service.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/service.yaml diff --git a/charts/rke2-coredns/templates/serviceaccount-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/serviceaccount-autoscaler.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/serviceaccount-autoscaler.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/serviceaccount-autoscaler.yaml diff --git a/charts/rke2-coredns/templates/serviceaccount.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/serviceaccount.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/serviceaccount.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/serviceaccount.yaml diff --git a/charts/rke2-coredns/templates/servicemonitor.yaml 
b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/servicemonitor.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/servicemonitor.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/servicemonitor.yaml diff --git a/charts/rke2-coredns/values.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/values.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/values.yaml diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/.helmignore b/charts/rke2-coredns/rke2-coredns/1.10.101/.helmignore new file mode 100644 index 0000000..7c04072 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/Chart.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/Chart.yaml new file mode 100644 index 0000000..fea533e --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +appVersion: 1.6.9 +description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS + Services +home: https://coredns.io +icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png +keywords: +- coredns +- dns +- kubedns +maintainers: +- email: hello@acale.ph + name: Acaleph +- email: shashidhara.huawei@gmail.com + name: shashidharatd +- email: andor44@gmail.com + name: andor44 +- email: manuel@rueg.eu + name: mrueg +name: rke2-coredns +sources: +- https://github.com/coredns/coredns +version: 1.10.101 diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/README.md b/charts/rke2-coredns/rke2-coredns/1.10.101/README.md new file mode 100644 index 0000000..0d41d40 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/README.md @@ -0,0 +1,138 @@ +# CoreDNS + +[CoreDNS](https://coredns.io/) is a DNS server that chains plugins and provides DNS Services + +# TL;DR; + +```console +$ helm install --name coredns --namespace=kube-system stable/coredns +``` + +## Introduction + +This chart bootstraps a [CoreDNS](https://github.com/coredns/coredns) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. This chart will provide DNS Services and can be deployed in multiple configuration to support various scenarios listed below: + + - CoreDNS as a cluster dns service and a drop-in replacement for Kube/SkyDNS. This is the default mode and CoreDNS is deployed as cluster-service in kube-system namespace. 
This mode is chosen by setting `isClusterService` to true. + - CoreDNS as an external dns service. In this mode CoreDNS is deployed as any kubernetes app in user specified namespace. The CoreDNS service can be exposed outside the cluster by using either the NodePort or LoadBalancer type of service. This mode is chosen by setting `isClusterService` to false. + - CoreDNS as an external dns provider for kubernetes federation. This is a sub case of 'external dns service' which uses etcd plugin for CoreDNS backend. This deployment mode has a dependency on `etcd-operator` chart, which needs to be pre-installed. + +## Prerequisites + +- Kubernetes 1.10 or later + +## Installing the Chart + +The chart can be installed as follows: + +```console +$ helm install --name coredns --namespace=kube-system stable/coredns +``` + +The command deploys CoreDNS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists various ways to override default configuration during deployment. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete coredns +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Configuration + +| Parameter | Description | Default | +|:----------------------------------------|:--------------------------------------------------------------------------------------|:------------------------------------------------------------| +| `image.repository` | The image repository to pull from | coredns/coredns | +| `image.tag` | The image tag to pull from | `v1.6.9` | +| `image.pullPolicy` | Image pull policy | IfNotPresent | +| `replicaCount` | Number of replicas | 1 | +| `resources.limits.cpu` | Container maximum CPU | `100m` | +| `resources.limits.memory` | Container maximum memory | `128Mi` | +| `resources.requests.cpu` | Container requested CPU | `100m` | +| `resources.requests.memory` | Container requested memory | `128Mi` | +| `serviceType` | Kubernetes Service type | `ClusterIP` | +| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | {} | +| `prometheus.monitor.namespace` | Selector to select which namespaces the Endpoints objects are discovered from. | `""` | +| `service.clusterIP` | IP address to assign to service | `""` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `[]` | +| `service.annotations` | Annotations to add to service | `{prometheus.io/scrape: "true", prometheus.io/port: "9153"}`| +| `serviceAccount.create` | If true, create & use serviceAccount | false | +| `serviceAccount.name` | If not set & create is true, use template fullname | | +| `rbac.create` | If true, create & use RBAC resources | true | +| `rbac.pspEnable` | Specifies whether a PodSecurityPolicy should be created. | `false` | +| `isClusterService` | Specifies whether chart should be deployed as cluster-service or normal k8s app. 
| true | +| `priorityClassName` | Name of Priority Class to assign pods | `""` | +| `servers` | Configuration for CoreDNS and plugins | See values.yml | +| `affinity` | Affinity settings for pod assignment | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `tolerations` | Tolerations for pod assignment | [] | +| `zoneFiles` | Configure custom Zone files | [] | +| `extraSecrets` | Optional array of secrets to mount inside the CoreDNS container | [] | +| `customLabels` | Optional labels for Deployment(s), Pod, Service, ServiceMonitor objects | {} | +| `podDisruptionBudget` | Optional PodDisruptionBudget | {} | +| `autoscaler.enabled` | Optionally enabled a cluster-proportional-autoscaler for CoreDNS | `false` | +| `autoscaler.coresPerReplica` | Number of cores in the cluster per CoreDNS replica | `256` | +| `autoscaler.nodesPerReplica` | Number of nodes in the cluster per CoreDNS replica | `16` | +| `autoscaler.image.repository` | The image repository to pull autoscaler from | k8s.gcr.io/cluster-proportional-autoscaler-amd64 | +| `autoscaler.image.tag` | The image tag to pull autoscaler from | `1.7.1` | +| `autoscaler.image.pullPolicy` | Image pull policy for the autoscaler | IfNotPresent | +| `autoscaler.priorityClassName` | Optional priority class for the autoscaler pod. `priorityClassName` used if not set. 
| `""` | +| `autoscaler.affinity` | Affinity settings for pod assignment for autoscaler | {} | +| `autoscaler.nodeSelector` | Node labels for pod assignment for autoscaler | {} | +| `autoscaler.tolerations` | Tolerations for pod assignment for autoscaler | [] | +| `autoscaler.resources.limits.cpu` | Container maximum CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.limits.memory` | Container maximum memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.resources.requests.cpu` | Container requested CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.requests.memory` | Container requested memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.configmap.annotations` | Annotations to add to autoscaler config map. For example to stop CI renaming them | {} | + +See `values.yaml` for configuration notes. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install --name coredns \ + --set rbac.create=false \ + stable/coredns +``` + +The above command disables automatic creation of RBAC rules. + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install --name coredns -f values.yaml stable/coredns +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + + +## Caveats + +The chart will automatically determine which protocols to listen on based on +the protocols you define in your zones. This means that you could potentially +use both "TCP" and "UDP" on a single port. +Some cloud environments like "GCE" or "Azure container service" cannot +create external loadbalancers with both "TCP" and "UDP" protocols. So +When deploying CoreDNS with `serviceType="LoadBalancer"` on such cloud +environments, make sure you do not attempt to use both protocols at the same +time. 
+ +## Autoscaling + +By setting `autoscaler.enabled = true` a +[cluster-proportional-autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler) +will be deployed. This will default to a coredns replica for every 256 cores, or +16 nodes in the cluster. These can be changed with `autoscaler.coresPerReplica` +and `autoscaler.nodesPerReplica`. When cluster is using large nodes (with more +cores), `coresPerReplica` should dominate. If using small nodes, +`nodesPerReplica` should dominate. + +This also creates a ServiceAccount, ClusterRole, and ClusterRoleBinding for +the autoscaler deployment. + +`replicaCount` is ignored if this is enabled. diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/NOTES.txt b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/NOTES.txt new file mode 100644 index 0000000..3a1883b --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/NOTES.txt @@ -0,0 +1,30 @@ +{{- if .Values.isClusterService }} +CoreDNS is now running in the cluster as a cluster-service. +{{- else }} +CoreDNS is now running in the cluster. +It can be accessed using the below endpoint +{{- if contains "NodePort" .Values.serviceType }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "coredns.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "$NODE_IP:$NODE_PORT" +{{- else if contains "LoadBalancer" .Values.serviceType }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status by running 'kubectl get svc -w {{ template "coredns.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "coredns.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo $SERVICE_IP +{{- else if contains "ClusterIP" .Values.serviceType }} + "{{ template "coredns.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local" + from within the cluster +{{- end }} +{{- end }} + +It can be tested with the following: + +1. Launch a Pod with DNS tools: + +kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools + +2. Query the DNS server: + +/ # host kubernetes diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/_helpers.tpl b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/_helpers.tpl new file mode 100644 index 0000000..cfdbef7 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/_helpers.tpl @@ -0,0 +1,158 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "coredns.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "coredns.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.servicePorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq .use_tcp true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := set $ports $port 
$innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- if index $innerdict "isudp" -}} + {{- printf "- {port: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- printf "- {port: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.containerPorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq .use_tcp true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- $innerdict := set $innerdict 
"istcp" true -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := set $ports $port $innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- if index $innerdict "isudp" -}} + {{- printf "- {containerPort: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- printf "- {containerPort: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} + {{- end -}} + {{- end -}} +{{- end -}} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "coredns.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "coredns.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole-autoscaler.yaml new file mode 100644 index 0000000..b40bb0a --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole-autoscaler.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list","watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] +# Remove the configmaps rule once below issue is fixed: +# kubernetes-incubator/cluster-proportional-autoscaler#16 + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole.yaml new file mode 100644 index 0000000..4203a02 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole.yaml @@ -0,0 +1,38 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +{{- if .Values.rbac.pspEnable }} +- apiGroups: + - policy + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "coredns.fullname" . }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding-autoscaler.yaml new file mode 100644 index 0000000..d1ff736 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding-autoscaler.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }}-autoscaler +subjects: +- kind: ServiceAccount + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..7ae9d4f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "coredns.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap-autoscaler.yaml new file mode 100644 index 0000000..0712e0d --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap-autoscaler.yaml @@ -0,0 +1,34 @@ +{{- if .Values.autoscaler.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + {{- if .Values.customLabels }} + {{- toYaml .Values.customLabels | nindent 4 }} + {{- end }} + {{- if .Values.autoscaler.configmap.annotations }} + annotations: + {{- toYaml .Values.autoscaler.configmap.annotations | nindent 4 }} + {{- end }} +data: + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + linear: |- + { + "coresPerReplica": {{ .Values.autoscaler.coresPerReplica | float64 }}, + "nodesPerReplica": {{ .Values.autoscaler.nodesPerReplica | float64 }}, + "preventSinglePointFailure": true + } +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap.yaml new file mode 100644 index 0000000..b5069d3 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +data: + Corefile: |- + {{ range .Values.servers }} + {{- range $idx, $zone := .zones }}{{ if $idx }} {{ else }}{{ end }}{{ default "" $zone.scheme }}{{ default "." $zone.zone }}{{ else }}.{{ end -}} + {{- if .port }}:{{ .port }} {{ end -}} + { + {{- range .plugins }} + {{ .name }} {{ if .parameters }} {{if eq .name "kubernetes" }} {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDomain }} {{ end }} {{.parameters}}{{ end }}{{ if .configBlock }} { +{{ .configBlock | indent 12 }} + }{{ end }} + {{- end }} + } + {{ end }} + {{- range .Values.zoneFiles }} + {{ .filename }}: {{ toYaml .contents | indent 4 }} + {{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment-autoscaler.yaml new file mode 100644 index 0000000..6ddd209 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment-autoscaler.yaml @@ -0,0 +1,77 @@ +{{- if .Values.autoscaler.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}}-autoscaler + template: + metadata: + labels: + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.customLabels }} + {{ toYaml .Values.customLabels | nindent 8 }} + {{- end }} + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/configmap-autoscaler.yaml") . | sha256sum }} + {{- if .Values.isClusterService }} + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' + {{- end }} + spec: + serviceAccountName: {{ template "coredns.fullname" . }}-autoscaler + {{- $priorityClassName := default .Values.priorityClassName .Values.autoscaler.priorityClassName }} + {{- if $priorityClassName }} + priorityClassName: {{ $priorityClassName | quote }} + {{- end }} + {{- if .Values.autoscaler.affinity }} + affinity: +{{ toYaml .Values.autoscaler.affinity | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.tolerations }} + tolerations: +{{ toYaml .Values.autoscaler.tolerations | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.nodeSelector }} + nodeSelector: +{{ toYaml .Values.autoscaler.nodeSelector | indent 8 }} + {{- end }} + containers: + - name: autoscaler + image: {{ template "system_default_registry" . }}{{ .Values.autoscaler.image.repository }}:{{ .Values.autoscaler.image.tag }} + imagePullPolicy: {{ .Values.autoscaler.image.pullPolicy }} + resources: +{{ toYaml .Values.autoscaler.resources | indent 10 }} + command: + - /cluster-proportional-autoscaler + - --namespace={{ .Release.Namespace }} + - --configmap={{ template "coredns.fullname" . }}-autoscaler + - --target=Deployment/{{ template "coredns.fullname" . 
}} + - --logtostderr=true + - --v=2 +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment.yaml new file mode 100644 index 0000000..0ed3c52 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment.yaml @@ -0,0 +1,127 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + {{- if not .Values.autoscaler.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 10% + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + template: + metadata: + labels: + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} + {{- if .Values.isClusterService }} + scheduler.alpha.kubernetes.io/critical-pod: '' + {{- end }} + spec: + serviceAccountName: {{ template "coredns.serviceAccountName" . }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.isClusterService }} + dnsPolicy: Default + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if or (.Values.isClusterService) (.Values.tolerations) }} + tolerations: + {{- if .Values.isClusterService }} + - key: CriticalAddonsOnly + operator: Exists + {{- end }} + {{- if .Values.tolerations }} +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + containers: + - name: "coredns" + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns +{{- range .Values.extraSecrets }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: true +{{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + ports: +{{ include "coredns.containerPorts" . | indent 8 }} + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + volumes: + - name: config-volume + configMap: + name: {{ template "coredns.fullname" . 
}} + items: + - key: Corefile + path: Corefile + {{ range .Values.zoneFiles }} + - key: {{ .filename }} + path: {{ .filename }} + {{ end }} +{{- range .Values.extraSecrets }} + - name: {{ .name }} + secret: + secretName: {{ .name }} + defaultMode: 400 +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/poddisruptionbudget.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..1fee2de --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/poddisruptionbudget.yaml @@ -0,0 +1,28 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/podsecuritypolicy.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..4e7a36f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/podsecuritypolicy.yaml @@ -0,0 +1,57 @@ +{{- if .Values.rbac.pspEnable }} +{{ if .Capabilities.APIVersions.Has "policy/v1beta1" }} +apiVersion: policy/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: PodSecurityPolicy +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- else }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- end }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # Add back CAP_NET_BIND_SERVICE so that coredns can run on port 53 + allowedCapabilities: + - CAP_NET_BIND_SERVICE + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service-metrics.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service-metrics.yaml new file mode 100644 index 0000000..1657cd7 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service-metrics.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "coredns.fullname" . }}-metrics + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + selector: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + ports: + - name: metrics + port: 9153 + targetPort: 9153 +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service.yaml new file mode 100644 index 0000000..95c858f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "coredns.fullname" . 
}} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + selector: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{ else }} + clusterIP: {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDNS }} + {{- end }} + {{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} + {{- end }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: +{{ include "coredns.servicePorts" . | indent 2 -}} + type: {{ default "ClusterIP" .Values.serviceType }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount-autoscaler.yaml new file mode 100644 index 0000000..1b218d2 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount-autoscaler.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount.yaml new file mode 100644 index 0000000..23f29a1 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.serviceAccountName" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/servicemonitor.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/servicemonitor.yaml new file mode 100644 index 0000000..ca0b691 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "coredns.fullname" . }} + {{- if .Values.prometheus.monitor.namespace }} + namespace: {{ .Values.prometheus.monitor.namespace }} + {{- end }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics + endpoints: + - port: metrics +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/values.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/values.yaml new file mode 100644 index 0000000..828589e --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/values.yaml @@ -0,0 +1,202 @@ +# Default values for coredns. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +image: + repository: rancher/hardened-coredns + tag: "v1.6.9" + pullPolicy: IfNotPresent + +replicaCount: 1 + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +serviceType: "ClusterIP" + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + +service: +# clusterIP: "" +# loadBalancerIP: "" +# externalTrafficPolicy: "" + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9153" + +serviceAccount: + create: true + # The name of the ServiceAccount to use + # If not set and create is true, a name is generated using the fullname template + name: coredns + +rbac: + # If true, create & use RBAC resources + create: true + # If true, create and use PodSecurityPolicy + pspEnable: false + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + # name: + +# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app. +isClusterService: true + +# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set. +priorityClassName: "system-cluster-critical" + +# Default zone is what Kubernetes recommends: +# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options +servers: +- zones: + - zone: . + port: 53 + plugins: + - name: errors + # Serves a /health endpoint on :8080, required for livenessProbe + - name: health + configBlock: |- + lameduck 5s + # Serves a /ready endpoint on :8181, required for readinessProbe + - name: ready + # Required to query kubernetes API for data + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + # Serves a /metrics endpoint on :9153, required for serviceMonitor + - name: prometheus + parameters: 0.0.0.0:9153 + - name: forward + parameters: . 
/etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance + +# Complete example with all the options: +# - zones: # the `zones` block can be left out entirely, defaults to "." +# - zone: hello.world. # optional, defaults to "." +# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS) +# - zone: foo.bar. +# scheme: dns:// +# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol +# # Note that this will not work if you are also exposing tls or grpc on the same server +# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS) +# plugins: # the plugins to use for this server block +# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it! +# parameters: foo bar # list of parameters after the plugin +# configBlock: |- # if the plugin supports extra block style config, supply it here +# hello world +# foo bar + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core +# for example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core +# for example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] + +# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +podDisruptionBudget: {} + +# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/ +zoneFiles: [] +# - filename: 
example.db +# domain: example.com +# contents: | +# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600 +# example.com. IN NS b.iana-servers.net. +# example.com. IN NS a.iana-servers.net. +# example.com. IN A 192.168.99.102 +# *.example.com. IN A 192.168.99.102 + +# optional array of secrets to mount inside coredns container +# possible usecase: need for secure connection with etcd backend +extraSecrets: [] +# - name: etcd-client-certs +# mountPath: /etc/coredns/tls/etcd +# - name: some-fancy-secret +# mountPath: /etc/wherever + +# Custom labels to apply to Deployment, Pod, Service, ServiceMonitor. Including autoscaler if enabled. +customLabels: {} + +## Configue a cluster-proportional-autoscaler for coredns +# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler +autoscaler: + # Enabled the cluster-proportional-autoscaler + enabled: false + + # Number of cores in the cluster per coredns replica + coresPerReplica: 256 + # Number of nodes in the cluster per coredns replica + nodesPerReplica: 16 + + image: + repository: k8s.gcr.io/cluster-proportional-autoscaler-amd64 + tag: "1.7.1" + pullPolicy: IfNotPresent + + # Optional priority class to be used for the autoscaler pods. priorityClassName used if not set. + priorityClassName: "" + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core + affinity: {} + + # Node labels for pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core + tolerations: [] + + # resources for autoscaler pod + resources: + requests: + cpu: "20m" + memory: "10Mi" + limits: + cpu: "20m" + memory: "10Mi" + + # Options for autoscaler configmap + configmap: + ## Annotations for the coredns-autoscaler configmap + # i.e. 
strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed + annotations: {} +k8sApp : "kube-dns" + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/.helmignore b/charts/rke2-coredns/rke2-coredns/1.13.800/.helmignore new file mode 100644 index 0000000..7c04072 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/Chart.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/Chart.yaml new file mode 100644 index 0000000..ea1b23c --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +appVersion: 1.7.1 +description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS + Services +home: https://coredns.io +icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png +keywords: +- coredns +- dns +- kubedns +name: rke2-coredns +sources: +- https://github.com/coredns/coredns +version: 1.13.800 diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/README.md b/charts/rke2-coredns/rke2-coredns/1.13.800/README.md new file mode 100644 index 0000000..9d9ad64 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/README.md @@ -0,0 +1,169 @@ +# ⚠️ Repo Archive Notice + +As of Nov 13, 2020, charts in this repo will no longer be updated. +For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/). 
+
+# CoreDNS
+
+[CoreDNS](https://coredns.io/) is a DNS server that chains plugins and provides DNS Services
+
+## DEPRECATION NOTICE
+
+This chart is deprecated and no longer supported.
+
+# TL;DR;
+
+```console
+$ helm install --name coredns --namespace=kube-system stable/coredns
+```
+
+## Introduction
+
+This chart bootstraps a [CoreDNS](https://github.com/coredns/coredns) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. This chart will provide DNS Services and can be deployed in multiple configurations to support various scenarios listed below:
+
+ - CoreDNS as a cluster dns service and a drop-in replacement for Kube/SkyDNS. This is the default mode and CoreDNS is deployed as cluster-service in kube-system namespace. This mode is chosen by setting `isClusterService` to true.
+ - CoreDNS as an external dns service. In this mode CoreDNS is deployed as any kubernetes app in user specified namespace. The CoreDNS service can be exposed outside the cluster by using either the NodePort or LoadBalancer type of service. This mode is chosen by setting `isClusterService` to false.
+ - CoreDNS as an external dns provider for kubernetes federation. This is a sub case of 'external dns service' which uses etcd plugin for CoreDNS backend. This deployment mode has a dependency on `etcd-operator` chart, which needs to be pre-installed.
+
+## Prerequisites
+
+- Kubernetes 1.10 or later
+
+## Installing the Chart
+
+The chart can be installed as follows:
+
+```console
+$ helm install --name coredns --namespace=kube-system stable/coredns
+```
+
+The command deploys CoreDNS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists various ways to override default configuration during deployment. 
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete coredns +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +| Parameter | Description | Default | +|:----------------------------------------|:--------------------------------------------------------------------------------------|:------------------------------------------------------------| +| `image.repository` | The image repository to pull from | coredns/coredns | +| `image.tag` | The image tag to pull from | `v1.7.1` | +| `image.pullPolicy` | Image pull policy | IfNotPresent | +| `replicaCount` | Number of replicas | 1 | +| `resources.limits.cpu` | Container maximum CPU | `100m` | +| `resources.limits.memory` | Container maximum memory | `128Mi` | +| `resources.requests.cpu` | Container requested CPU | `100m` | +| `resources.requests.memory` | Container requested memory | `128Mi` | +| `serviceType` | Kubernetes Service type | `ClusterIP` | +| `prometheus.service.enabled` | Set this to `true` to create Service for Prometheus metrics | `false` | +| `prometheus.service.annotations` | Annotations to add to the metrics Service | `{prometheus.io/scrape: "true", prometheus.io/port: "9153"}`| +| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | {} | +| `prometheus.monitor.namespace` | Selector to select which namespaces the Endpoints objects are discovered from. 
| `""` | +| `service.clusterIP` | IP address to assign to service | `""` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` | +| `service.externalIPs` | External IP addresses | [] | +| `service.externalTrafficPolicy` | Enable client source IP preservation | [] | +| `service.annotations` | Annotations to add to service | {} | +| `serviceAccount.create` | If true, create & use serviceAccount | false | +| `serviceAccount.name` | If not set & create is true, use template fullname | | +| `rbac.create` | If true, create & use RBAC resources | true | +| `rbac.pspEnable` | Specifies whether a PodSecurityPolicy should be created. | `false` | +| `isClusterService` | Specifies whether chart should be deployed as cluster-service or normal k8s app. | true | +| `priorityClassName` | Name of Priority Class to assign pods | `""` | +| `servers` | Configuration for CoreDNS and plugins | See values.yml | +| `affinity` | Affinity settings for pod assignment | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `tolerations` | Tolerations for pod assignment | [] | +| `zoneFiles` | Configure custom Zone files | [] | +| `extraVolumes` | Optional array of volumes to create | [] | +| `extraVolumeMounts` | Optional array of volumes to mount inside the CoreDNS container | [] | +| `extraSecrets` | Optional array of secrets to mount inside the CoreDNS container | [] | +| `customLabels` | Optional labels for Deployment(s), Pod, Service, ServiceMonitor objects | {} | +| `rollingUpdate.maxUnavailable` | Maximum number of unavailable replicas during rolling update | `1` | +| `rollingUpdate.maxSurge` | Maximum number of pods created above desired number of pods | `25%` | +| `podDisruptionBudget` | Optional PodDisruptionBudget | {} | +| `podAnnotations` | Optional Pod only Annotations | {} | +| `terminationGracePeriodSeconds` | Optional duration in seconds the pod needs to terminate gracefully. 
| 30 | +| `preStopSleep` | Definition of Kubernetes preStop hook executed before Pod termination | {} | +| `hpa.enabled` | Enable Hpa autoscaler instead of proportional one | `false` | +| `hpa.minReplicas` | Hpa minimum number of CoreDNS replicas | `1` | +| `hpa.maxReplicas` | Hpa maximum number of CoreDNS replicas | `2` | +| `hpa.metrics` | Metrics definitions used by Hpa to scale up and down | {} | +| `autoscaler.enabled` | Optionally enabled a cluster-proportional-autoscaler for CoreDNS | `false` | +| `autoscaler.coresPerReplica` | Number of cores in the cluster per CoreDNS replica | `256` | +| `autoscaler.nodesPerReplica` | Number of nodes in the cluster per CoreDNS replica | `16` | +| `autoscaler.min` | Min size of replicaCount | 0 | +| `autoscaler.max` | Max size of replicaCount | 0 (aka no max) | +| `autoscaler.includeUnschedulableNodes` | Should the replicas scale based on the total number or only schedulable nodes | `false` | +| `autoscaler.preventSinglePointFailure` | If true does not allow single points of failure to form | `true` | +| `autoscaler.image.repository` | The image repository to pull autoscaler from | k8s.gcr.io/cluster-proportional-autoscaler-amd64 | +| `autoscaler.image.tag` | The image tag to pull autoscaler from | `1.7.1` | +| `autoscaler.image.pullPolicy` | Image pull policy for the autoscaler | IfNotPresent | +| `autoscaler.priorityClassName` | Optional priority class for the autoscaler pod. `priorityClassName` used if not set. 
| `""` | +| `autoscaler.affinity` | Affinity settings for pod assignment for autoscaler | {} | +| `autoscaler.nodeSelector` | Node labels for pod assignment for autoscaler | {} | +| `autoscaler.tolerations` | Tolerations for pod assignment for autoscaler | [] | +| `autoscaler.resources.limits.cpu` | Container maximum CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.limits.memory` | Container maximum memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.resources.requests.cpu` | Container requested CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.requests.memory` | Container requested memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.configmap.annotations` | Annotations to add to autoscaler config map. For example to stop CI renaming them | {} | + +See `values.yaml` for configuration notes. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install --name coredns \ + --set rbac.create=false \ + stable/coredns +``` + +The above command disables automatic creation of RBAC rules. + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install --name coredns -f values.yaml stable/coredns +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + + +## Caveats + +The chart will automatically determine which protocols to listen on based on +the protocols you define in your zones. This means that you could potentially +use both "TCP" and "UDP" on a single port. +Some cloud environments like "GCE" or "Azure container service" cannot +create external loadbalancers with both "TCP" and "UDP" protocols. So +When deploying CoreDNS with `serviceType="LoadBalancer"` on such cloud +environments, make sure you do not attempt to use both protocols at the same +time. 
+
+## Autoscaling
+
+By setting `autoscaler.enabled = true` a
+[cluster-proportional-autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler)
+will be deployed. This will default to a coredns replica for every 256 cores, or
+16 nodes in the cluster. These can be changed with `autoscaler.coresPerReplica`
+and `autoscaler.nodesPerReplica`. When cluster is using large nodes (with more
+cores), `coresPerReplica` should dominate. If using small nodes,
+`nodesPerReplica` should dominate.
+
+This also creates a ServiceAccount, ClusterRole, and ClusterRoleBinding for
+the autoscaler deployment.
+
+`replicaCount` is ignored if this is enabled.
+
+By setting `hpa.enabled = true` a [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/)
+is enabled for CoreDNS deployment. This can scale the number of replicas based on metrics
+like CpuUtilization, MemoryUtilization or Custom ones.
diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/NOTES.txt b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/NOTES.txt
new file mode 100644
index 0000000..3a1883b
--- /dev/null
+++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/NOTES.txt
@@ -0,0 +1,30 @@
+{{- if .Values.isClusterService }}
+CoreDNS is now running in the cluster as a cluster-service.
+{{- else }}
+CoreDNS is now running in the cluster.
+It can be accessed using the below endpoint
+{{- if contains "NodePort" .Values.serviceType }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "coredns.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo "$NODE_IP:$NODE_PORT"
+{{- else if contains "LoadBalancer" .Values.serviceType }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status by running 'kubectl get svc -w {{ template "coredns.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "coredns.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo $SERVICE_IP +{{- else if contains "ClusterIP" .Values.serviceType }} + "{{ template "coredns.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local" + from within the cluster +{{- end }} +{{- end }} + +It can be tested with the following: + +1. Launch a Pod with DNS tools: + +kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools + +2. Query the DNS server: + +/ # host kubernetes diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/_helpers.tpl b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/_helpers.tpl new file mode 100644 index 0000000..6b089e7 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/_helpers.tpl @@ -0,0 +1,158 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "coredns.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "coredns.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.servicePorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq (default false .use_tcp) true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := 
set $ports $port $innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- if index $innerdict "isudp" -}} + {{- printf "- {port: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- printf "- {port: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.containerPorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq (default false .use_tcp) true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + 
{{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := set $ports $port $innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- if index $innerdict "isudp" -}} + {{- printf "- {containerPort: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- printf "- {containerPort: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} + {{- end -}} + {{- end -}} +{{- end -}} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "coredns.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "coredns.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole-autoscaler.yaml new file mode 100644 index 0000000..b40bb0a --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole-autoscaler.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list","watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] +# Remove the configmaps rule once below issue is fixed: +# kubernetes-incubator/cluster-proportional-autoscaler#16 + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole.yaml new file mode 100644 index 0000000..4203a02 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole.yaml @@ -0,0 +1,38 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +{{- if .Values.rbac.pspEnable }} +- apiGroups: + - policy + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "coredns.fullname" . }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding-autoscaler.yaml new file mode 100644 index 0000000..d1ff736 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding-autoscaler.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }}-autoscaler +subjects: +- kind: ServiceAccount + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..7ae9d4f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "coredns.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap-autoscaler.yaml new file mode 100644 index 0000000..608a0b7 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap-autoscaler.yaml @@ -0,0 +1,37 @@ +{{- if .Values.autoscaler.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + {{- if .Values.customLabels }} + {{- toYaml .Values.customLabels | nindent 4 }} + {{- end }} + {{- if .Values.autoscaler.configmap.annotations }} + annotations: + {{- toYaml .Values.autoscaler.configmap.annotations | nindent 4 }} + {{- end }} +data: + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + linear: |- + { + "coresPerReplica": {{ .Values.autoscaler.coresPerReplica | float64 }}, + "nodesPerReplica": {{ .Values.autoscaler.nodesPerReplica | float64 }}, + "preventSinglePointFailure": {{ .Values.autoscaler.preventSinglePointFailure }}, + "min": {{ .Values.autoscaler.min | int }}, + "max": {{ .Values.autoscaler.max | int }}, + "includeUnschedulableNodes": {{ .Values.autoscaler.includeUnschedulableNodes }} + } +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap.yaml new file mode 100644 index 0000000..b5069d3 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "coredns.fullname" . 
}} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +data: + Corefile: |- + {{ range .Values.servers }} + {{- range $idx, $zone := .zones }}{{ if $idx }} {{ else }}{{ end }}{{ default "" $zone.scheme }}{{ default "." $zone.zone }}{{ else }}.{{ end -}} + {{- if .port }}:{{ .port }} {{ end -}} + { + {{- range .plugins }} + {{ .name }} {{ if .parameters }} {{if eq .name "kubernetes" }} {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDomain }} {{ end }} {{.parameters}}{{ end }}{{ if .configBlock }} { +{{ .configBlock | indent 12 }} + }{{ end }} + {{- end }} + } + {{ end }} + {{- range .Values.zoneFiles }} + {{ .filename }}: {{ toYaml .contents | indent 4 }} + {{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment-autoscaler.yaml new file mode 100644 index 0000000..8461532 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment-autoscaler.yaml @@ -0,0 +1,77 @@ +{{- if and (.Values.autoscaler.enabled) (not .Values.hpa.enabled) }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + template: + metadata: + labels: + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.customLabels }} + {{ toYaml .Values.customLabels | nindent 8 }} + {{- end }} + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/configmap-autoscaler.yaml") . | sha256sum }} + {{- if .Values.isClusterService }} + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' + {{- end }} + spec: + serviceAccountName: {{ template "coredns.fullname" . 
}}-autoscaler + {{- $priorityClassName := default .Values.priorityClassName .Values.autoscaler.priorityClassName }} + {{- if $priorityClassName }} + priorityClassName: {{ $priorityClassName | quote }} + {{- end }} + {{- if .Values.autoscaler.affinity }} + affinity: +{{ toYaml .Values.autoscaler.affinity | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.tolerations }} + tolerations: +{{ toYaml .Values.autoscaler.tolerations | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.nodeSelector }} + nodeSelector: +{{ toYaml .Values.autoscaler.nodeSelector | indent 8 }} + {{- end }} + containers: + - name: autoscaler + image: {{ template "system_default_registry" . }}{{ .Values.autoscaler.image.repository }}:{{ .Values.autoscaler.image.tag }} + imagePullPolicy: {{ .Values.autoscaler.image.pullPolicy }} + resources: +{{ toYaml .Values.autoscaler.resources | indent 10 }} + command: + - /cluster-proportional-autoscaler + - --namespace={{ .Release.Namespace }} + - --configmap={{ template "coredns.fullname" . }}-autoscaler + - --target=Deployment/{{ template "coredns.fullname" . 
}} + - --logtostderr=true + - --v=2 +{{- end }} diff --git a/charts/rke2-coredns/templates/deployment.yaml.orig b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment.yaml similarity index 93% rename from charts/rke2-coredns/templates/deployment.yaml.orig rename to charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment.yaml index be11dcd..e67dd15 100644 --- a/charts/rke2-coredns/templates/deployment.yaml.orig +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment.yaml @@ -7,7 +7,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name | quote }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" {{- if .Values.isClusterService }} - k8s-app: {{ .Chart.Name | quote }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} kubernetes.io/cluster-service: "true" kubernetes.io/name: "CoreDNS" {{- end }} @@ -28,14 +28,14 @@ spec: matchLabels: app.kubernetes.io/instance: {{ .Release.Name | quote }} {{- if .Values.isClusterService }} - k8s-app: {{ .Chart.Name | quote }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} {{- end }} app.kubernetes.io/name: {{ template "coredns.name" . }} template: metadata: labels: {{- if .Values.isClusterService }} - k8s-app: {{ .Chart.Name | quote }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} {{- end }} app.kubernetes.io/name: {{ template "coredns.name" . }} app.kubernetes.io/instance: {{ .Release.Name | quote }} @@ -76,7 +76,7 @@ spec: {{- end }} containers: - name: "coredns" - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + image: {{ template "system_default_registry" . 
}}{{ .Values.image.repository }}:{{ .Values.image.tag }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: [ "-conf", "/etc/coredns/Corefile" ] volumeMounts: diff --git a/charts/rke2-coredns/templates/hpa.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/hpa.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/hpa.yaml rename to charts/rke2-coredns/rke2-coredns/1.13.800/templates/hpa.yaml diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/poddisruptionbudget.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..1fee2de --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/poddisruptionbudget.yaml @@ -0,0 +1,28 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/podsecuritypolicy.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..4e7a36f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/podsecuritypolicy.yaml @@ -0,0 +1,57 @@ +{{- if .Values.rbac.pspEnable }} +{{ if .Capabilities.APIVersions.Has "policy/v1beta1" }} +apiVersion: policy/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: PodSecurityPolicy +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- else }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- end }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # Add back CAP_NET_BIND_SERVICE so that coredns can run on port 53 + allowedCapabilities: + - CAP_NET_BIND_SERVICE + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/service-metrics.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/service-metrics.yaml new file mode 100644 index 0000000..0f99adf --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/service-metrics.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheus.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "coredns.fullname" . }}-metrics + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: +{{ toYaml .Values.prometheus.service.annotations | indent 4 }} +spec: + selector: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} + ports: + - name: metrics + port: 9153 + targetPort: 9153 +{{- end }} diff --git a/charts/rke2-coredns/templates/service.yaml.orig b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/service.yaml similarity index 85% rename from charts/rke2-coredns/templates/service.yaml.orig rename to charts/rke2-coredns/rke2-coredns/1.13.800/templates/service.yaml index 0ca5edf..d7124ac 100644 --- a/charts/rke2-coredns/templates/service.yaml.orig +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/service.yaml @@ -7,7 +7,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name | quote }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" {{- if .Values.isClusterService }} - k8s-app: {{ .Chart.Name | quote }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} kubernetes.io/cluster-service: "true" kubernetes.io/name: "CoreDNS" {{- end }} @@ -21,11 +21,13 @@ spec: selector: app.kubernetes.io/instance: {{ .Release.Name | quote }} {{- if .Values.isClusterService }} - k8s-app: {{ .Chart.Name | quote }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} {{- end }} app.kubernetes.io/name: {{ template "coredns.name" . }} {{- if .Values.service.clusterIP }} clusterIP: {{ .Values.service.clusterIP }} + {{ else }} + clusterIP: {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDNS }} {{- end }} {{- if .Values.service.externalIPs }} externalIPs: diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount-autoscaler.yaml new file mode 100644 index 0000000..1b218d2 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount-autoscaler.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount.yaml new file mode 100644 index 0000000..23f29a1 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.serviceAccountName" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/servicemonitor.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/servicemonitor.yaml new file mode 100644 index 0000000..ca0b691 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "coredns.fullname" . }} + {{- if .Values.prometheus.monitor.namespace }} + namespace: {{ .Values.prometheus.monitor.namespace }} + {{- end }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics + endpoints: + - port: metrics +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/values.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/values.yaml new file mode 100644 index 0000000..49a1e8b --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/values.yaml @@ -0,0 +1,259 @@ +# Default values for coredns. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +image: + repository: rancher/hardened-coredns + tag: "v1.7.1" + pullPolicy: IfNotPresent + +replicaCount: 1 + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +## Create HorizontalPodAutoscaler object. +## +# autoscaling: +# minReplicas: 1 +# maxReplicas: 10 +# metrics: +# - type: Resource +# resource: +# name: cpu +# targetAverageUtilization: 60 +# - type: Resource +# resource: +# name: memory +# targetAverageUtilization: 60 + +rollingUpdate: + maxUnavailable: 1 + maxSurge: 25% + +# Under heavy load it takes more that standard time to remove Pod endpoint from a cluster. +# This will delay termination of our pod by `preStopSleep`. To make sure kube-proxy has +# enough time to catch up. +# preStopSleep: 5 +terminationGracePeriodSeconds: 30 + +podAnnotations: {} +# cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + +serviceType: "ClusterIP" + +prometheus: + service: + enabled: false + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9153" + monitor: + enabled: false + additionalLabels: {} + namespace: "" + +service: +# clusterIP: "" +# loadBalancerIP: "" +# externalIPs: [] +# externalTrafficPolicy: "" + annotations: {} + +serviceAccount: + create: true + # The name of the ServiceAccount to use + # If not set and create is true, a name is generated using the fullname template + name: coredns + +rbac: + # If true, create & use RBAC resources + create: true + # If true, create and use PodSecurityPolicy + pspEnable: false + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + # name: + +# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app. +isClusterService: true + +# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set. 
+priorityClassName: "system-cluster-critical" + +# Default zone is what Kubernetes recommends: +# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options +servers: +- zones: + - zone: . + port: 53 + plugins: + - name: errors + # Serves a /health endpoint on :8080, required for livenessProbe + - name: health + configBlock: |- + lameduck 5s + # Serves a /ready endpoint on :8181, required for readinessProbe + - name: ready + # Required to query kubernetes API for data + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + # Serves a /metrics endpoint on :9153, required for serviceMonitor + - name: prometheus + parameters: 0.0.0.0:9153 + - name: forward + parameters: . /etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance + +# Complete example with all the options: +# - zones: # the `zones` block can be left out entirely, defaults to "." +# - zone: hello.world. # optional, defaults to "." +# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS) +# - zone: foo.bar. +# scheme: dns:// +# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol +# # Note that this will not work if you are also exposing tls or grpc on the same server +# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS) +# plugins: # the plugins to use for this server block +# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it! 
+# parameters: foo bar # list of parameters after the plugin +# configBlock: |- # if the plugin supports extra block style config, supply it here +# hello world +# foo bar + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core +# for example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core +# for example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] + +# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +podDisruptionBudget: {} + +# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/ +zoneFiles: [] +# - filename: example.db +# domain: example.com +# contents: | +# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600 +# example.com. IN NS b.iana-servers.net. +# example.com. IN NS a.iana-servers.net. +# example.com. IN A 192.168.99.102 +# *.example.com. 
IN A 192.168.99.102 + +# optional array of extra volumes to create +extraVolumes: [] +# - name: some-volume-name +# emptyDir: {} +# optional array of mount points for extraVolumes +extraVolumeMounts: [] +# - name: some-volume-name +# mountPath: /etc/wherever + +# optional array of secrets to mount inside coredns container +# possible usecase: need for secure connection with etcd backend +extraSecrets: [] +# - name: etcd-client-certs +# mountPath: /etc/coredns/tls/etcd +# - name: some-fancy-secret +# mountPath: /etc/wherever + +# Custom labels to apply to Deployment, Pod, Service, ServiceMonitor. Including autoscaler if enabled. +customLabels: {} + +## Alternative configuration for HPA deployment if wanted +# +hpa: + enabled: false + minReplicas: 1 + maxReplicas: 2 + metrics: {} + +## Configue a cluster-proportional-autoscaler for coredns +# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler +autoscaler: + # Enabled the cluster-proportional-autoscaler + enabled: false + + # Number of cores in the cluster per coredns replica + coresPerReplica: 256 + # Number of nodes in the cluster per coredns replica + nodesPerReplica: 16 + # Min size of replicaCount + min: 0 + # Max size of replicaCount (default of 0 is no max) + max: 0 + # Whether to include unschedulable nodes in the nodes/cores calculations - this requires version 1.8.0+ of the autoscaler + includeUnschedulableNodes: false + # If true does not allow single points of failure to form + preventSinglePointFailure: true + + image: + repository: k8s.gcr.io/cluster-proportional-autoscaler-amd64 + tag: "1.8.0" + pullPolicy: IfNotPresent + + # Optional priority class to be used for the autoscaler pods. priorityClassName used if not set. 
+ priorityClassName: "" + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core + affinity: {} + + # Node labels for pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core + tolerations: [] + + # resources for autoscaler pod + resources: + requests: + cpu: "20m" + memory: "10Mi" + limits: + cpu: "20m" + memory: "10Mi" + + # Options for autoscaler configmap + configmap: + ## Annotations for the coredns-autoscaler configmap + # i.e. strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed + annotations: {} +k8sApp : "kube-dns" + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/.helmignore b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/Chart.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/Chart.yaml new file mode 100644 index 0000000..45c8c14 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +appVersion: 0.30.0 +description: An nginx Ingress controller that uses ConfigMap to store the nginx configuration. 
+home: https://github.com/kubernetes/ingress-nginx +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +kubeVersion: '>=1.10.0-0' +maintainers: +- name: ChiefAlexander +- email: Trevor.G.Wood@gmail.com + name: taharah +name: rke2-ingress-nginx +sources: +- https://github.com/kubernetes/ingress-nginx +version: 1.36.300 diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/OWNERS b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/OWNERS new file mode 100644 index 0000000..0001de3 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/OWNERS @@ -0,0 +1,6 @@ +approvers: +- ChiefAlexander +- taharah +reviewers: +- ChiefAlexander +- taharah diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/README.md b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/README.md new file mode 100644 index 0000000..87dfdb4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/README.md @@ -0,0 +1,361 @@ +# nginx-ingress + +[nginx-ingress](https://github.com/kubernetes/ingress-nginx) is an Ingress controller that uses ConfigMap to store the nginx configuration. + +To use, add the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +## TL;DR; + +```console +$ helm install stable/nginx-ingress +``` + +## Introduction + +This chart bootstraps an nginx-ingress deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + + - Kubernetes 1.6+ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/nginx-ingress +``` + +The command deploys nginx-ingress on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the nginx-ingress chart and their default values. + +Parameter | Description | Default +--- | --- | --- +`controller.name` | name of the controller component | `controller` +`controller.image.repository` | controller container image repository | `quay.io/kubernetes-ingress-controller/nginx-ingress-controller` +`controller.image.tag` | controller container image tag | `0.30.0` +`controller.image.pullPolicy` | controller container image pull policy | `IfNotPresent` +`controller.image.runAsUser` | User ID of the controller process. Value depends on the Linux distribution used inside of the container image. | `101` +`controller.useComponentLabel` | Wether to add component label so the HPA can work separately for controller and defaultBackend. *Note: don't change this if you have an already running deployment as it will need the recreation of the controller deployment* | `false` +`controller.containerPort.http` | The port that the controller container listens on for http connections. | `80` +`controller.containerPort.https` | The port that the controller container listens on for https connections. | `443` +`controller.config` | nginx [ConfigMap](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md) entries | none +`controller.hostNetwork` | If the nginx deployment / daemonset should run on the host's network namespace. 
Do not set this when `controller.service.externalIPs` is set and `kube-proxy` is used as there will be a port-conflict for port `80` | false +`controller.defaultBackendService` | default 404 backend service; needed only if `defaultBackend.enabled = false` and version < 0.21.0| `""` +`controller.dnsPolicy` | If using `hostNetwork=true`, change to `ClusterFirstWithHostNet`. See [pod's dns policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) for details | `ClusterFirst` +`controller.dnsConfig` | custom pod dnsConfig. See [pod's dns config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-config) for details | `{}` +`controller.reportNodeInternalIp` | If using `hostNetwork=true`, setting `reportNodeInternalIp=true`, will pass the flag `report-node-internal-ip-address` to nginx-ingress. This sets the status of all Ingress objects to the internal IP address of all nodes running the NGINX Ingress controller. +`controller.electionID` | election ID to use for the status update | `ingress-controller-leader` +`controller.extraEnvs` | any additional environment variables to set in the pods | `{}` +`controller.extraContainers` | Sidecar containers to add to the controller pod. See [LemonLDAP::NG controller](https://github.com/lemonldap-ng-controller/lemonldap-ng-controller) as example | `{}` +`controller.extraVolumeMounts` | Additional volumeMounts to the controller main container | `{}` +`controller.extraVolumes` | Additional volumes to the controller pod | `{}` +`controller.extraInitContainers` | Containers, which are run before the app containers are started | `[]` +`controller.ingressClass` | name of the ingress class to route through this controller | `nginx` +`controller.maxmindLicenseKey` | Maxmind license key to download GeoLite2 Databases. 
See [Accessing and using GeoLite2 database](https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases/) | `""` +`controller.scope.enabled` | limit the scope of the ingress controller | `false` (watch all namespaces) +`controller.scope.namespace` | namespace to watch for ingress | `""` (use the release namespace) +`controller.extraArgs` | Additional controller container arguments | `{}` +`controller.kind` | install as Deployment, DaemonSet or Both | `Deployment` +`controller.deploymentAnnotations` | annotations to be added to deployment | `{}` +`controller.autoscaling.enabled` | If true, creates Horizontal Pod Autoscaler | false +`controller.autoscaling.minReplicas` | If autoscaling enabled, this field sets minimum replica count | `2` +`controller.autoscaling.maxReplicas` | If autoscaling enabled, this field sets maximum replica count | `11` +`controller.autoscaling.targetCPUUtilizationPercentage` | Target CPU utilization percentage to scale | `"50"` +`controller.autoscaling.targetMemoryUtilizationPercentage` | Target memory utilization percentage to scale | `"50"` +`controller.daemonset.useHostPort` | If `controller.kind` is `DaemonSet`, this will enable `hostPort` for TCP/80 and TCP/443 | false +`controller.daemonset.hostPorts.http` | If `controller.daemonset.useHostPort` is `true` and this is non-empty, it sets the hostPort | `"80"` +`controller.daemonset.hostPorts.https` | If `controller.daemonset.useHostPort` is `true` and this is non-empty, it sets the hostPort | `"443"` +`controller.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`controller.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`controller.terminationGracePeriodSeconds` | how many seconds to wait before terminating a pod | `60` +`controller.minReadySeconds` | how many seconds a pod needs to be ready before killing the next, during update | `0` +`controller.nodeSelector` | node labels for pod assignment | `{}` 
+`controller.podAnnotations` | annotations to be added to pods | `{}` +`controller.deploymentLabels` | labels to add to the deployment metadata | `{}` +`controller.podLabels` | labels to add to the pod container metadata | `{}` +`controller.podSecurityContext` | Security context policies to add to the controller pod | `{}` +`controller.replicaCount` | desired number of controller pods | `1` +`controller.minAvailable` | minimum number of available controller pods for PodDisruptionBudget | `1` +`controller.resources` | controller pod resource requests & limits | `{}` +`controller.priorityClassName` | controller priorityClassName | `nil` +`controller.lifecycle` | controller pod lifecycle hooks | `{}` +`controller.service.annotations` | annotations for controller service | `{}` +`controller.service.labels` | labels for controller service | `{}` +`controller.publishService.enabled` | if true, the controller will set the endpoint records on the ingress objects to reflect those on the service | `false` +`controller.publishService.pathOverride` | override of the default publish-service name | `""` +`controller.service.enabled` | if disabled no service will be created. This is especially useful when `controller.kind` is set to `DaemonSet` and `controller.daemonset.useHostPorts` is `true` | true +`controller.service.clusterIP` | internal controller cluster service IP (set to `"-"` to pass an empty value) | `nil` +`controller.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the controller service | `false` +`controller.service.externalIPs` | controller service external IP addresses. 
Do not set this when `controller.hostNetwork` is set to `true` and `kube-proxy` is used as there will be a port-conflict for port `80` | `[]` +`controller.service.externalTrafficPolicy` | If `controller.service.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable [source IP preservation](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport) | `"Cluster"` +`controller.service.sessionAffinity` | Enables client IP based session affinity. Must be `ClientIP` or `None` if set. | `""` +`controller.service.healthCheckNodePort` | If `controller.service.type` is `NodePort` or `LoadBalancer` and `controller.service.externalTrafficPolicy` is set to `Local`, set this to [the managed health-check port the kube-proxy will expose](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport). If blank, a random port in the `NodePort` range will be assigned | `""` +`controller.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.service.enableHttp` | if port 80 should be opened for service | `true` +`controller.service.enableHttps` | if port 443 should be opened for service | `true` +`controller.service.targetPorts.http` | Sets the targetPort that maps to the Ingress' port 80 | `80` +`controller.service.targetPorts.https` | Sets the targetPort that maps to the Ingress' port 443 | `443` +`controller.service.ports.http` | Sets service http port | `80` +`controller.service.ports.https` | Sets service https port | `443` +`controller.service.type` | type of controller service to create | `LoadBalancer` +`controller.service.nodePorts.http` | If `controller.service.type` is either `NodePort` or `LoadBalancer` and this is non-empty, it sets the nodePort that maps to the Ingress' port 80 | `""` +`controller.service.nodePorts.https` 
| If `controller.service.type` is either `NodePort` or `LoadBalancer` and this is non-empty, it sets the nodePort that maps to the Ingress' port 443 | `""` +`controller.service.nodePorts.tcp` | Sets the nodePort for an entry referenced by its key from `tcp` | `{}` +`controller.service.nodePorts.udp` | Sets the nodePort for an entry referenced by its key from `udp` | `{}` +`controller.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 10 +`controller.livenessProbe.periodSeconds` | How often to perform the probe | 10 +`controller.livenessProbe.timeoutSeconds` | When the probe times out | 5 +`controller.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`controller.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`controller.livenessProbe.port` | The port number that the liveness probe will listen on. | 10254 +`controller.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 10 +`controller.readinessProbe.periodSeconds` | How often to perform the probe | 10 +`controller.readinessProbe.timeoutSeconds` | When the probe times out | 1 +`controller.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`controller.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`controller.readinessProbe.port` | The port number that the readiness probe will listen on. 
| 10254 +`controller.metrics.enabled` | if `true`, enable Prometheus metrics | `false` +`controller.metrics.service.annotations` | annotations for Prometheus metrics service | `{}` +`controller.metrics.service.clusterIP` | cluster IP address to assign to service (set to `"-"` to pass an empty value) | `nil` +`controller.metrics.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the metrics service | `false` +`controller.metrics.service.externalIPs` | Prometheus metrics service external IP addresses | `[]` +`controller.metrics.service.labels` | labels for metrics service | `{}` +`controller.metrics.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.metrics.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.metrics.service.servicePort` | Prometheus metrics service port | `9913` +`controller.metrics.service.type` | type of Prometheus metrics service to create | `ClusterIP` +`controller.metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` +`controller.metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` +`controller.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels. 
| `false` +`controller.metrics.serviceMonitor.namespace` | namespace where servicemonitor resource should be created | `the same namespace as nginx ingress` +`controller.metrics.serviceMonitor.namespaceSelector` | [namespaceSelector](https://github.com/coreos/prometheus-operator/blob/v0.34.0/Documentation/api.md#namespaceselector) to configure what namespaces to scrape | `will scrape the helm release namespace only` +`controller.metrics.serviceMonitor.scrapeInterval` | interval between Prometheus scraping | `30s` +`controller.metrics.prometheusRule.enabled` | Set this to `true` to create prometheusRules for Prometheus operator | `false` +`controller.metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` +`controller.metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `the same namespace as nginx ingress` +`controller.metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be prometheus in YAML format, check values for an example. | `[]` +`controller.admissionWebhooks.enabled` | Create Ingress admission webhooks. Validating webhook will check the ingress syntax. 
| `false` +`controller.admissionWebhooks.failurePolicy` | Failure policy for admission webhooks | `Fail` +`controller.admissionWebhooks.port` | Admission webhook port | `8080` +`controller.admissionWebhooks.service.annotations` | Annotations for admission webhook service | `{}` +`controller.admissionWebhooks.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the admission webhook service | `false` +`controller.admissionWebhooks.service.clusterIP` | cluster IP address to assign to admission webhook service (set to `"-"` to pass an empty value) | `nil` +`controller.admissionWebhooks.service.externalIPs` | Admission webhook service external IP addresses | `[]` +`controller.admissionWebhooks.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.admissionWebhooks.service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.admissionWebhooks.service.servicePort` | Admission webhook service port | `443` +`controller.admissionWebhooks.service.type` | Type of admission webhook service to create | `ClusterIP` +`controller.admissionWebhooks.patch.enabled` | If true, will use a pre and post install hooks to generate a CA and certificate to use for validating webhook endpoint, and patch the created webhooks with the CA. 
| `true` +`controller.admissionWebhooks.patch.image.repository` | Repository to use for the webhook integration jobs | `jettech/kube-webhook-certgen` +`controller.admissionWebhooks.patch.image.tag` | Tag to use for the webhook integration jobs | `v1.0.0` +`controller.admissionWebhooks.patch.image.pullPolicy` | Image pull policy for the webhook integration jobs | `IfNotPresent` +`controller.admissionWebhooks.patch.priorityClassName` | Priority class for the webhook integration jobs | `""` +`controller.admissionWebhooks.patch.podAnnotations` | Annotations for the webhook job pods | `{}` +`controller.admissionWebhooks.patch.nodeSelector` | Node selector for running admission hook patch jobs | `{}` +`controller.customTemplate.configMapName` | configMap containing a custom nginx template | `""` +`controller.customTemplate.configMapKey` | configMap key containing the nginx template | `""` +`controller.addHeaders` | configMap key:value pairs containing [custom headers](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers) added before sending response to the client | `{}` +`controller.proxySetHeaders` | configMap key:value pairs containing [custom headers](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#proxy-set-headers) added before sending request to the backends| `{}` +`controller.headers` | DEPRECATED, Use `controller.proxySetHeaders` instead. 
| `{}` +`controller.updateStrategy` | allows setting of RollingUpdate strategy | `{}` +`controller.configMapNamespace` | The nginx-configmap namespace name | `""` +`controller.tcp.configMapNamespace` | The tcp-services-configmap namespace name | `""` +`controller.udp.configMapNamespace` | The udp-services-configmap namespace name | `""` +`defaultBackend.enabled` | Use default backend component | `true` +`defaultBackend.name` | name of the default backend component | `default-backend` +`defaultBackend.image.repository` | default backend container image repository | `k8s.gcr.io/defaultbackend-amd64` +`defaultBackend.image.tag` | default backend container image tag | `1.5` +`defaultBackend.image.pullPolicy` | default backend container image pull policy | `IfNotPresent` +`defaultBackend.image.runAsUser` | User ID of the controller process. Value depends on the Linux distribution used inside of the container image. By default uses nobody user. | `65534` +`defaultBackend.useComponentLabel` | Whether to add component label so the HPA can work separately for controller and defaultBackend. *Note: don't change this if you have an already running deployment as it will need the recreation of the defaultBackend deployment* | `false` +`defaultBackend.extraArgs` | Additional default backend container arguments | `{}` +`defaultBackend.extraEnvs` | any additional environment variables to set in the defaultBackend pods | `[]` +`defaultBackend.port` | Http port number | `8080` +`defaultBackend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 +`defaultBackend.livenessProbe.periodSeconds` | How often to perform the probe | 10 +`defaultBackend.livenessProbe.timeoutSeconds` | When the probe times out | 5 +`defaultBackend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| 1 +`defaultBackend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`defaultBackend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 0 +`defaultBackend.readinessProbe.periodSeconds` | How often to perform the probe | 5 +`defaultBackend.readinessProbe.timeoutSeconds` | When the probe times out | 5 +`defaultBackend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`defaultBackend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 +`defaultBackend.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`defaultBackend.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`defaultBackend.nodeSelector` | node labels for pod assignment | `{}` +`defaultBackend.podAnnotations` | annotations to be added to pods | `{}` +`defaultBackend.deploymentLabels` | labels to add to the deployment metadata | `{}` +`defaultBackend.podLabels` | labels to add to the pod container metadata | `{}` +`defaultBackend.replicaCount` | desired number of default backend pods | `1` +`defaultBackend.minAvailable` | minimum number of available default backend pods for PodDisruptionBudget | `1` +`defaultBackend.resources` | default backend pod resource requests & limits | `{}` +`defaultBackend.priorityClassName` | default backend priorityClassName | `nil` +`defaultBackend.podSecurityContext` | Security context policies to add to the default backend | `{}` +`defaultBackend.service.annotations` | annotations for default backend service | `{}` +`defaultBackend.service.clusterIP` | internal default backend cluster service IP (set to `"-"` to pass an empty value) | `nil` +`defaultBackend.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the default backend service | `false` 
+`defaultBackend.service.externalIPs` | default backend service external IP addresses | `[]` +`defaultBackend.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`defaultBackend.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`defaultBackend.service.type` | type of default backend service to create | `ClusterIP` +`defaultBackend.serviceAccount.create` | if `true`, create a backend service account. Only useful if you need a pod security policy to run the backend. | `true` +`defaultBackend.serviceAccount.name` | The name of the backend service account to use. If not set and `create` is `true`, a name is generated using the fullname template. Only useful if you need a pod security policy to run the backend. | `` +`imagePullSecrets` | name of Secret resource containing private registry credentials | `nil` +`rbac.create` | if `true`, create & use RBAC resources | `true` +`rbac.scope` | if `true`, do not create & use clusterrole and -binding. Set to `true` in combination with `controller.scope.enabled=true` to disable load-balancer status updates and scope the ingress entirely. | `false` +`podSecurityPolicy.enabled` | if `true`, create & use Pod Security Policy resources | `false` +`serviceAccount.create` | if `true`, create a service account for the controller | `true` +`serviceAccount.name` | The name of the controller service account to use. If not set and `create` is `true`, a name is generated using the fullname template. | `` +`revisionHistoryLimit` | The number of old history to retain to allow rollback. | `10` +`tcp` | TCP service key:value pairs. The value is evaluated as a template. | `{}` +`udp` | UDP service key:value pairs The value is evaluated as a template. 
| `{}` +`releaseLabelOverride` | If provided, the value will be used as the `release` label instead of .Release.Name | `""` + +These parameters can be passed via Helm's `--set` option +```console +$ helm install stable/nginx-ingress --name my-release \ + --set controller.metrics.enabled=true +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install stable/nginx-ingress --name my-release -f values.yaml +``` + +A useful trick to debug issues with ingress is to increase the logLevel +as described [here](https://github.com/kubernetes/ingress-nginx/blob/master/docs/troubleshooting.md#debug) + +```console +$ helm install stable/nginx-ingress --set controller.extraArgs.v=2 +``` +> **Tip**: You can use the default [values.yaml](values.yaml) + +## PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +## Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics. + +```console +$ helm install stable/nginx-ingress --name my-release \ + --set controller.metrics.enabled=true +``` + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. 
+ +## nginx-ingress nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: +* in [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +* in [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230) to re-enable the http server + +## ExternalDNS Service configuration + +Add an [ExternalDNS](https://github.com/kubernetes-sigs/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. +``` + +## AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/master/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +## AWS L4 NLB with SSL Redirection + +`ssl-redirect` and `force-ssl-redirect` flag are not working with AWS Network Load Balancer. 
You need to turn if off and add additional port with `server-snippet` in order to make it work. + +The port NLB `80` will be mapped to nginx container port `80` and NLB port `443` will be mapped to nginx container port `8000` (special). Then we use `$server_port` to manage redirection on port `80` +``` +controller: + config: + ssl-redirect: "false" # we use `special` port to control ssl redirection + server-snippet: | + listen 8000; + if ( $server_port = 80 ) { + return 308 https://$host$request_uri; + } + containerPort: + http: 80 + https: 443 + special: 8000 + service: + targetPorts: + http: http + https: special + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443" + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "your-arn" + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +## AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +## Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. 
+ +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +## Helm error when upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +``` +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customconfig-values.yaml new file mode 100644 index 0000000..f12eac3 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + config: + use-proxy-protocol: "true" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 0000000..382bc50 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,15 @@ +controller: + kind: DaemonSet + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-headers-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-headers-values.yaml new file mode 100644 index 0000000..a29690f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-headers-values.yaml @@ -0,0 +1,6 @@ +controller: + kind: DaemonSet + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-nodeport-values.yaml new file mode 100644 index 0000000..ebc8f10 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + service: + type: NodePort diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..3484704 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-values.yaml new file mode 100644 index 0000000..e6866d7 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-values.yaml new file mode 100644 index 0000000..f0a6060 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-values.yaml @@ -0,0 +1,6 @@ +controller: + kind: DaemonSet + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-default-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-default-values.yaml new file mode 100644 index 0000000..ddb2562 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-default-values.yaml @@ -0,0 +1,2 @@ +controller: + kind: DaemonSet diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-metrics-values.yaml new file mode 100644 index 0000000..5ce435d --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-metrics-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + metrics: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-psp-values.yaml new file mode 100644 index 0000000..b441c1a --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-psp-values.yaml @@ -0,0 +1,5 @@ +controller: + kind: DaemonSet + +podSecurityPolicy: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-and-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-and-psp-values.yaml new file mode 100644 index 0000000..2cf9d6f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-and-psp-values.yaml @@ -0,0 +1,7 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: 
true + +podSecurityPolicy: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-values.yaml new file mode 100644 index 0000000..2d2cb47 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-autoscaling-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-autoscaling-values.yaml new file mode 100644 index 0000000..e9701da --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-autoscaling-values.yaml @@ -0,0 +1,3 @@ +controller: + autoscaling: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customconfig-values.yaml new file mode 100644 index 0000000..401aea4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customconfig-values.yaml @@ -0,0 +1,3 @@ +controller: + config: + use-proxy-protocol: "true" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customnodeport-values.yaml new file mode 100644 index 0000000..6958eaa --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,14 @@ +controller: + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-default-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-default-values.yaml new file mode 100644 index 0000000..b15f0e4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-default-values.yaml @@ -0,0 +1 @@ +# Left blank to test default values diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-headers-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-headers-values.yaml new file mode 100644 index 0000000..f3873af --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-headers-values.yaml @@ -0,0 +1,5 @@ +controller: + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-metrics-values.yaml new file mode 100644 index 0000000..9a93fa5 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-metrics-values.yaml @@ -0,0 +1,3 @@ +controller: + metrics: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-nodeport-values.yaml new file mode 100644 index 0000000..ffdc47b --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-nodeport-values.yaml @@ -0,0 +1,3 @@ +controller: + service: + type: NodePort diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-psp-values.yaml new file mode 100644 index 0000000..7aae860 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-psp-values.yaml @@ -0,0 +1,2 @@ +podSecurityPolicy: + enabled: true diff --git 
a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..7b06c1e --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,13 @@ +controller: + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-values.yaml new file mode 100644 index 0000000..7c55d44 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-values.yaml @@ -0,0 +1,9 @@ +controller: + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-values.yaml new file mode 100644 index 0000000..c8bc204 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-values.yaml @@ -0,0 +1,3 @@ +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-and-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-and-psp-values.yaml new file mode 100644 index 0000000..0590d7c --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-and-psp-values.yaml @@ -0,0 +1,6 @@ +controller: + admissionWebhooks: + enabled: true + +podSecurityPolicy: + enabled: true diff --git 
a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-values.yaml new file mode 100644 index 0000000..07e1a92 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-values.yaml @@ -0,0 +1,3 @@ +controller: + admissionWebhooks: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/NOTES.txt b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/NOTES.txt new file mode 100644 index 0000000..e18a901 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/NOTES.txt @@ -0,0 +1,71 @@ +The nginx-ingress controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ template "nginx-ingress.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ template "nginx-ingress.controller.fullname" . }}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." + echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." 
+{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ template "nginx-ingress.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "nginx-ingress.name" . }},component={{ .Values.controller.name }},release={{ template "nginx-ingress.releaseLabel" . }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." +{{- end }} + +An example Ingress that makes use of the controller: + + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + name: example + namespace: foo + spec: + rules: + - host: www.example.com + http: + paths: + - backend: + serviceName: exampleService + servicePort: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. 
##### +################################################################################# +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/_helpers.tpl b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/_helpers.tpl new file mode 100644 index 0000000..1881171 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/_helpers.tpl @@ -0,0 +1,134 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nginx-ingress.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nginx-ingress.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.controller.fullname" -}} +{{- printf "%s-%s" (include "nginx-ingress.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* +Allow for the ability to override the release name used as a label in many places. 
+*/}} +{{- define "nginx-ingress.releaseLabel" -}} +{{- .Values.releaseLabelOverride | default .Release.Name | trunc 63 -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} + +{{- define "nginx-ingress.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" .Release.Namespace (include "nginx-ingress.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "nginx-ingress.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "nginx-ingress.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "nginx-ingress.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "nginx-ingress.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "nginx-ingress.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. 
+*/}} +{{- define "deployment.apiVersion" -}} +{{- if semverCompare ">=1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/templates/addheaders-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/addheaders-configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/addheaders-configmap.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/addheaders-configmap.yaml diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..a248326 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole 
+metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "nginx-ingress.fullname" . }}-admission +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..c99fdf8 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "nginx-ingress.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-createSecret.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..e0d2c04 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,55 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-create + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +spec: + {{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 + {{- end }} + template: + metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-create +{{- with .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + containers: + - name: create + image: {{ template "system_default_registry" . }}{{ .Values.controller.admissionWebhooks.patch.image.repository }}:{{ .Values.controller.admissionWebhooks.patch.image.tag }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ template "nginx-ingress.controller.fullname" . }}-admission,{{ template "nginx-ingress.controller.fullname" . }}-admission.{{ .Release.Namespace }}.svc + - --namespace={{ .Release.Namespace }} + - --secret-name={{ template "nginx-ingress.fullname". }}-admission + restartPolicy: OnFailure + serviceAccountName: {{ template "nginx-ingress.fullname" . }}-admission + {{- with .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 2000 +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..4f60fd9 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-patch + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}}
+    component: "{{ .Values.controller.name }}"
+    heritage: {{ .Release.Service }}
+    release: {{ template "nginx-ingress.releaseLabel" . }}
+spec:
+  {{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }}
+  # Alpha feature since k8s 1.12
+  ttlSecondsAfterFinished: 0
+  {{- end }}
+  template:
+    metadata:
+      name: {{ template "nginx-ingress.fullname" . }}-admission-patch
+{{- with .Values.controller.admissionWebhooks.patch.podAnnotations }}
+      annotations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+      labels:
+        app: {{ template "nginx-ingress.name" . }}
+        chart: {{ template "nginx-ingress.chart" . }}
+        component: "{{ .Values.controller.name }}"
+        heritage: {{ .Release.Service }}
+        release: {{ template "nginx-ingress.releaseLabel" . }}
+    spec:
+      {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      {{- end }}
+      containers:
+        - name: patch
+          image: {{ template "system_default_registry" . }}{{ .Values.controller.admissionWebhooks.patch.image.repository }}:{{ .Values.controller.admissionWebhooks.patch.image.tag }}
+          imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }}
+          args:
+            - patch
+            - --webhook-name={{ template "nginx-ingress.fullname" . }}-admission
+            - --namespace={{ .Release.Namespace }}
+            - --patch-mutating=false
+            - --secret-name={{ template "nginx-ingress.fullname" . }}-admission
+            - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }}
+      restartPolicy: OnFailure
+      serviceAccountName: {{ template "nginx-ingress.fullname" . }}-admission
+      {{- with .Values.controller.admissionWebhooks.patch.nodeSelector }}
+      nodeSelector:
+{{ toYaml . 
| indent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 2000 +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..a23f927 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..665769f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..0e4873f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.fullname" . 
}}-admission + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..c0822f9 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/validating-webhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..cd962e5 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,31 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app: {{ template "nginx-ingress.name" . }}-admission + chart: {{ template "nginx-ingress.chart" . }} + component: "admission-webhook" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . 
}}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - extensions + - networking.k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + clientConfig: + service: + namespace: {{ .Release.Namespace }} + name: {{ template "nginx-ingress.controller.fullname" . }}-admission + path: /extensions/v1beta1/ingresses +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrole.yaml new file mode 100644 index 0000000..14667eb --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrole.yaml @@ -0,0 +1,71 @@ +{{- if and (.Values.rbac.create) (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..39decda --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and (.Values.rbac.create) (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "nginx-ingress.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-configmap.yaml new file mode 100644 index 0000000..25625b4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-configmap.yaml @@ -0,0 +1,22 @@ +{{- if or .Values.controller.config (or (or .Values.controller.proxySetHeaders .Values.controller.headers) .Values.controller.addHeaders) }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +data: +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . 
}}-custom-proxy-headers +{{- end }} +{{- if .Values.controller.config }} +{{ toYaml .Values.controller.config | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-daemonset.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-daemonset.yaml new file mode 100644 index 0000000..da79809 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-daemonset.yaml @@ -0,0 +1,257 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") }} +{{- $useHostPort := .Values.controller.daemonset.useHostPort -}} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: DaemonSet +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + name: {{ template "nginx-ingress.controller.fullname" . }} + annotations: +{{ toYaml .Values.controller.deploymentAnnotations | indent 4}} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.useComponentLabel }} + app.kubernetes.io/component: controller + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + updateStrategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + component: "{{ .Values.controller.name }}" + app.kubernetes.io/component: controller + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8}} + {{- end }} + spec: +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: "{{ .Values.controller.priorityClassName }}" +{{- end }} + {{- if .Values.controller.podSecurityContext }} + securityContext: +{{ toYaml .Values.controller.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.controller.name }} + image: {{ template "system_default_registry" . }}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }} + imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" + {{- if .Values.controller.lifecycle }} + lifecycle: +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + {{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "nginx-ingress.defaultBackend.fullname" . 
}}
+          {{- else }}
+          {{- if (semverCompare "<0.21.0" .Values.controller.image.tag) }}
+            - --default-backend-service={{ required ".Values.controller.defaultBackendService is required if .Values.defaultBackend.enabled=false and .Values.controller.image.tag < 0.21.0" .Values.controller.defaultBackendService }}
+          {{- else if .Values.controller.defaultBackendService }}
+            - --default-backend-service={{ .Values.controller.defaultBackendService }}
+          {{- end }}
+          {{- end }}
+          {{- if and (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) .Values.controller.publishService.enabled }}
+            - --publish-service={{ template "nginx-ingress.controller.publishServicePath" . }}
+          {{- end }}
+          {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }}
+            - --election-id={{ .Values.controller.electionID }}
+          {{- end }}
+          {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }}
+            - --ingress-class={{ .Values.controller.ingressClass }}
+          {{- end }}
+          {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }}
+            - --configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }}
+          {{- else }}
+            - --nginx-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }}
+          {{- end }}
+          {{- if .Values.tcp }}
+            - --tcp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-tcp
+          {{- end }}
+          {{- if .Values.udp }}
+            - --udp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . 
}}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- if and (.Values.controller.reportNodeInternalIp) (.Values.controller.hostNetwork)}} + - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + {{- end }} + {{- if .Values.controller.maxmindLicenseKey }} + - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + {{- if (semverCompare ">=0.16.0" .Values.controller.image.tag) }} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ .Values.controller.image.runAsUser }} + allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.extraEnvs }} +{{ toYaml .Values.controller.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.livenessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + ports: + {{- range $key, 
$value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ index $hostPorts $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + containerPort: {{ $key }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + containerPort: {{ $key }} + protocol: UDP + {{- if $useHostPort }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.readinessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }} + volumeMounts: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: "/usr/local/certificates/" + readOnly: true +{{- end }} +{{- if .Values.controller.extraVolumeMounts }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12}} +{{- 
end }} + resources: +{{ toYaml .Values.controller.resources | indent 12 }} +{{- if .Values.controller.extraContainers }} +{{ toYaml .Values.controller.extraContainers | indent 8}} +{{- end }} +{{- if .Values.controller.extraInitContainers }} + initContainers: +{{ toYaml .Values.controller.extraInitContainers | indent 8}} +{{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: 60 +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} + volumes: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ template "nginx-ingress.fullname". 
}}-admission +{{- end }} +{{- if .Values.controller.extraVolumes }} +{{ toYaml .Values.controller.extraVolumes | indent 8}} +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-deployment.yaml new file mode 100644 index 0000000..65e8f58 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-deployment.yaml @@ -0,0 +1,255 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") }} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + {{- if .Values.controller.deploymentLabels }} +{{ toYaml .Values.controller.deploymentLabels | indent 4 }} + {{- end }} + name: {{ template "nginx-ingress.controller.fullname" . }} + annotations: +{{ toYaml .Values.controller.deploymentAnnotations | indent 4}} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + {{- if .Values.controller.useComponentLabel }} + app.kubernetes.io/component: controller + {{- end }} +{{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + strategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + component: "{{ .Values.controller.name }}" + app.kubernetes.io/component: controller + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + spec: +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: "{{ .Values.controller.priorityClassName }}" +{{- end }} + {{- if .Values.controller.podSecurityContext }} + securityContext: +{{ toYaml .Values.controller.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.controller.name }} + image: {{ template "system_default_registry" . 
}}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }} + imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" + {{- if .Values.controller.lifecycle }} + lifecycle: +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + {{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "nginx-ingress.defaultBackend.fullname" . }} + {{- else }} + {{- if (semverCompare "<0.21.0" .Values.controller.image.tag) }} + - --default-backend-service={{ required ".Values.controller.defaultBackendService is required if .Values.defaultBackend.enabled=false and .Values.controller.image.tag < 0.21.0" .Values.controller.defaultBackendService }} + {{- else if .Values.controller.defaultBackendService }} + - --default-backend-service={{ .Values.controller.defaultBackendService }} + {{- end }} + {{- end }} + {{- if and (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) .Values.controller.publishService.enabled }} + - --publish-service={{ template "nginx-ingress.controller.publishServicePath" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }} + - --election-id={{ .Values.controller.electionID }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }} + - --ingress-class={{ .Values.controller.ingressClass }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }} + - --configmap={{ default .Release.Namespace .Values.controller.configMapNamespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- else }} + - --nginx-configmap={{ default .Release.Namespace .Values.controller.configMapNamespace }}/{{ template "nginx-ingress.controller.fullname" . 
}} + {{- end }} + {{- if .Values.tcp }} + - --tcp-services-configmap={{ default .Release.Namespace .Values.controller.tcp.configMapNamespace }}/{{ template "nginx-ingress.fullname" . }}-tcp + {{- end }} + {{- if .Values.udp }} + - --udp-services-configmap={{ default .Release.Namespace .Values.controller.udp.configMapNamespace }}/{{ template "nginx-ingress.fullname" . }}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- if and (.Values.controller.scope.enabled) (.Values.rbac.scope) }} + - --update-status=false + {{- end }} + {{- if and (.Values.controller.reportNodeInternalIp) (.Values.controller.hostNetwork) }} + - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + {{- end }} + {{- if .Values.controller.maxmindLicenseKey }} + - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + {{- if (semverCompare ">=0.16.0" (trimPrefix "nginx-" .Values.controller.image.tag)) }} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ .Values.controller.image.runAsUser }} + allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.extraEnvs }} +{{ toYaml .Values.controller.extraEnvs | indent 12 }} + {{- end }} + 
livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.livenessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + containerPort: {{ $key }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + containerPort: {{ $key }} + protocol: UDP + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.readinessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }} + volumeMounts: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: 
nginx-template-volume + readOnly: true +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: "/usr/local/certificates/" + readOnly: true +{{- end }} +{{- if .Values.controller.extraVolumeMounts }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12}} +{{- end }} + resources: +{{ toYaml .Values.controller.resources | indent 12 }} +{{- if .Values.controller.extraContainers }} +{{ toYaml .Values.controller.extraContainers | indent 8}} +{{- end }} +{{- if .Values.controller.extraInitContainers }} + initContainers: +{{ toYaml .Values.controller.extraInitContainers | indent 8}} +{{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} + volumes: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ template "nginx-ingress.fullname". 
}}-admission +{{- end }} +{{- if .Values.controller.extraVolumes }} +{{ toYaml .Values.controller.extraVolumes | indent 8}} +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-hpa.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-hpa.yaml new file mode 100644 index 0000000..77d3533 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-hpa.yaml @@ -0,0 +1,34 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") }} +{{- if .Values.controller.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + scaleTargetRef: + apiVersion: {{ template "deployment.apiVersion" . }} + kind: Deployment + name: {{ template "nginx-ingress.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: +{{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . 
}} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/templates/controller-metrics-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-metrics-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-metrics-service.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-metrics-service.yaml diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..888515a --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (gt (.Values.controller.replicaCount | int) 1) }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + app.kubernetes.io/component: controller + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + app.kubernetes.io/component: controller + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-prometheusrules.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..4a43957 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-prometheusrules.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "nginx-ingress.controller.fullname" . }} + {{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} +{{ toYaml .Values.controller.metrics.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "nginx-ingress.name" $ }} + rules: {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-psp.yaml new file mode 100644 index 0000000..ccbf636 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-psp.yaml @@ -0,0 +1,80 @@ +{{- if .Values.podSecurityPolicy.enabled}} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . 
}} +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. + volumes: + - 'configMap' + #- 'emptyDir' + - 'projected' + - 'secret' + #- 'downwardAPI' + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- if or .Values.controller.hostNetwork .Values.controller.daemonset.useHostPort }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.daemonset.useHostPort }} +{{- range $key, $value := .Values.controller.daemonset.hostPorts }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-role.yaml new file mode 100644 index 0000000..bb9ff14 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-role.yaml @@ -0,0 +1,91 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }}-{{ .Values.controller.ingressClass }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "nginx-ingress.fullname" . 
}}] +{{- end }} + +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..c1186c0 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-service.yaml new file mode 100644 index 0000000..15d51a0 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-service.yaml @@ -0,0 +1,94 @@ +{{- if .Values.controller.service.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.service.annotations }} + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: +{{- if .Values.controller.service.labels }} +{{ toYaml .Values.controller.service.labels | indent 4 }} +{{- end }} + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: +{{- if not .Values.controller.service.omitClusterIP }} + {{- with .Values.controller.service.clusterIP }} + clusterIP: {{ if eq "-" . }}""{{ else }}{{ . | quote }}{{ end }} + {{- end }} +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} +{{- if and (semverCompare ">=1.7-0" .Capabilities.KubeVersion.GitVersion) (.Values.controller.service.externalTrafficPolicy) }} + externalTrafficPolicy: "{{ .Values.controller.service.externalTrafficPolicy }}" +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: "{{ .Values.controller.service.sessionAffinity }}" +{{- end }} +{{- if and (semverCompare ">=1.7-0" .Capabilities.KubeVersion.GitVersion) (.Values.controller.service.healthCheckNodePort) }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + 
- name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + port: {{ $key }} + protocol: TCP + targetPort: "{{ $key }}-tcp" + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + port: {{ $key }} + protocol: UDP + targetPort: "{{ $key }}-udp" + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + type: "{{ .Values.controller.service.type }}" +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-serviceaccount.yaml new file mode 100644 index 0000000..7b688e6 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.serviceAccountName" . 
}} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-servicemonitor.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-servicemonitor.yaml new file mode 100644 index 0000000..f3129ea --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-servicemonitor.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "nginx-ingress.controller.fullname" . }} + {{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: +{{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | indent 4 -}} + {{ else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + {{- end }} + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + component: "{{ .Values.controller.name }}" + release: {{ template "nginx-ingress.releaseLabel" . 
}} +{{- end }} diff --git a/charts/rke2-ingress-nginx/templates/controller-webhook-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-webhook-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-webhook-service.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-webhook-service.yaml diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..a4c8d23 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-deployment.yaml @@ -0,0 +1,110 @@ +{{- if .Values.defaultBackend.enabled }} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + {{- if .Values.defaultBackend.deploymentLabels }} +{{ toYaml .Values.defaultBackend.deploymentLabels | indent 4 }} + {{- end }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + {{- if .Values.defaultBackend.useComponentLabel }} + app.kubernetes.io/component: default-backend + {{- end }} + replicas: {{ .Values.defaultBackend.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: + {{- range $key, $value := .Values.defaultBackend.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + {{- if .Values.defaultBackend.podLabels }} +{{ toYaml .Values.defaultBackend.podLabels | indent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: "{{ .Values.defaultBackend.priorityClassName }}" +{{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: +{{ toYaml .Values.defaultBackend.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.defaultBackend.name }} + image: {{ template "system_default_registry" . 
}}{{ .Values.defaultBackend.image.repository }}:{{ .Values.defaultBackend.image.tag }} + imagePullPolicy: "{{ .Values.defaultBackend.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + securityContext: + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + {{- if .Values.defaultBackend.extraEnvs }} + env: +{{ toYaml .Values.defaultBackend.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + resources: +{{ toYaml .Values.defaultBackend.resources | indent 12 }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: +{{ toYaml .Values.defaultBackend.nodeSelector | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . 
}} + {{- if .Values.defaultBackend.tolerations }} + tolerations: +{{ toYaml .Values.defaultBackend.tolerations | indent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: +{{ toYaml .Values.defaultBackend.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..0713c01 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if gt (.Values.defaultBackend.replicaCount | int) 1 }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + app.kubernetes.io/component: default-backend + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-psp.yaml new file mode 100644 index 0000000..38191d4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-psp.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . 
}}-backend + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-role.yaml new file mode 100644 index 0000000..11fbba9 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-role.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-backend +rules: + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "nginx-ingress.fullname" . 
}}-backend] +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..7d03ef4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-backend +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-service.yaml new file mode 100644 index 0000000..23dba19 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-service.yaml @@ -0,0 +1,45 @@ +{{- if .Values.defaultBackend.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: + {{- range $key, $value := .Values.defaultBackend.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}} + component: "{{ .Values.defaultBackend.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: +{{- if not .Values.defaultBackend.service.omitClusterIP }} + {{- with .Values.defaultBackend.service.clusterIP }} + clusterIP: {{ if eq "-" . }}""{{ else }}{{ . | quote }}{{ end }} + {{- end }} +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: +{{ toYaml .Values.defaultBackend.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.defaultBackend.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + type: "{{ .Values.defaultBackend.service.type }}" +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..94689a6 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + name: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/templates/proxyheaders-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/proxyheaders-configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/proxyheaders-configmap.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/proxyheaders-configmap.yaml diff --git a/charts/rke2-ingress-nginx/templates/tcp-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/tcp-configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/tcp-configmap.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/tcp-configmap.yaml diff --git a/charts/rke2-ingress-nginx/templates/udp-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/udp-configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/udp-configmap.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/udp-configmap.yaml diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/values.yaml new file mode 100644 index 0000000..da74bbe --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/values.yaml @@ -0,0 +1,578 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md +## +controller: + name: controller + image: + repository: rancher/nginx-ingress-controller + tag: "nginx-0.30.0-rancher1" + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # This will fix the issue of HPA not being able to read the metrics. 
+ # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. + useComponentLabel: false + + # Configures the ports the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + # Maxmind license key to download GeoLite2 Databases + # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: true + + # Optionally customize the pod dnsConfig. + dnsConfig: {} + + # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
+ dnsPolicy: ClusterFirstWithHostNet + + # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + ## Use host ports 80 and 443 + daemonset: + useHostPort: false + + hostPorts: + http: 80 + https: 443 + + ## Required only if defaultBackend.enabled = false + ## Must be / + ## + defaultBackendService: "" + + ## Election ID to use for status update + ## + electionID: ingress-controller-leader + + ## Name of the ingress class to route through this controller + ## + ingressClass: nginx + + # labels to add to the deployment metadata + deploymentLabels: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + ## Allows customization of the external service + ## the ingress will be bound to via DNS + publishService: + enabled: false + ## Allows overriding of the publish service to bind to + ## Must be / + ## + pathOverride: "" + + ## Limit the scope of the controller + ## + scope: + enabled: false + namespace: "" # defaults to .Release.Namespace + + ## Allows customization of the configmap / nginx-configmap namespace + ## + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the tcp-services-configmap namespace + ## + tcp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the udp-services-configmap namespace + ## + udp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Additional command line arguments to pass to 
nginx-ingress-controller + ## E.g. to specify the default SSL certificate you can use + ## extraArgs: + ## default-ssl-certificate: "/" + extraArgs: {} + + ## Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + ## DaemonSet or Deployment + ## + kind: Deployment + + ## Annotations to be added to the controller deployment + ## + deploymentAnnotations: {} + + # The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # minReadySeconds to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: "kubernetes.io/hostname" + + ## terminationGracePeriodSeconds + ## + terminationGracePeriodSeconds: 60 + + ## Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## 
Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 100m + # memory: 64Mi + + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: false + + annotations: {} + labels: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + externalTrafficPolicy: "" + + # Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: "" + + healthCheckNodePort: 0 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + # type: NodePort + # nodePorts: + # http: 32080 + # https: 32443 + # tcp: + # 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + extraContainers: [] + ## Additional containers to be added to the controller pod. + ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + extraVolumeMounts: [] + ## Additional volumeMounts to the controller main container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the controller pod. + # - name: copy-portal-skins + # emptyDir: {} + + extraInitContainers: [] + ## Containers, which are run before the app containers are started. 
+ # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + admissionWebhooks: + enabled: false + failurePolicy: Fail + port: 8443 + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + patch: + enabled: true + image: + repository: jettech/kube-webhook-certgen + tag: v1.0.0 + pullPolicy: IfNotPresent + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9913 + type: ClusterIP + + serviceMonitor: + enabled: false + additionalLabels: {} + namespace: "" + namespaceSelector: {} + # Default: scrape .Release.Namespace only + # To scrape all, use the following: + # namespaceSelector: + # any: true + scrapeInterval: 30s + # honorLabels: true + + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: TooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 5XXs + # summary: More than 5% of the 
all requests did return 5XX, this require your attention + # - alert: TooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 4XXs + # summary: More than 5% of the all requests did return 4XX, this require your attention + + + lifecycle: {} + + priorityClassName: "" + +## Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + + ## If false, controller.defaultBackendService must be provided + ## + enabled: true + + name: default-backend + image: + repository: rancher/nginx-ingress-controller-defaultbackend + tag: "1.5-rancher1" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + + # This will fix the issue of HPA not being able to read the metrics. + # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. 
+ useComponentLabel: false + + extraArgs: {} + + serviceAccount: + create: true + name: + ## Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + # labels to add to the deployment metadata + deploymentLabels: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + 
loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + +# If provided, the value will be used as the `release` label instead of .Release.Name +releaseLabelOverride: "" + +## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 +rbac: + create: true + scope: false + +# If true, create & use Pod Security Policy resources +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: + +## Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# TCP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# UDP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp +## +udp: {} +# 53: "kube-system/kube-dns:53" + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/.helmignore b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rke2-ingress-nginx/Chart.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/Chart.yaml old mode 100755 new mode 100644 similarity index 92% rename from charts/rke2-ingress-nginx/Chart.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/Chart.yaml index 0689764..694932d --- a/charts/rke2-ingress-nginx/Chart.yaml +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/Chart.yaml @@ -1,7 +1,6 @@ apiVersion: v1 appVersion: 0.35.0 -description: Ingress controller for Kubernetes using NGINX as a reverse proxy and - load balancer +description: Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer home: https://github.com/kubernetes/ingress-nginx icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png keywords: diff --git a/charts/rke2-ingress-nginx/OWNERS b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/OWNERS old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/OWNERS rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/OWNERS diff --git a/charts/rke2-ingress-nginx/README.md b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/README.md old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/README.md rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/README.md diff --git a/charts/rke2-ingress-nginx/ci/daemonset-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-customconfig-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-customconfig-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-customconfig-values.yaml 
diff --git a/charts/rke2-ingress-nginx/ci/daemonset-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-customnodeport-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-customnodeport-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-customnodeport-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/daemonset-headers-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-headers-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-headers-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-headers-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/daemonset-internal-lb-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-internal-lb-values.yaml similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-internal-lb-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-internal-lb-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/daemonset-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-nodeport-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-nodeport-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-nodeport-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-udp-configMapNamespace-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-udp-configMapNamespace-values.yaml 
diff --git a/charts/rke2-ingress-nginx/ci/daemonset-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-udp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-tcp-udp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-udp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/daemonset-tcp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-tcp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deamonset-default-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-default-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deamonset-default-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-default-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deamonset-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-metrics-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deamonset-metrics-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-metrics-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deamonset-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-psp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deamonset-psp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-psp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-webhook-and-psp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-webhook-and-psp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deamonset-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-webhook-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deamonset-webhook-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-webhook-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-autoscaling-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-autoscaling-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-autoscaling-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-autoscaling-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-customconfig-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-customconfig-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-customconfig-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-customnodeport-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-customnodeport-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-customnodeport-values.yaml diff --git 
a/charts/rke2-ingress-nginx/ci/deployment-default-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-default-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-default-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-default-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-headers-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-headers-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-headers-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-headers-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-internal-lb-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-internal-lb-values.yaml similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-internal-lb-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-internal-lb-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-metrics-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-metrics-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-metrics-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-nodeport-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-nodeport-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-nodeport-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-psp-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-psp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-psp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-psp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-udp-configMapNamespace-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-udp-configMapNamespace-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-udp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-tcp-udp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-udp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-tcp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-tcp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-webhook-and-psp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-webhook-and-psp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-webhook-and-psp-values.yaml diff --git 
a/charts/rke2-ingress-nginx/ci/deployment-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-webhook-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-webhook-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-webhook-values.yaml diff --git a/charts/rke2-ingress-nginx/templates/NOTES.txt b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/NOTES.txt old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/NOTES.txt rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/NOTES.txt diff --git a/charts/rke2-ingress-nginx/templates/_helpers.tpl b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/_helpers.tpl old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/_helpers.tpl rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/_helpers.tpl diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/clusterrole.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/clusterrole.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/clusterrolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml rename to 
charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/clusterrolebinding.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/job-createSecret.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/job-createSecret.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/job-patchWebhook.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/job-patchWebhook.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/psp.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/psp.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/role.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/role.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/role.yaml diff --git 
a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/rolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/rolebinding.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/serviceaccount.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/serviceaccount.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/validating-webhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/validating-webhook.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/validating-webhook.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/validating-webhook.yaml diff --git a/charts/rke2-ingress-nginx/templates/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/clusterrole.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/clusterrole.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/clusterrole.yaml diff --git a/charts/rke2-ingress-nginx/templates/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/clusterrolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from 
charts/rke2-ingress-nginx/templates/clusterrolebinding.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/clusterrolebinding.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-configmap-addheaders.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-addheaders.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-configmap-addheaders.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-addheaders.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-proxyheaders.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-configmap-proxyheaders.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-proxyheaders.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-configmap-tcp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-tcp.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-configmap-tcp.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-tcp.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-configmap-udp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-udp.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-configmap-udp.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-udp.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from 
charts/rke2-ingress-nginx/templates/controller-configmap.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-daemonset.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-daemonset.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-daemonset.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-daemonset.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-deployment.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-deployment.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-deployment.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-hpa.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-hpa.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-hpa.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-hpa.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-poddisruptionbudget.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-poddisruptionbudget.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-poddisruptionbudget.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-prometheusrules.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-prometheusrules.yaml old mode 100755 new mode 100644 similarity index 100% rename from 
charts/rke2-ingress-nginx/templates/controller-prometheusrules.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-prometheusrules.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-psp.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-psp.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-psp.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-role.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-role.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-role.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-rolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-rolebinding.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-rolebinding.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-service-internal.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-internal.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-service-internal.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-internal.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-service-metrics.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-metrics.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-service-metrics.yaml rename to 
charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-metrics.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-service-webhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-webhook.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-service-webhook.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-webhook.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-service.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-serviceaccount.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-serviceaccount.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-serviceaccount.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-servicemonitor.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-servicemonitor.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-servicemonitor.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-servicemonitor.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-deployment.yaml old mode 100755 new mode 100644 similarity index 100% rename from 
charts/rke2-ingress-nginx/templates/default-backend-deployment.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-deployment.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-poddisruptionbudget.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-poddisruptionbudget.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-poddisruptionbudget.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-psp.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-psp.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-psp.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-role.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-role.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-role.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-rolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-rolebinding.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-rolebinding.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-service.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-service.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-service.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-serviceaccount.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-serviceaccount.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-serviceaccount.yaml diff --git a/charts/rke2-ingress-nginx/values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/values.yaml diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/Chart.yaml new file mode 100644 index 0000000..c2fa2dd --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.10 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.10 diff --git a/charts/rke2-kube-proxy/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/NOTES.txt similarity index 100% rename from charts/rke2-kube-proxy/templates/NOTES.txt rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/NOTES.txt diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ 
.Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddresses | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/daemonset.yaml 
b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/templates/rbac.yaml 
b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/rbac.yaml similarity index 100% rename from charts/rke2-kube-proxy/templates/rbac.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/rbac.yaml diff --git a/charts/rke2-kube-proxy/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/serviceaccount.yaml similarity index 100% rename from charts/rke2-kube-proxy/templates/serviceaccount.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/serviceaccount.yaml diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/values.yaml new file mode 100644 index 0000000..5674ac7 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.10 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). 
+ min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + 
RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). 
+ minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. 
+ # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/Chart.yaml new file mode 100644 index 0000000..6f63cb8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.12 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.12 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + 
tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddresses | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: 
RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/values.yaml new file mode 100644 index 0000000..e5f9bbc --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.12 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. 
+config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: 
false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. 
'5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. 
+nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/Chart.yaml new file mode 100644 index 0000000..f42e9e7 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.13 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.13 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + 
tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddresses | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: 
RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/values.yaml new file mode 100644 index 0000000..07afd93 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.13 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. 
+config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: 
false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. 
'5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. 
+nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/Chart.yaml new file mode 100644 index 0000000..3605f53 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.15 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.15 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/_helpers.tpl old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-kube-proxy/templates/_helpers.tpl rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/_helpers.tpl diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/config.yaml new file mode 100644 index 0000000..20a215e --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . 
| quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddresses | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + 
{{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/daemonset.yaml similarity index 100% rename from charts/rke2-kube-proxy/templates/daemonset.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/daemonset.yaml diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/values.yaml new file mode 100644 index 0000000..59a6be0 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/values.yaml @@ -0,0 +1,221 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.15 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 
0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + 
DevicePlugins: true + DryRun: true + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. 
+hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). 
+ kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. 
Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/Chart.yaml new file mode 100644 index 0000000..9281912 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.16 +description: Install Kube Proxy. +keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.16 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. 
+ diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/_helpers.tpl new file mode 100644 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/config.yaml similarity index 100% rename from charts/rke2-kube-proxy/templates/config.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/config.yaml diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/daemonset.yaml new file mode 100644 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + 
apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . }}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . }}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/serviceaccount.yaml 
b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/values.yaml new file mode 100644 index 0000000..32d2f92 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/values.yaml @@ -0,0 +1,142 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.16 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). 
+ min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). 
+ tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. 
+# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/Chart.yaml new file mode 100644 index 0000000..64b0584 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.4 +description: Install Kube Proxy. +keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.4 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. 
+ diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if 
.Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddresses | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - 
/usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: 
kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/values.yaml new file mode 100644 index 0000000..670bb47 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/kube-proxy + tag: v1.18.4 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). 
+ min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + 
RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). 
+ minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. 
+ # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/Chart.yaml new file mode 100644 index 0000000..625dfde --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.8 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.8 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + 
tcpCloseWaitTimeout: {{ .Values.conntrack.tcpTimeoutCloseWait | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpTimeoutEstablished | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.profiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCidrs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictArp | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpfinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpfinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: 
RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/values.yaml new file mode 100644 index 0000000..544723e --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.8 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. 
Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + 
KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. 
+ syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. 
Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/Chart.yaml new file mode 100644 index 0000000..b46eee9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.9 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.9 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + 
tcpCloseWaitTimeout: {{ .Values.conntrack.tcpTimeoutCloseWait | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpTimeoutEstablished | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.profiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCidrs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictArp | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpfinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpfinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: 
RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/values.yaml new file mode 100644 index 0000000..904c70a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.9 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. 
Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + 
KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. 
+ syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. 
Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/Chart.yaml new file mode 100644 index 0000000..46d5eae --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.19.5 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.19.5 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/_helpers.tpl new file mode 100644 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/config.yaml new file mode 100644 index 0000000..20a215e --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | 
quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ 
.Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/daemonset.yaml new file mode 100644 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . 
}}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . }}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/values.yaml new file mode 100644 index 0000000..9fe3cb4 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/values.yaml @@ -0,0 +1,221 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.19.5 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). 
+ min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + 
RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). 
+ minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. 
+ # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/Chart.yaml new file mode 100644 index 0000000..5478cf9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.19.7 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.19.7 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/_helpers.tpl new file mode 100644 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/config.yaml new file mode 100644 index 0000000..536a12a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | 
quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ 
.Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/daemonset.yaml new file mode 100644 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . 
}}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . }}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/values.yaml new file mode 100644 index 0000000..bc250f1 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/values.yaml @@ -0,0 +1,221 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.19.7 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). 
+ min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + 
RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). 
+ minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. 
+ # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/Chart.yaml similarity index 55% rename from charts/rke2-kube-proxy/Chart.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/Chart.yaml index 367c94e..b970eaa 100644 --- a/charts/rke2-kube-proxy/Chart.yaml +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/Chart.yaml @@ -1,12 +1,12 @@ apiVersion: v1 -name: rke2-kube-proxy -description: Install Kube Proxy. -version: v1.19.8 appVersion: v1.19.8 +description: Install Kube Proxy. 
keywords: - - kube-proxy -sources: - - https://github.com/rancher/rke2-charts +- kube-proxy maintainers: - - name: Rancher Labs - email: charts@rancher.com +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.19.8 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/_helpers.tpl new file mode 100644 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/config.yaml new file mode 100644 index 0000000..536a12a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ 
.Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ 
.Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/daemonset.yaml new file mode 100644 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . 
}}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . }}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/values.yaml 
b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/values.yaml similarity index 100% rename from charts/rke2-kube-proxy/values.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/values.yaml diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/Chart.yaml new file mode 100644 index 0000000..89613c9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.20.2 +description: Install Kube Proxy. +keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.20.2 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. 
+ diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/_helpers.tpl new file mode 100644 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/config.yaml new file mode 100644 index 0000000..536a12a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . 
| quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- 
end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/daemonset.yaml new file mode 100644 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . }}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . 
}}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/values.yaml new file mode 100644 index 0000000..892511a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/values.yaml @@ -0,0 +1,142 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.20.2 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. 
+# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). 
Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. 
+nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/.helmignore b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/.helmignore new file mode 100644 index 0000000..37ea1d7 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +OWNERS +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rke2-metrics-server/Chart.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/Chart.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/Chart.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/Chart.yaml diff --git a/charts/rke2-metrics-server/README.md b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/README.md old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/README.md rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/README.md diff --git a/charts/rke2-metrics-server/ci/ci-values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/ci/ci-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/ci/ci-values.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/ci/ci-values.yaml diff --git a/charts/rke2-metrics-server/templates/NOTES.txt b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/NOTES.txt old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/NOTES.txt rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/NOTES.txt diff --git a/charts/rke2-metrics-server/templates/_helpers.tpl b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/_helpers.tpl old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/_helpers.tpl rename to 
charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/_helpers.tpl diff --git a/charts/rke2-metrics-server/templates/aggregated-metrics-reader-cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/aggregated-metrics-reader-cluster-role.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/aggregated-metrics-reader-cluster-role.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/aggregated-metrics-reader-cluster-role.yaml diff --git a/charts/rke2-metrics-server/templates/auth-delegator-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/auth-delegator-crb.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/auth-delegator-crb.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/auth-delegator-crb.yaml diff --git a/charts/rke2-metrics-server/templates/cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/cluster-role.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/cluster-role.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/cluster-role.yaml diff --git a/charts/rke2-metrics-server/templates/metric-server-service.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metric-server-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/metric-server-service.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metric-server-service.yaml diff --git a/charts/rke2-metrics-server/templates/metrics-api-service.yaml 
b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-api-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/metrics-api-service.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-api-service.yaml diff --git a/charts/rke2-metrics-server/templates/metrics-server-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-crb.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/metrics-server-crb.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-crb.yaml diff --git a/charts/rke2-metrics-server/templates/metrics-server-deployment.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-deployment.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/metrics-server-deployment.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-deployment.yaml diff --git a/charts/rke2-metrics-server/templates/metrics-server-serviceaccount.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-serviceaccount.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/metrics-server-serviceaccount.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-serviceaccount.yaml diff --git a/charts/rke2-metrics-server/templates/pdb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/pdb.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/pdb.yaml rename to 
charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/pdb.yaml diff --git a/charts/rke2-metrics-server/templates/psp.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/psp.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/psp.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/psp.yaml diff --git a/charts/rke2-metrics-server/templates/role-binding.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/role-binding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/role-binding.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/role-binding.yaml diff --git a/charts/rke2-metrics-server/templates/tests/test-version.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/tests/test-version.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/tests/test-version.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/tests/test-version.yaml diff --git a/charts/rke2-metrics-server/values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/values.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/values.yaml diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/.helmignore b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/.helmignore new file mode 100644 index 0000000..37ea1d7 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +OWNERS +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/Chart.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/Chart.yaml new file mode 100644 index 0000000..0abfceb --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +appVersion: 0.3.6 +description: Metrics Server is a cluster-wide aggregator of resource usage data. +home: https://github.com/kubernetes-incubator/metrics-server +keywords: +- metrics-server +maintainers: +- email: o.with@sportradar.com + name: olemarkus +- email: k.aasan@sportradar.com + name: kennethaasan +name: rke2-metrics-server +sources: +- https://github.com/kubernetes-incubator/metrics-server +version: 2.11.100 diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/README.md b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/README.md new file mode 100644 index 0000000..678f084 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/README.md @@ -0,0 +1,39 @@ +# metrics-server + +[Metrics Server](https://github.com/kubernetes-incubator/metrics-server) is a cluster-wide aggregator of resource usage data. Resource metrics are used by components like `kubectl top` and the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale) to scale workloads. To autoscale based upon a custom metric, see the [Prometheus Adapter chart](https://github.com/helm/charts/blob/master/stable/prometheus-adapter). 
+ +## Configuration + +Parameter | Description | Default +--- | --- | --- +`rbac.create` | Enable Role-based authentication | `true` +`rbac.pspEnabled` | Enable pod security policy support | `false` +`serviceAccount.create` | If `true`, create a new service account | `true` +`serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | `` +`apiService.create` | Create the v1beta1.metrics.k8s.io API service | `true` +`hostNetwork.enabled` | Enable hostNetwork mode | `false` +`image.repository` | Image repository | `k8s.gcr.io/metrics-server-amd64` +`image.tag` | Image tag | `v0.3.2` +`image.pullPolicy` | Image pull policy | `IfNotPresent` +`imagePullSecrets` | Image pull secrets | `[]` +`args` | Command line arguments | `[]` +`resources` | CPU/Memory resource requests/limits. | `{}` +`tolerations` | List of node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`nodeSelector` | Node labels for pod assignment | `{}` +`affinity` | Node affinity | `{}` +`replicas` | Number of replicas | `1` +`extraVolumeMounts` | Ability to provide volume mounts to the pod | `[]` +`extraVolumes` | Ability to provide volumes to the pod | `[]` +`livenessProbe` | Container liveness probe | See values.yaml +`podLabels` | Labels to be added to pods | `{}` +`podAnnotations` | Annotations to be added to pods | `{}` +`priorityClassName` | Pod priority class | `""` +`readinessProbe` | Container readiness probe | See values.yaml +`service.annotations` | Annotations to add to the service | `{}` +`service.labels` | Labels to be added to the metrics-server service | `{}` +`service.port` | Service port to expose | `443` +`service.type` | Type of service to create | `ClusterIP` +`podDisruptionBudget.enabled` | Create a PodDisruptionBudget | `false` +`podDisruptionBudget.minAvailable` | Minimum available instances; ignored if there is no PodDisruptionBudget | +`podDisruptionBudget.maxUnavailable` | Maximum 
unavailable instances; ignored if there is no PodDisruptionBudget | +`extraContainers` | Add additional containers | `[]` diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/ci/ci-values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/ci/ci-values.yaml new file mode 100644 index 0000000..a9d81b4 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/ci/ci-values.yaml @@ -0,0 +1,5 @@ +# CI is running on GKE, which already ships metrics-server. This cause +# conflicts on the apiService resource. + +apiService: + create: false diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/NOTES.txt b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/NOTES.txt new file mode 100644 index 0000000..1034c12 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/NOTES.txt @@ -0,0 +1,11 @@ +The metric server has been deployed. +{{ if .Values.apiService.create }} +In a few minutes you should be able to list metrics using the following +command: + + kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" +{{ else }} +NOTE: You have disabled the API service creation for this release. The metrics +API will not work with this release unless you configure the metrics API +service outside of this Helm chart. +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/_helpers.tpl b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/_helpers.tpl new file mode 100644 index 0000000..b59ca03 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/_helpers.tpl @@ -0,0 +1,59 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "metrics-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "metrics-server.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "metrics-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a service name that defaults to app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "metrics-server.service.fullname" -}} +{{- .Values.service.nameOverride | default .Chart.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "metrics-server.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "metrics-server.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/aggregated-metrics-reader-cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/aggregated-metrics-reader-cluster-role.yaml new file mode 100644 index 0000000..e91a3d8 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/aggregated-metrics-reader-cluster-role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:{{ template "metrics-server.name" . }}-aggregated-reader + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: ["pods","nodes"] + verbs: ["get", "list", "watch"] +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/auth-delegator-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/auth-delegator-crb.yaml new file mode 100644 index 0000000..e82fca0 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/auth-delegator-crb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "metrics-server.fullname" . }}:system:auth-delegator + labels: + app: {{ template "metrics-server.name" . 
}} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/cluster-role.yaml new file mode 100644 index 0000000..8763acd --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/cluster-role.yaml @@ -0,0 +1,34 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:{{ template "metrics-server.fullname" . }} + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + - namespaces + verbs: + - get + - list + - watch + {{- if .Values.rbac.pspEnabled }} + - apiGroups: + - extensions + - policy + resources: + - podsecuritypolicies + resourceNames: + - privileged-{{ template "metrics-server.fullname" . }} + verbs: + - use + {{- end -}} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metric-server-service.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metric-server-service.yaml new file mode 100644 index 0000000..0d64cd1 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metric-server-service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "metrics-server.name" . 
}} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- with .Values.service.labels -}} + {{ toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.service.annotations | trim | nindent 4 }} +spec: + ports: + - port: {{ .Values.service.port }} + protocol: TCP + targetPort: https + selector: + app: {{ template "metrics-server.name" . }} + release: {{ .Release.Name }} + type: {{ .Values.service.type }} + diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-api-service.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-api-service.yaml new file mode 100644 index 0000000..552ffea --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-api-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.apiService.create -}} +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.metrics.k8s.io + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + service: + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + group: metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-crb.yaml new file mode 100644 index 0000000..eb04c6f --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-crb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:{{ template "metrics-server.fullname" . 
}} + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:{{ template "metrics-server.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-deployment.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-deployment.yaml new file mode 100644 index 0000000..2e54f27 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-deployment.yaml @@ -0,0 +1,88 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "metrics-server.name" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.replicas }} + template: + metadata: + labels: + app: {{ template "metrics-server.name" . }} + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + {{- with .Values.podAnnotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "metrics-server.serviceAccountName" . 
}} +{{- if .Values.hostNetwork.enabled }} + hostNetwork: true +{{- end }} + containers: + {{- if .Values.extraContainers }} + {{- ( tpl (toYaml .Values.extraContainers) . ) | nindent 8 }} + {{- end }} + - name: metrics-server + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /metrics-server + - --cert-dir=/tmp + - --logtostderr + - --secure-port=8443 + {{- range .Values.args }} + - {{ . }} + {{- end }} + ports: + - containerPort: 8443 + name: https + livenessProbe: + {{- toYaml .Values.livenessProbe | trim | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | trim | nindent 12 }} + resources: + {{- toYaml .Values.resources | trim | nindent 12 }} + securityContext: + {{- toYaml .Values.securityContext | trim | nindent 12 }} + volumeMounts: + - name: tmp + mountPath: /tmp + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 10 }} + {{- end }} + nodeSelector: + {{- toYaml .Values.nodeSelector | trim | nindent 8 }} + affinity: + {{- toYaml .Values.affinity | trim | nindent 8 }} + tolerations: + {{- toYaml .Values.tolerations | trim | nindent 8 }} + volumes: + - name: tmp + emptyDir: {} + {{- with .Values.extraVolumes }} + {{- toYaml . | nindent 6}} + {{- end }} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-serviceaccount.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-serviceaccount.yaml new file mode 100644 index 0000000..4d748ed --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "metrics-server.name" . 
}} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/pdb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/pdb.yaml new file mode 100644 index 0000000..3831097 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.podDisruptionBudget.enabled -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "metrics-server.name" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/psp.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/psp.yaml new file mode 100644 index 0000000..b5cb7da --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/psp.yaml @@ -0,0 +1,26 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: privileged-{{ template "metrics-server.fullname" . 
}} +spec: + allowedCapabilities: + - '*' + fsGroup: + rule: RunAsAny + privileged: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - '*' + hostPID: true + hostIPC: true + hostNetwork: true + hostPorts: + - min: 1 + max: 65536 +{{- end }} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/role-binding.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/role-binding.yaml new file mode 100644 index 0000000..3169f24 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/role-binding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "metrics-server.fullname" . }}-auth-reader + namespace: kube-system + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/tests/test-version.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/tests/test-version.yaml new file mode 100644 index 0000000..3648e6d --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/tests/test-version.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ template "metrics-server.fullname" . }}-test + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['/bin/sh'] + args: + - -c + - 'wget -qO- https://{{ include "metrics-server.fullname" . }}:{{ .Values.service.port }}/version | grep -F {{ .Values.image.tag }}' + restartPolicy: Never + diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/values.yaml new file mode 100644 index 0000000..30ca72e --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/values.yaml @@ -0,0 +1,113 @@ +rbac: + # Specifies whether RBAC resources should be created + create: true + pspEnabled: false + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +apiService: + # Specifies if the v1beta1.metrics.k8s.io API service should be created. + # + # You typically want this enabled! If you disable API service creation you have to + # manage it outside of this chart for e.g horizontal pod autoscaling to + # work with this release. + create: true + +hostNetwork: + # Specifies if metrics-server should be started in hostNetwork mode. + # + # You would require this enabled if you use alternate overlay networking for pods and + # API server unable to communicate with metrics-server. 
As an example, this is required + # if you use Weave network on EKS + enabled: false + +image: + repository: rancher/hardened-k8s-metrics-server + tag: v0.3.6 + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - registrySecretName + +args: +# enable this if you have self-signed certificates, see: https://github.com/kubernetes-incubator/metrics-server +# - --kubelet-insecure-tls + - --kubelet-preferred-address-types=InternalIP + +resources: {} + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +replicas: 1 + +extraContainers: [] + +podLabels: {} + +podAnnotations: {} +# The following annotations guarantee scheduling for critical add-on pods. +# See more at: https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ +# scheduler.alpha.kubernetes.io/critical-pod: '' + +## Set a pod priorityClassName +priorityClassName: system-node-critical + +extraVolumeMounts: [] +# - name: secrets +# mountPath: /etc/kubernetes/secrets +# readOnly: true + +extraVolumes: [] +# - name: secrets +# secret: +# secretName: kube-apiserver + +livenessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + +readinessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["all"] + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + +service: + annotations: {} + labels: {} + # Add these labels to have metrics-server show up in `kubectl cluster-info` + # kubernetes.io/cluster-service: "true" + # kubernetes.io/name: "Metrics-server" + port: 443 + type: ClusterIP + +podDisruptionBudget: + # https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + enabled: false + minAvailable: + maxUnavailable: + +global: + systemDefaultRegistry: "" diff --git a/index.yaml b/index.yaml index 215b65c..dee5a53 100644 --- a/index.yaml +++ b/index.yaml @@ -3,7 +3,7 @@ entries: 
rke2-canal: - apiVersion: v1 appVersion: v3.13.3 - created: "2021-02-24T21:41:48.737080031Z" + created: "2021-02-25T17:59:12.931728-08:00" description: Install Canal Network Plugin. digest: 4b6ac74aec73a70d12186701660c1f221fdbcb582571029a6c8fbc2738065742 home: https://www.projectcalico.org/ @@ -20,7 +20,7 @@ entries: version: v3.13.300-build20210223 - apiVersion: v1 appVersion: v3.13.3 - created: "2021-02-19T16:11:27.472930693Z" + created: "2021-02-25T17:59:12.931223-08:00" description: Install Canal Network Plugin. digest: 2396b0aca28a6d4a373a251b02e4efa12bbfedf29e37e45904b860176d0c80f8 home: https://www.projectcalico.org/ @@ -38,8 +38,9 @@ entries: rke2-coredns: - apiVersion: v1 appVersion: 1.7.1 - created: "2021-01-08T18:12:00.296423364Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services + created: "2021-02-25T17:59:12.935081-08:00" + description: CoreDNS is a DNS server that chains plugins and provides Kubernetes + DNS Services digest: 335099356a98589e09f1bb940913b0ed6abb8d2c4db91720f87d1cf7697a5cf7 home: https://coredns.io icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png @@ -55,8 +56,9 @@ entries: version: 1.13.800 - apiVersion: v1 appVersion: 1.6.9 - created: "2021-01-22T21:35:45.403680219Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services + created: "2021-02-25T17:59:12.934088-08:00" + description: CoreDNS is a DNS server that chains plugins and provides Kubernetes + DNS Services digest: be60a62ec184cf6ca7b0ed917e6962e8a2578fa1eeef6a835e82d2b7709933d5 home: https://coredns.io icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png @@ -81,8 +83,9 @@ entries: version: 1.10.101 - apiVersion: v1 appVersion: 1.6.9 - created: "2021-02-24T21:41:48.738290233Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services + created: "2021-02-25T17:59:12.933187-08:00" + description: CoreDNS is a DNS server that chains 
plugins and provides Kubernetes + DNS Services digest: 869cb592cac545f579b6de6b35de82de4904566fd91826bc16546fddc48fe1c4 home: https://coredns.io icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png @@ -108,8 +111,9 @@ entries: rke2-ingress-nginx: - apiVersion: v1 appVersion: 0.35.0 - created: "2021-02-24T21:42:02.60663315Z" - description: Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + created: "2021-02-25T17:59:12.938912-08:00" + description: Ingress controller for Kubernetes using NGINX as a reverse proxy + and load balancer digest: 2480ed0be9032f8f839913e12f0528128a15483ced57c851baed605156532782 home: https://github.com/kubernetes/ingress-nginx icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png @@ -127,8 +131,9 @@ entries: version: 3.3.000 - apiVersion: v1 appVersion: 0.30.0 - created: "2021-02-19T16:11:27.47593126Z" - description: An nginx Ingress controller that uses ConfigMap to store the nginx configuration. + created: "2021-02-25T17:59:12.93704-08:00" + description: An nginx Ingress controller that uses ConfigMap to store the nginx + configuration. digest: 768ce303918a97a2d0f9a333f4eb0f2ebb3b7f54b849e83c6bdd52f8b513af9b home: https://github.com/kubernetes/ingress-nginx icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png @@ -149,7 +154,7 @@ entries: rke2-kube-proxy: - apiVersion: v1 appVersion: v1.20.2 - created: "2021-01-25T23:01:11.589999085Z" + created: "2021-02-25T17:59:12.952328-08:00" description: Install Kube Proxy. digest: 68f08c49c302bfe23e9c6f8074a21a6a3e0c90fdb16f5e6fb32a5a3ee3f7c717 keywords: @@ -165,7 +170,7 @@ entries: version: v1.20.2 - apiVersion: v1 appVersion: v1.19.8 - created: "2021-02-24T21:41:48.739048333Z" + created: "2021-02-25T17:59:12.951821-08:00" description: Install Kube Proxy. 
digest: f2bace51d33062e3ac713ebbedd48dd4df56c821dfa52da9fdf71891d601bcde keywords: @@ -181,7 +186,7 @@ entries: version: v1.19.8 - apiVersion: v1 appVersion: v1.19.7 - created: "2021-01-22T21:35:45.405178128Z" + created: "2021-02-25T17:59:12.951293-08:00" description: Install Kube Proxy. digest: def9baa9bc5c12267d3575a03a2e5f2eccc907a6058202ed09a6cd39967790ca keywords: @@ -197,7 +202,7 @@ entries: version: v1.19.7 - apiVersion: v1 appVersion: v1.19.5 - created: "2020-12-17T19:20:49.383692056Z" + created: "2021-02-25T17:59:12.950165-08:00" description: Install Kube Proxy. digest: f74f820857b79601f3b8e498e701297d71f3b37bbf94dc3ae96dfcca50fb80df keywords: @@ -213,7 +218,7 @@ entries: version: v1.19.5 - apiVersion: v1 appVersion: v1.18.16 - created: "2021-02-19T17:03:49.957724823Z" + created: "2021-02-25T17:59:12.94611-08:00" description: Install Kube Proxy. digest: a57acde11e30a9a15330ffec38686b605325b145f21935e79843b28652d46a21 keywords: @@ -229,7 +234,7 @@ entries: version: v1.18.16 - apiVersion: v1 appVersion: v1.18.15 - created: "2021-01-14T18:05:30.822746229Z" + created: "2021-02-25T17:59:12.94555-08:00" description: Install Kube Proxy. digest: 3a6429d05a3d22e3959ceac27db15f922f1033553e8e6b5da2eb7cd18ed9309f keywords: @@ -245,7 +250,7 @@ entries: version: v1.18.15 - apiVersion: v1 appVersion: v1.18.13 - created: "2020-12-10T22:07:42.184767459Z" + created: "2021-02-25T17:59:12.944801-08:00" description: Install Kube Proxy. digest: 15d192f5016b8573d2c6f17ab55fa6f14fa1352fcdef2c391a6a477b199867ec keywords: @@ -261,7 +266,7 @@ entries: version: v1.18.13 - apiVersion: v1 appVersion: v1.18.12 - created: "2020-12-07T21:17:34.244857883Z" + created: "2021-02-25T17:59:12.944083-08:00" description: Install Kube Proxy. 
digest: e1da2b245da23aaa526cb94c04ed48cd3e730b848c0d33e420dcfd5b15374f5e keywords: @@ -277,7 +282,7 @@ entries: version: v1.18.12 - apiVersion: v1 appVersion: v1.18.10 - created: "2020-10-15T22:21:23.252729387Z" + created: "2021-02-25T17:59:12.939693-08:00" description: Install Kube Proxy. digest: 1ae84231365f19d82a4ea7c6b069ce90308147ba77bef072290ef7464ff1694e keywords: @@ -293,7 +298,7 @@ entries: version: v1.18.10 - apiVersion: v1 appVersion: v1.18.9 - created: "2020-10-14T23:04:28.48143194Z" + created: "2021-02-25T17:59:12.948298-08:00" description: Install Kube Proxy. digest: e1e5b6f98c535fa5d90469bd3f731d331bdaa3f9154157d7625b367a7023f399 keywords: @@ -309,7 +314,7 @@ entries: version: v1.18.9 - apiVersion: v1 appVersion: v1.18.8 - created: "2020-09-29T00:14:59.633896455Z" + created: "2021-02-25T17:59:12.947562-08:00" description: Install Kube Proxy. digest: 7765237ddc39c416178242e7a6798d679a50f466ac18d3a412207606cd0d66ed keywords: @@ -325,7 +330,7 @@ entries: version: v1.18.8 - apiVersion: v1 appVersion: v1.18.4 - created: "2020-09-29T00:14:59.632610835Z" + created: "2021-02-25T17:59:12.946814-08:00" description: Install Kube Proxy. digest: b859363c5ecab8c46b53efa34d866b9c27840737ad1afec0eb9729b8968304fb keywords: @@ -342,7 +347,7 @@ entries: rke2-metrics-server: - apiVersion: v1 appVersion: 0.3.6 - created: "2021-02-19T16:11:27.477610954Z" + created: "2021-02-25T17:59:12.953693-08:00" description: Metrics Server is a cluster-wide aggregator of resource usage data. digest: 295435f65cc6c0c5ed8fd6b028cac5614b761789c5e09c0483170c3fd46f6e59 home: https://github.com/kubernetes-incubator/metrics-server @@ -361,7 +366,7 @@ entries: version: 2.11.100 - apiVersion: v1 appVersion: 0.3.6 - created: "2021-02-24T21:41:48.739850734Z" + created: "2021-02-25T17:59:12.952919-08:00" description: Metrics Server is a cluster-wide aggregator of resource usage data. 
digest: a7cbec2f4764c99db298fb4e1f5297246253a3228daf2747281c953059160fc9 home: https://github.com/kubernetes-incubator/metrics-server @@ -378,4 +383,4 @@ entries: urls: - assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022300.tgz version: 2.11.100-build2021022300 -generated: "2021-02-24T21:42:02.60300284Z" +generated: "2021-02-25T17:59:12.927381-08:00" From 5e6287c3a9f94623b3f8ee6f50c45cdbd3d8f3b6 Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Thu, 25 Feb 2021 18:05:35 -0800 Subject: [PATCH 05/10] Rename chart rke2-canal-v3.13.300-build20210223 This seems to be an issue with the regenerate-assets script, most likely introduced due to the weird naming scheme for this chart which was allowed in the old build scripts. The new build scripts expects the chart version to always end with the packageVersion, e.g. this should be `rke2-canal-v3.13.3-build2021022300`, but since this is already released I'm not going to tamper with it. Signed-off-by: Arvind Iyengar --- .../Chart.yaml | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 .../templates/config.yaml | 0 .../templates/crd.yaml | 0 .../templates/daemonset.yaml | 0 .../templates/rbac.yaml | 0 .../templates/serviceaccount.yaml | 0 .../values.yaml | 0 9 files changed, 0 insertions(+), 0 deletions(-) rename charts/rke2-canal/{rke2-canal-v3.13.300/build20210223 => rke2-canal-v3.13.300-build20210223}/Chart.yaml (100%) rename charts/rke2-canal/{rke2-canal-v3.13.300/build20210223 => rke2-canal-v3.13.300-build20210223}/templates/NOTES.txt (100%) rename charts/rke2-canal/{rke2-canal-v3.13.300/build20210223 => rke2-canal-v3.13.300-build20210223}/templates/_helpers.tpl (100%) rename charts/rke2-canal/{rke2-canal-v3.13.300/build20210223 => rke2-canal-v3.13.300-build20210223}/templates/config.yaml (100%) rename charts/rke2-canal/{rke2-canal-v3.13.300/build20210223 => rke2-canal-v3.13.300-build20210223}/templates/crd.yaml (100%) rename charts/rke2-canal/{rke2-canal-v3.13.300/build20210223 => 
rke2-canal-v3.13.300-build20210223}/templates/daemonset.yaml (100%) rename charts/rke2-canal/{rke2-canal-v3.13.300/build20210223 => rke2-canal-v3.13.300-build20210223}/templates/rbac.yaml (100%) rename charts/rke2-canal/{rke2-canal-v3.13.300/build20210223 => rke2-canal-v3.13.300-build20210223}/templates/serviceaccount.yaml (100%) rename charts/rke2-canal/{rke2-canal-v3.13.300/build20210223 => rke2-canal-v3.13.300-build20210223}/values.yaml (100%) diff --git a/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/Chart.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/Chart.yaml similarity index 100% rename from charts/rke2-canal/rke2-canal-v3.13.300/build20210223/Chart.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/Chart.yaml diff --git a/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/NOTES.txt b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/NOTES.txt similarity index 100% rename from charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/NOTES.txt rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/NOTES.txt diff --git a/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/_helpers.tpl b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/_helpers.tpl similarity index 100% rename from charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/_helpers.tpl rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/_helpers.tpl diff --git a/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/config.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/config.yaml similarity index 100% rename from charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/config.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/config.yaml diff --git a/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/crd.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/crd.yaml 
similarity index 100% rename from charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/crd.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/crd.yaml diff --git a/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/daemonset.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/daemonset.yaml similarity index 100% rename from charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/daemonset.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/daemonset.yaml diff --git a/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/rbac.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/rbac.yaml similarity index 100% rename from charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/rbac.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/rbac.yaml diff --git a/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/serviceaccount.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/serviceaccount.yaml similarity index 100% rename from charts/rke2-canal/rke2-canal-v3.13.300/build20210223/templates/serviceaccount.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/serviceaccount.yaml diff --git a/charts/rke2-canal/rke2-canal-v3.13.300/build20210223/values.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/values.yaml similarity index 100% rename from charts/rke2-canal/rke2-canal-v3.13.300/build20210223/values.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/values.yaml From 4ecd0f7203c230bdd9c67908c1c6ef5a1fa75a82 Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Thu, 25 Feb 2021 18:32:13 -0800 Subject: [PATCH 06/10] Temporarily point to fork --- configuration.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configuration.yaml b/configuration.yaml index 56d6667..2e3833e 100644 --- a/configuration.yaml +++ 
b/configuration.yaml @@ -1,8 +1,8 @@ template: live sync: -- url: https://github.com/rancher/rke2-charts.git - branch: main-source +- url: https://github.com/aiyengar2/rke2-charts.git + branch: migrate-source dropReleaseCandidates: true helmRepo: cname: rke2-charts.rancher.io \ No newline at end of file From 6c7ccf31139ce4716dfc82dfd238dad895ed0348 Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Thu, 25 Feb 2021 18:50:18 -0800 Subject: [PATCH 07/10] test make sync --- .../rke2-canal-v3.13.300-build2021022301.tgz | Bin 0 -> 5915 bytes .../rke2-coredns-1.10.101-build2021022302.tgz | Bin 0 -> 10575 bytes .../rke2-ingress-nginx-3.3.001.tgz | Bin 0 -> 20585 bytes .../rke2-kube-proxy-v1.19.801.tgz | Bin 0 -> 4315 bytes ...etrics-server-2.11.100-build2021022301.tgz | Bin 0 -> 5378 bytes .../v3.13.300-build2021022301/Chart.yaml | 13 + .../templates/NOTES.txt | 3 + .../templates/_helpers.tpl | 7 + .../templates/config.yaml | 67 ++ .../templates/crd.yaml | 197 ++++++ .../templates/daemonset.yaml | 262 +++++++ .../templates/rbac.yaml | 163 +++++ .../templates/serviceaccount.yaml | 6 + .../v3.13.300-build2021022301/values.yaml | 74 ++ .../1.10.101-build2021022302/.helmignore | 22 + .../1.10.101-build2021022302/Chart.yaml | 23 + .../1.10.101-build2021022302/README.md | 138 ++++ .../templates/NOTES.txt | 30 + .../templates/_helpers.tpl | 158 +++++ .../templates/clusterrole-autoscaler.yaml | 35 + .../templates/clusterrole.yaml | 38 + .../clusterrolebinding-autoscaler.yaml | 28 + .../templates/clusterrolebinding.yaml | 24 + .../templates/configmap-autoscaler.yaml | 34 + .../templates/configmap.yaml | 30 + .../templates/deployment-autoscaler.yaml | 77 ++ .../templates/deployment.yaml | 127 ++++ .../templates/poddisruptionbudget.yaml | 28 + .../templates/podsecuritypolicy.yaml | 57 ++ .../templates/service-metrics.yaml | 33 + .../templates/service.yaml | 40 ++ .../templates/serviceaccount-autoscaler.yaml | 21 + .../templates/serviceaccount.yaml | 16 + 
.../templates/servicemonitor.yaml | 33 + .../1.10.101-build2021022302/values.yaml | 202 ++++++ .../rke2-ingress-nginx/3.3.001/.helmignore | 22 + .../rke2-ingress-nginx/3.3.001/Chart.yaml | 16 + .../rke2-ingress-nginx/3.3.001/OWNERS | 5 + .../rke2-ingress-nginx/3.3.001/README.md | 221 ++++++ .../ci/daemonset-customconfig-values.yaml | 9 + .../ci/daemonset-customnodeport-values.yaml | 18 + .../3.3.001/ci/daemonset-headers-values.yaml | 10 + .../ci/daemonset-internal-lb-values.yaml | 10 + .../3.3.001/ci/daemonset-nodeport-values.yaml | 6 + ...set-tcp-udp-configMapNamespace-values.yaml | 16 + .../3.3.001/ci/daemonset-tcp-udp-values.yaml | 12 + .../3.3.001/ci/daemonset-tcp-values.yaml | 10 + .../3.3.001/ci/deamonset-default-values.yaml | 6 + .../3.3.001/ci/deamonset-metrics-values.yaml | 8 + .../3.3.001/ci/deamonset-psp-values.yaml | 9 + .../ci/deamonset-webhook-and-psp-values.yaml | 9 + .../3.3.001/ci/deamonset-webhook-values.yaml | 6 + .../ci/deployment-autoscaling-values.yaml | 7 + .../ci/deployment-customconfig-values.yaml | 7 + .../ci/deployment-customnodeport-values.yaml | 16 + .../3.3.001/ci/deployment-default-values.yaml | 4 + .../3.3.001/ci/deployment-headers-values.yaml | 9 + .../ci/deployment-internal-lb-values.yaml | 9 + .../3.3.001/ci/deployment-metrics-values.yaml | 7 + .../ci/deployment-nodeport-values.yaml | 5 + .../3.3.001/ci/deployment-psp-values.yaml | 6 + ...ent-tcp-udp-configMapNamespace-values.yaml | 15 + .../3.3.001/ci/deployment-tcp-udp-values.yaml | 11 + .../3.3.001/ci/deployment-tcp-values.yaml | 7 + .../ci/deployment-webhook-and-psp-values.yaml | 8 + .../3.3.001/ci/deployment-webhook-values.yaml | 5 + .../3.3.001/templates/NOTES.txt | 71 ++ .../3.3.001/templates/_helpers.tpl | 132 ++++ .../job-patch/clusterrole.yaml | 27 + .../job-patch/clusterrolebinding.yaml | 20 + .../job-patch/job-createSecret.yaml | 60 ++ .../job-patch/job-patchWebhook.yaml | 62 ++ .../admission-webhooks/job-patch/psp.yaml | 36 + .../admission-webhooks/job-patch/role.yaml 
| 20 + .../job-patch/rolebinding.yaml | 20 + .../job-patch/serviceaccount.yaml | 12 + .../validating-webhook.yaml | 33 + .../3.3.001/templates/clusterrole.yaml | 76 ++ .../3.3.001/templates/clusterrolebinding.yaml | 16 + .../controller-configmap-addheaders.yaml | 10 + .../controller-configmap-proxyheaders.yaml | 15 + .../templates/controller-configmap-tcp.yaml | 13 + .../templates/controller-configmap-udp.yaml | 13 + .../templates/controller-configmap.yaml | 20 + .../templates/controller-daemonset.yaml | 252 +++++++ .../templates/controller-deployment.yaml | 256 +++++++ .../3.3.001/templates/controller-hpa.yaml | 36 + .../controller-poddisruptionbudget.yaml | 15 + .../templates/controller-prometheusrules.yaml | 21 + .../3.3.001/templates/controller-psp.yaml | 86 +++ .../3.3.001/templates/controller-role.yaml | 96 +++ .../templates/controller-rolebinding.yaml | 17 + .../controller-service-internal.yaml | 44 ++ .../templates/controller-service-metrics.yaml | 43 ++ .../templates/controller-service-webhook.yaml | 33 + .../3.3.001/templates/controller-service.yaml | 83 +++ .../templates/controller-serviceaccount.yaml | 9 + .../templates/controller-servicemonitor.yaml | 42 ++ .../templates/default-backend-deployment.yaml | 97 +++ .../default-backend-poddisruptionbudget.yaml | 15 + .../templates/default-backend-psp.yaml | 33 + .../templates/default-backend-role.yaml | 14 + .../default-backend-rolebinding.yaml | 17 + .../templates/default-backend-service.yaml | 34 + .../default-backend-serviceaccount.yaml | 9 + .../rke2-ingress-nginx/3.3.001/values.yaml | 666 ++++++++++++++++++ .../rke2-kube-proxy/v1.19.801/Chart.yaml | 12 + .../v1.19.801/templates/NOTES.txt | 2 + .../v1.19.801/templates/_helpers.tpl | 21 + .../v1.19.801/templates/config.yaml | 69 ++ .../v1.19.801/templates/daemonset.yaml | 78 ++ .../v1.19.801/templates/rbac.yaml | 12 + .../v1.19.801/templates/serviceaccount.yaml | 5 + .../rke2-kube-proxy/v1.19.801/values.yaml | 142 ++++ 
.../2.11.100-build2021022301/.helmignore | 22 + .../2.11.100-build2021022301/Chart.yaml | 15 + .../2.11.100-build2021022301/README.md | 39 + .../ci/ci-values.yaml | 5 + .../templates/NOTES.txt | 11 + .../templates/_helpers.tpl | 59 ++ ...ggregated-metrics-reader-cluster-role.yaml | 18 + .../templates/auth-delegator-crb.yaml | 19 + .../templates/cluster-role.yaml | 34 + .../templates/metric-server-service.yaml | 25 + .../templates/metrics-api-service.yaml | 20 + .../templates/metrics-server-crb.yaml | 19 + .../templates/metrics-server-deployment.yaml | 88 +++ .../metrics-server-serviceaccount.yaml | 12 + .../templates/pdb.yaml | 23 + .../templates/psp.yaml | 26 + .../templates/role-binding.yaml | 20 + .../templates/tests/test-version.yaml | 21 + .../2.11.100-build2021022301/values.yaml | 113 +++ index.yaml | 99 +++ 134 files changed, 6098 insertions(+) create mode 100755 assets/rke2-canal/rke2-canal-v3.13.300-build2021022301.tgz create mode 100755 assets/rke2-coredns/rke2-coredns-1.10.101-build2021022302.tgz create mode 100755 assets/rke2-ingress-nginx/rke2-ingress-nginx-3.3.001.tgz create mode 100755 assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.801.tgz create mode 100755 assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022301.tgz create mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/Chart.yaml create mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/NOTES.txt create mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/_helpers.tpl create mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/config.yaml create mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/crd.yaml create mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/daemonset.yaml create mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/rbac.yaml create mode 100755 
charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/serviceaccount.yaml create mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/values.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/.helmignore create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/Chart.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/README.md create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/NOTES.txt create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/_helpers.tpl create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole-autoscaler.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding-autoscaler.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap-autoscaler.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment-autoscaler.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/poddisruptionbudget.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/podsecuritypolicy.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service-metrics.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service.yaml create mode 100755 
charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount-autoscaler.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/servicemonitor.yaml create mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/.helmignore create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/Chart.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/OWNERS create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/README.md create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customconfig-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customnodeport-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-headers-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-internal-lb-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-nodeport-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-configMapNamespace-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-default-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-metrics-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-psp-values.yaml create mode 100755 
charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-and-psp-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-autoscaling-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customconfig-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customnodeport-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-default-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-headers-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-internal-lb-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-metrics-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-nodeport-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-psp-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-configMapNamespace-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-and-psp-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-values.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/NOTES.txt create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/_helpers.tpl create mode 100755 
charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrole.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-createSecret.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/psp.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/role.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/rolebinding.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/serviceaccount.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/validating-webhook.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrole.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrolebinding.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-addheaders.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-proxyheaders.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-tcp.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-udp.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap.yaml create mode 100755 
charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-daemonset.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-deployment.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-hpa.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-poddisruptionbudget.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-prometheusrules.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-psp.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-role.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-rolebinding.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-internal.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-metrics.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-webhook.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-serviceaccount.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-servicemonitor.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-deployment.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-poddisruptionbudget.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-psp.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-role.yaml create mode 100755 
charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-rolebinding.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-service.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-serviceaccount.yaml create mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/values.yaml create mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/Chart.yaml create mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/NOTES.txt create mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/_helpers.tpl create mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/config.yaml create mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/daemonset.yaml create mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/rbac.yaml create mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/serviceaccount.yaml create mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/values.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/.helmignore create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/Chart.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/README.md create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/ci/ci-values.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/NOTES.txt create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/_helpers.tpl create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/aggregated-metrics-reader-cluster-role.yaml create mode 100755 
charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/auth-delegator-crb.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/cluster-role.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metric-server-service.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-api-service.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-crb.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-deployment.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-serviceaccount.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/pdb.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/psp.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/role-binding.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/tests/test-version.yaml create mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/values.yaml mode change 100644 => 100755 index.yaml diff --git a/assets/rke2-canal/rke2-canal-v3.13.300-build2021022301.tgz b/assets/rke2-canal/rke2-canal-v3.13.300-build2021022301.tgz new file mode 100755 index 0000000000000000000000000000000000000000..39ab3a7c92d01db64d6e45d8212ac31837770de8 GIT binary patch literal 5915 zcmV+$7v$(4iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBjbK5wQ@O;*<=u+ybd@&{YCC-eVtE+QlIZ=%*>nu4lTU%Qi zh-^t%g8&x*Woxs!-+l#v@1kTov7Ol~^FtDmKzF0jXf*l-kvDkY`GgbJ?k$Pbtu+bR 
zPfz=FI-Sn((UJYT)9Fac1r6{t{)&7hwZ{6NBN(y@PWyQw{ej7vzQ#vGmb`N=aEzy^LoDMmvGm zb{#B^#;P*rPREUlA6gdIkk>&-y<)e0Nw~|A+7P zU)TRjJf7z@8gF2-L>P=#N8r;yLL>wOEx?yZG=kt@n55$f=p4A<2zW=8RxKEb00tuk zx0ErMVdD+>OQDd%T#69%k}8mdFR;~!M9`yw>_T(D(>gk6by}VF{&BO>m@~pTvTg(5 z4cx>t^pZC~L$bgIxWCPJ0Ev-MR13N8Lb{NbL4o>X_Bs@sgJN=0d_!C=LJN{ipkfFTyb9)F(z6wk&|b92m|u%oyP}d!U!d z%^wAzRHaOb73B;2m5Tu1DhnXMIfe zcRXl%9*w+^sDCum-?qDoXB@Q`E~JC`f%rGb13SPX{8siwEk(=%;6lR;jV9&`3~XPb zLB?Fu(9$|{?Yq!i-7&(OmiO*yIGyk2wKp69bz6t!otskw!RK`F!1;9m3)AbAbj=B95l4+q41Xh3TcAOXM*!tiQ^M#!?bvP!)n`IxlypJM7#+t2 zpVLJw(aAB8#X{o3&Gn3!WhFR7B7olD^vb}B6^=w;eifjcQblGA;lsy~g+uwmPMtF| zC|`U?<;`dzhnU4mqjXF$6bHyPov-aO3ov40ZB~l?ZspCjcRXm`{H*pY7n>#gbIK5y zSS-wXRRaDn%Q5T+b0oDuO+r92EM32Wro(yW1u8m#!m7^hgvjiVBQ5Mk8%-DWj-0=HT7iE1XMI z%YjizE5f>P+_8AFUdb{x{EL;7Eel{T)lSNMBC!@Qi=DI)ri2kWh1z`Mb8Tde;ez<< zD*kNN3`O%61=GOx?{ha#7Q_+>(@TGe z{!P~TlYQr!1%*jTI!022sFxTk2vJMwtF+@N`*PtvY{Ez<_V?2focKO6lupozf?I?@ zSiPy-yaLs5OZ5^&EFqrJ_jWmCdz;1i`{^jdl1ssFOSG$5NDEI3Fq%C^Ndg2d$$UH|&CFl6ap5tLj6wDBLYO#ja9g#zZPq@Ll!vmfDkf z0`FP>hwt9MtFHgUgM-)g{}PYqd5t$PwjT2?IB!Hz66_1FAQ0ABNL6CZUy#U{=+?FKNK8q88Od2mA~E5esby__O*DE%IsNNi2*g zb@bUU#c{mUiF1>?1r$|!ww)N01LHK*aUI0;Fu4KGOq4N4Q*$>JSCXD=Q=3(AK;kbg zi(6p(sR(^an1$maCB}ZW9&QIF^?_R_M@TgF31e%BL=>B~6eP~hX@Y#}dUfGnp7s8H zNnEE*BZW=pn(`tMcuD)_UfRIt=|VBhI<3Rje*V!}@T%E`zh=JJF9ok`Q?DZn-tj8y zXumPBm@-@;GYzaWrt~@U3{F!Zxeu}2((o-E$8`GZ^@sk|us`XKr^CzB{&aY9(VtGs zJwhEfyD**hhJ)$FLwY__RAvGYZ7tv08&Y0fi@NDpLdNw%kk4`3^rqdGeMh%+U>5PQbA&JUN zRh*cCYZ+tdy-y-Cql{|m=(N2rNEA`NP)Ub}qn;5S+|ocVo4b+Z?KUtZ|N2|Q{YpIR zF6L3YSX*!*Vs5q;%C)fSA&-$HvE;;aZ1;%P-<8={{#9OTN%*>1QX)y9s-Qfw#g+Im zOHoQZyWL0@n__}^>4kf`(W6H_!N35#^&LAhC#Ht$lD(u>9@!UMKF|8k|i)81-$+a=+?JP62 z?P8Zg{oK5=&ZH#D{K9DQ-wdpZ_mveb3(RT%L_ARFgm%gTVaVSIW-lQ zWFKCFA5I1S0EYBZuVk4c2|YC|N!*w(z>_860eflPYecrVjVuyVu`8C2b$x9`k_J^7yV zf35!~AQ%d+@QJE{cKd%04&NPD0I|nQX#2cgYuUc#D*hQVDyq`pjv@U6uSZ<(fmTW zvg$a6K<5T5u25RofGg@Jp*r^@kWsb>ih{2-O8fd3=|AH>ZX~;b#fDRu(cwmgeDt+#sk}rmN46BPH8r-4o_YAyhsr4{$z5 
zUw2_B#!1^k>_$WelaO8L+d7{TI(f5m#%#n}Vm>8BtT(vs)(FW%P!~z=-=QDt2T+_u z_D_6Y#9X_C;i7E6feXrMNdJith+A9L0)Z1Zk}ficqlJx53kd--G&Au3K_g>DMWh7; z0thtcLi(72Pdha8s>t%}2g0}cHa@6{R41VV)Di+(GwzP7CwwO2LfYPBq((w6zv|{koYQkQN-Z^12-O`n9{G zp(cwwc4wZX466%&ZMF+^w$!ruw?Z@It8OU-{KguEAm$m%iu}k5?pmczDMNb0!L)Zd zJhS2RN@Uq4B-qkAXUnw=Tt<~{R`AznKx|EY*hes1@Ue{4`CPmn-e1HVs z1&Mye^%c%H8$2l4r}|j6vYA^^8T%Nu<-1X^21#C*WZvHVtelBqKGhGrE|1~iL!VP* z!Im-GZzH0YUAuj>3?XLJl@XYkyO>-jc`>jelA5i4>nvSNuKyHMlP2p_hBK15vPF#6 zcFgpOEge-1F*4eBE=(&~KO^Jwet)!OE!aYuqWrG}i$k<|1FBmA7n-+j0@_AC+9J|z zpVRh?@Odk#<7Nx(=nz+gwkv$aLwr*oQjZ84rAfpxQZ^qn7D;gdSfi{ zqE5GYerXOHI^)mP4he?@!5!scyyiZfj0T(9J*krP0!!G_D)?dr=DkY3)Od%Yf7_0g zY}*tgBNOeUUrR0D?!jA6nLq2B*zeT*nY?gge*3ovh-N*zY*uAS8a8BZgRkErOn$8u za20b^J?Uk;M!_P=a_4AKc=9fh6bng0l^35lp)n?d&0|z4)IxVNxJ_v)oKkBhuf)%$ ztz@6dnA6hc_yls76&T+uba~4SiL0?0t`!#fdNLZMr|B8-Z;X!267%Y{Q5(s8Er8gHi;~i3vcEyrPMvT@OP8G>v4ZNx;&kl77wK^FCX4cT2|6*OM!Ru=(m%> zWO{aaHO-jyP)e#r(c9@U2FPP=4r6G%xSeq-_*)Ig(^~8;cHqhRpm&*?f`j45>4%fv zul?caLpirm*1bc@9#Q38Pwe^ic+$Um)Cwq+il>xq5cUM6+MC3l_0I>tPY0vP$%pg) z_^N+))gONv3@81o&nM>(rG8fh_?F?+VFQW#3T1U<4!xBr3BLU{LhF6lof4 zv-9eXIB7n-z0%~{Z!;+a-g2`E^p=FX5_a>_k_BY0^AoaDh8F0Otg2wSNIV-Ycfd}r z-k2L%ae4Ldhvy}-==n5|f3zwfDl2j@`g}CHygc7jsBemA#a|9u`^P_L7qOJvJ@zJq z%?V40IUS6R6xjsl>G|cykAq?Dd9e#kV|rBS_{PHYO>&$}pZ3pAuFog^;pymdFq}*- zKV6O|C%wtwa`-6u-cj?uU5V|!5Mmq2gVE>X@%3nQc{O?D63p(F*u5ofxrbo~N#~az z$NkU!tHIb@OJ)xi-03dsS^aFUkol$a%P*A= zMpbsfo=P{4jk|{VP;UvNvvw$gm|>M9C!>tcy!!8ED3_eAHzcTvLdLv(=j@4AmTFYR zlQi&Z8sv!5%T=~vS`lfn6>ep!;#m>t$(Vb!O&*8IuK$e^}ngl(%f&+)~($e!qIo)tXq zKA4xAZS@0{T?y7n=sZ2PcTn6jzkKnWfw+N#$b~9{_pFYtSHo$g`;+MA4evtAd2{@J zmSbiba&GbJF|&$#JUZz;GkzP)uir_}Plm((`RR0U_9!ZzGoF!`@ZLl6XnJun{_j5p z(RnfN1GbV!X!(0|{B~`adrHXp zm6z-Gebl>e&)Era%Vy@?x4qm8v}WJ(cF)jGkUJ;dWx$+KhL^Hu5AZd!kZRTqa_VP2 zlmBjwj6Fc+6U2p}X>V8)8R~x1r#Amn&WQhn1;F>!|8@?KtM$L{k2|mVpD*#;lmD4XLZk5p2Ha`>$|JAO>(q|%hAg2g>e|J1jAA;rzMEQdMPCRr@IOu z%j~WzfGiyTrP*7%rHeq4`|dbmU{kmjs38)8+H(nWdoD*cwH)n<#RC%2k5a^u>cU@} 
zNy5*D)5EiHvL6;)qo}1EMEh!ma#rZLuxZ{8##637GH()&)&S47X+V{4!}UpH>-Vxo6?sXfnBe51WZlh;<7=(v`rLmRemH!?FMHhpj?0!*?v3rV@gAX(&$ zi3&`813fFNV1tMXlnAkMK~13u7L3%I)l3PWXoMu#{lkF6_y3Rrsan7}E#mDAb z=`w&EPIfy&spS?J#WE=OyHrS+L#-3Fjjsv|T5(&Cy0+Wu(#^NcicdSFnxg+&t+P|K z(kYHrnx|!VAh({KO0ct?$1uNtwyP}PHO1TbQW>dVG@VQ8=utW@$&yGjQ6HzTzi|d8 zOH}qm6jEgi$870<;B0Lp7?LK6K1~X@(cslV>)6=}K5@68%@8DpVue8x3N9p(3l26I zUkVS`>5NBCs`o*4P^u#f%v?n4#DF2RpuyC{*ecZO@pPwOu(+ z`&9V$|E;j}m3%Fpvou5YwDw=>OwMe-$L}9cU^_f`b9o|@B;7UKBz~Z_CMLfjtH9gb zAm@a^xKu7tV~QJ4lrO%|BIkrDc zVQyr3R8em|NM&qo0PKDHbK5x5;Qo62EBd2nH*wCQB)^i;SGiip&hEN4aeQ%RxAwBB z7$PAFV-ny1plnU-``f?5i!V`>PHUclZ9#>F(_xcK-mKwPG>j%7jGx z51p0AsuTB}Tqq^~z+4bUdob>LC{6P}yTM`bqZeTja*|4Y>zHwTa&`ek00Q+xU_Qni z3)Rg7xWIf&LM*%yOK=ZHQl_HUZrha@ z5ay9kjX3?FO%gQ3qHS^ce9U5&@y~x_oczkDL~)SPp?8HR*NjJ^=e5B0^;%&5f0gz1 z-w7gAB0@3O&2WN9+=CI0WAAn91ZLoE!G{qMM%@4F=P9Xf0b-I6seb{4Y1V^or<3Tv5}at1y1Osl5{>3B85Z|x>j`tPeVC-U2mW!K z35ogXx$k)?X9<=goM|u#ql#_=5mUvws0RZS3#@OUC?e|6Me!Td$FbV1A6cCFb!y3VUuO z1JjQ2CxrvNe|2;W`RZw+rQT1Wsd}EX=mLisCvsw#tyZi^U+XC^X8fMa2)+(CwhTW! 
zZwrX9i`I@0BE=yY5G>$&q~@9{O^-_uBbLRH+UpTcV>ZDNAmJ8a%g#P94pa$^7_MFj zKxrCy^${!+y)Af`nk5e@Croh+!WaoBQZgk_lzdWD*qMWrMIwNo+#{4pCX}kifecE2 z1rFk|0(hp#w;T+-sYgBV#Y9M)v>Y`dCz6CH_C2qVn7=ZrnS8CZ-;##QambPcQ>A@w z3V6eWZbGmIIo9Z}H=Ga*^h(%W5Y*`mE=iEf3A4#+S;X^Ux& z3C}iBYM(+&?_cw?2Wrijt$DDgei{Ycv@qKMbI!QX>p;y3R`R|*!YGy_z%)u3p%NH{ z-iywQ&W;v`gjqT1qbz!IY!Z> zHgNYvw>Ge9MVdy|-8IPfuDxXNONRLbidlf8^HU8cLK2mF7cGp0LmVYoD&D%a9mFg| zF%a59QN#npQv@VE)Ia9Xt5zxyDwHt5_TB)+u^e%h4M%g}0Z19cUZ=*U1WQgrQDu}8 zzBNYKt@zfQI3-dG={-eiC(!?CaHSpt#;*~NrkV%P#xiUx0U3`2MSrPNh{6$`>Y~YU zO=8B<$_%QrtMVL0eRGtl9eB)=G{zDGzCr32ToXA06vtXL*gc^(pW6*kzqWMCOa1f` z`f92mL{y2H7!M?{Oai8oa2!u|ATm2%3nD*I$7r#;w7nQyGag5Q2F$DwYwiXh!Vy+9 z$ykYn9G@B8e0X|2BH;+|FBysj_z_jzd?3`B?Ss83K0EQ*xS*W#!yML@6GS*aiC$-Rw(8QxLU9^^fwZHj*XLf6#geF{ zE=hvbo);O-y?;ROpZuQ1)6@EDibF*ZO*08X@+_2!awH(d z+$kra)>`MCwHyyH$27$4Tqi0uYoV01$K9aYZEq>Pm=ZZ@jl0?e@wW6lT2a%}NV+yX zi!x;qKC0czh44wn2_0U9BOGNhp~KT5W%=#bH#p3+cdghW3fcvZaVQ!8fO#TH&svZm z8IE4xq#O(3>|M?Jo@2GShX(^3${w6CyLzx!9t%Ex^6sNy@{G1ns!d_d zT(cuHj$3KeZ*LjCO$Z&fP%C9oLbuBnJgXUo}ecfW%5g zG1JoE!rkq3pSL>)?aqs~RdbbAi>c%Ui`IbgRzXYKQy~AMAZmiQ6noVGR&_NK5k!5% zvxp^#(Bgi`67wKrRAMSc4{qJ^F11ld_vsm2ygLFxMWFEv5vArCWnul+Zl`dl1#i8sD6zng1PF2 zG4PjGFsf=+G3qy=WK?wuea>k$Qe+S*?`?PTa02gCI!7D|L#%^{ zMeL%FV|JZ8JUxrORXqjx>c}`}T>3dhrFfh|0<`;xOE`OX^7`}nyZ0Yn{_s@o^H9bx z#1dMqJirEDzCiFE#~2A5oaJ}#?x4lsXTfNSUCEOkoJvU01SGmrD(wJYLqcdKad8Bc!NKXd)qcL&NMj0`TvFYA}n=K#aqn z5+N~4!+^6y+j!NDZZM^h8oTtfwEZO(5=X{7)NxA|$LyNWVbAlr0lYyO4M%{KPhQ6ZJo=#JzlZri?piwY0+jlF$L`$LyvZQDFe+Q&58Y+w*n< z_`f=UCboErA1^GXFhX)6>^Z&5( ze0OvHKgRXtOZyp&Nz&87p8<)noTT_=qSz9Sa1Wlf^#*M}^IqSih#I@UoTi|Y0Rqpq zYe9qugqFmI5go1Ksg9ioCf8USt4<=RmhZ-xa}r^=1<5lS!r`9&Ns^0fFd#SJw+diN zW~ko=zhj$sk&mGS*93maP)r5{M}X2)V;*?_#0I)P&okqkD~QQ;a3j>3-%$CH$9Pmwf3@RX&$H2(IsUXCP=4Z#1nJ0=(FF1 z*EElbNr;%qczf8 zeG}NnC>*)nQVs-)DoBp#5Lb?8%ta14hklGw?T^sfiDZDrj6@(pmTLFn_0=`v5yhee zy@IsDS+WbsRdcPi8rD|^(FsNrH)B0Q0uoH6wzh4+Sb>>fX>>cvr0z=OG!E7&^>l>9 z-|@upQU9}DA$FS6NXiJT?oCP^y-a 
zf~=#fBc^t}4t3gCwsm#Dkc=@kq))S2gV9iWFM-eq-&Ap*;(7kZ8^*4bR4OkUou^=O zy+$~^0u!Wm2&pC$Gf6p=BNG=+n3}1o{-i~i4cOvHY0TC%>*c+FIyui%2Hd?5$LB6- zVMhnu%MYg*U{|w2ylGy^Krf`HPX72l1SVNw8=mT2?@}G6*F`>a?@GA|+S9D_5h`JK zWyQJ#g$kB6WbiK#*t`gn7SYhOd3N=>Ebh4VsDU(No~ygd0N2aA5qL$EF2Qdzxjy)I zc4u;a6%I7-!Un{f1d&-zHehLUTk;eRB@NAz0Y;(El-&@f@F$&=(jwE zBy4ZbX`m)zdxp?jX%3fT?rbLhlM{QIY6yK4UYW#foh_9~C-0gMjAq$e!l+d`#3~4s z24|Vf(}o0~Fl1bZ!z>cY0)vpnF%FeTLVY&IO{b$Cd#A=Ai&vc&WB{{ft&!_auW7*- zlQ#6n@lGycdrBnk*cf6KrL8}TyMI&PF(Vi0i&pszr1Xp~fz*fN94Q$-7^&Y<4a>vD z8%1oRh;0tTqem>t+S2`M!Tjbe>yt@MwD$JWWB}M_GgH+aFUg#zr8`I&DF}$>I2HVSeT| zi=&Zvllc)erhPQ{MBDzudRCmW{(Sf7SMdMOi2v9i9cM8vrQ7^z+(5TBOO@nG4UVSb=Q(A{V#M0jZKT9Jdank_! zMq;Yp{!#5vnu?|&qdY&|jk5y|4}s-92j7DCIAPW(&l4#HC@un{q07o5LS*`V=mELG zQP10gW%A9px`+4UXMxEK0*svUKb2?xw-&eG+_m`pU%ed6qWWKl`}O?K!{?j)&&RoH z61H~0Klb6iInf__qSuiA_KtIR*p?-_7Vx^_Oy!M*R-V`^^j|J}R{xQ*NaO_pbk3pr z)5v^%U!BSNhd!0-s)Js=dD#`Myw(v)U3Hcg6vyi{jtUW5&qenjbwo_b8rMswNSX)t z*#OO1kza5BFP8s(LL>cZ=+b83BKiOPdENfo-QURn$GLt(`|p)mSQ|5NV+Q_4W+034 zdpuCf#vKK7*02XKt>rAaVnx>f8HZ9Iiw0=5*w^wqp;R4iR-%68V7a&aU%?DmB>xZg zJ2m;=+1uUJe|(hdYw74F}pAyDqz3yYnJ@a3knx?YU8^Sy%9?YMY1R!Y@mq*={k!M7CYeAO5!wjjtRwx=QT~g z*z}dDfh)_i!jVclxP?K?kUUhJ*m1z>h|B_3Yu5}c<#>##ydZQK<8wx+{0ky+#<8E5 zZU*?SPVDtwi_ZUbd<?%vaA9sBLsMS`)y=QSP^Vskb+ghA3uBByBXvBUEFx`_@t`Wl;~FJh^ALO2OPG zAHVolc;ff`V+-vliWn8|Xgu+Ez;`A1{ht5&h6pMAyH8JMaoC9s{d+1hw8R+)`CENc zg`3L-nJg!#8#SEP(~auls9~_YNkeVuFJGE^G|IXuL!eoqa0Q-A3pf8_#`I2T>X-}m z7fT^fS|LUA%C{TlqEYU?A|~a^(?$DAJ?iKx_zm*U)Io>p%AWbs zTwv_U1~fICpeUv8Ozl}MB^s9HguZ!RDw{s9Z&6;o45F00IhUbDW$nV^?UVt9EJ+ZJ z3IVGaxUhJBxpApj=*u~(ZMj1(h+i%xV;mTX_z=sNg`(cRpBJwTBW6R%gp4rfFLTFv z@p$}l_dnN({vBTn?f*3d0Os5O-A=dDsr&yAHuXOr=lXhP{l-fF{Y3%3vRz&(OEljh z1@2QWqQc$s=boqh!;pVB!YoLsztH@i#WA77pY+R)`aUn;amRgvZhoS98S)-K;ylB|HS*fLmZ4*7}@; z&junMY)Q}V_+W#2HUlFnoSkHgjojBTeVAdsj5W?UJe@_scJZiWVsFnd(U(|P_G^sO zo}iKqQ~D;O_6Nt2 zgNN0Xo1Z*+fulzFszLr|wH3!GQj6W@yEUxmxjvGGjuBn+OZS{A{#38=8O&Jzl~2f* 
zJuD+B)4yU_z6FpPj+VYJUq+<|FFG$emD}zU*nj)*;k@)f*C<8t369a^0*8!7q6dfN zu975}Wio$wP6dCwcQuprK#X~nm=$ByM<6knu%_kh~JEDL^h+^xfl?*szGVlOU9JA|l2jz9%s0{8X zicB^CJ>XRkkeesy|w^4Wfu7X&k3FtJGM58U#M0<#}2IoNZ<;HbROOxd_5keb*n zI8uEySnFxgQB9$+u6SNr@FaJvJaKpQ7j;eu&u^rh{jAIW=XZ>EApbLB*Yq0k=;-`Z z-5W6>&#=5^{HoxEx_NqjT)lCAdQ#lr89fp|32gPXd@fvlS>rO)GmugJWnY#_JsZVb z3iBmsp2fHaPu?qtM|AQ;D}!$c&2Aj*59Xsd+6Rn;P%JT3^YzEH5y$pj=%X|};tAux zksPVfM~K43YYl7mSXr9JIKfn+xTqwUH+`E4shb-HY8OtflWxIZ81It^P!zeZn`;_} z3Q&-H+Y>^|&b9>I^x*K|VDG>K7zk5Gej&LS|gi#_{CGDp1WpS%nEcx!2U0@r9 z>|0aFR`>ce_YGN+GK#6}ft&lUqTuOAgw4&oxx3IlE}c&7VO9zLA(y6o24oFd$($ znXmCtBXx6t+y{r4q39Kg)#}MPRK3?YYE!q$aB~vpER5;2xm{b9$h^Ezu6=aurfKR6 zi615@o~eS(KmYA_Bv-5fv*7&i?suoY|99Bgy#Mtm*IK{-P*)&at>sNwm~T_@*p!8N zybtYsqu0IU|B7bdBKhAvsMr5|zQ1?4k^hfzHS0XJFFh}C{XS64&w1BsBfo!#hU0&# zX#Fm(`SRbzfWE5y-`(3kn3Dg`H}d~cuBGFD_A#MDUDch5AY@6qel)x_t(NvAE=G@5 zEr6PxaPNKEeg>=fLO{WxDs(sfWMmUaP7_+_+UxW^Kfa4fRGRFE9*V27NBX0{R z9_ly_JWV+skQ*H73{Uv4?Ev1Zf`1@RSSH}~bvuX8>-)d6yV3t2<=O(PZ+YHF`|H!wk(8;lXO zK5Rz&2oWm4eGmiD2vW|*B*H?sy>Op!SF+>Hf1kYnZ_m5DybKu?EXLjsTEhjc*2J-` zkeR2+mkVZ=qKhD;vPklB8kd)sp0~9Hr&MwlWuX!;o~I?1)|;Wvm=uzuR8cTZ<&elx z*4O5X+X(jGZ7Av>0;8(;-*dgh0O+;b6;)+B|7e%VE0-e-AOD8qq!`aC_1jLcf-xv~ z041ESNgO*?Rye7793l#RY_Y=;5DG~aOOnReR#3=H!5Jmgt+pRQ(kClleG; zK91RS&-0+=76Tx~Do2T$3#-2C(Gf(Pr7c38rJ?yVU>p>8+80-old}s;4KeBVPK``h zWMH^~h$yH)0&3QRBCI^~`FgO>H8{;PCe)EN0fD7de7~&gyO)X;%Jc~8bkaPOT&6UD zQwlO4WCp&7PD)mc(p(kFnZR7vp&|oOa5?G;S zer-4@d4%oOw)35jp|BmWIA+(P_m!lh9^4c#w!9KKCzX&GVJMN5e|C^o_HIS6n5F!& za_`gAt;*lqAh4FkTF{7G09~VrW;PpR&ec+H908(?D;}0g(R$whfoIP?ko4KJ9=st! 
zg5nD|X0ZE4;-zMq8qpjJKT*;=Aq+D2s2$-LOS9)LlS#{hyetjgI5oE0OkQ)Gu(6U0 zaXiIjXVMBlLa>k^>2zV4FBLOivF@PX)v*@8ChWM(Ezi4!bN8Vqn19{E2^JwI`UUU> zZ69*oy1H}A_4aOit$SYgZ~3rmHMqClEnLa$1vb5S#l0) zI?O}oz)>L4a0coP6i75&9rYaU<+vLh20zXpepc^-f};A{%sDvK*lV=yJA&)DZsBxr z#^gE2LY+tt?X4&%SF)YMhg&$yl0N3@EOixeS47^z?XJ?>P+;rE1u;p86hW9~4V2s} zF=pTROwbKUvILILe_9233-_|y=_E_7p;;6pIBD3>O%#}xt5dN1;w@Q@gc=1(eZgYJ zER-p*4RN%FSy<~FH*hX`t;4T&-NI#_8TxQ4GzGI` zf|u2iF0eEflVotIOf79f*c_5-Kv3)Q=Q-fK{Ax1=+6q}=4(<`Mxzr;a<#%li(0VzH z#-1tjOt3mB+EiE8I1!;zX_ZAiDHC@KU+$Jdel7*M_J3KO16$rz8UFgWGjUvVqe;GY z!4qKppK&P7yYM9v)G;2E=D_!t+{e{@P^6kSgS&;(b3jqVvC!)X2|k-yoLo{7P z3B-tHaik70LueE%qG07SdD`80ZwicWq`-Ne1DSE4j3+3Ds~6fo8#uTt&%snN|5+!R zPMTl?IJX@@_b8nWDT^MyjMj7>VBRP}t0PrhO%*?^j`-N<&%-IWz!=6R<7AS=OHklG zMzDZ{TR3uGEwKJ>p{Yn&Wal7R!cvEIXm;hA{PQh+7GbBAt^;=mcV!AnpEsBr zgRo8tK7LxZucaU_<-N$`<@Nyv^JWSBKSr_199SNObx=?)2tIcey-TfqgE&VM6Cg5W z#3eHy6aYf=_>Nx+6%?E%ASht9d*F%I4f@}(m$Q#&tjQKiqQm{g6f_4zW<@}jMm>jn*$tit-SzB)dBKpT2#DII zY9W=VSb~C;BOvp>ZC&wr<p@ep-t%Cdnz~RoeK77{gMxW#>O$RgX$n@>i<-Vs-4Ifn6-QHuFFKg#Q*gPvbO>iL z1*KrloH*J_MCd5gvLT!~6ikQIX2;Q1n}x462h$<7xpA}=h*<9&6d#|Qb_37y{FZPu z-9hz-zc3E?1|?~XwSQQ$6pl}UV~P@+FO*ogtH~no0%N#zz7CYcmxiAe#|mWmY?BV;oO* z0KvbG-oDYcwh79K+%KqDVmMnI`g+)$cfy76Y^q`e*CPd}mKmlUi%T=9HGtCmHgcqt zzV|AO)6yT?p2_+`V~nIIClu-cWX@4-l|_o`givgwe<^2@g)G)v zLZ|)e>f?Q+Gft_eFeutgSf=n%-3e8Harl){wKPoTGsMRu#v3^@C@Gh9*V9c@zFdY-l^m!{FCRZ5zzaGE#fpPGU ze$`HwTsT?xuCG`~ne?oGK2Il&8H!w_O_;2@Muc*j1Gr$`KS!8aZ?akv&V16{%Ypot zemT|Fzf^RIEF2j^yfPs>kf19JLZPqO%CwS+q{K<8w`k6+g8xYBB_U9PJxLOq?{{QH z7@Ct8-69o;V$j5FNVQ$$1_CPtE&013Lh{0`mM< zmr@!dK6EAHpXy-}#<8d2Z|}h6l&itUCL)#)iX;x;bYMY=L`e<}YR9{rvheF2?RyL; z7A}!O0IJp9*Z~_!dCy|JW3uau8?Ok}Z*dJ6_iE-)C9}4caz=!G8BE(n(q?blowoo60RY=Sw>1C& literal 0 HcmV?d00001 diff --git a/assets/rke2-ingress-nginx/rke2-ingress-nginx-3.3.001.tgz b/assets/rke2-ingress-nginx/rke2-ingress-nginx-3.3.001.tgz new file mode 100755 index 0000000000000000000000000000000000000000..ff3dd0e27987c6430a07dc8dc82474aed9d7f9ec GIT binary patch 
literal 20585 zcmV*RKwiHeiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMZ{dK)+LAUc2ZDX>)b*vhDz7u_80FLPFrokTmfq9fVK{EkmP z8|(&2jJg{=0Gblxc)xv(eZ75>^WoO_8!4KWJ&pOs5xX0OLZPZqC=?3x7L6?&%qe2b z3g$StADk~B<@O5t-e0y;hQs0T zzkuPE+OI7U-(nxR7}|u+2jW*K zehnPr`y^lkzKHzkz!^BlgG&MBok!-xX3P2DcsLC2t?WZP4CcLCw7MhIWs{x-GzL8j zC>#WGF|D5b?fa2EIlN2lYXy!;;zPe+~n@1pGODRveTJ;U>e zLLunAy*+S^W-WzsF!jiE;6ug{9VD884HQ3RAlts%+uQ3UUrl-dU?0wr_yYinLc%a7 zbTt7K2F?Oe1twJz20EM0Hhmmj!zju@W+wt(Z~lgZ3E`tI~Q_u2H> z(a|A%b~tjrJAL-u^muf9?6}B1IzDoyr!G1@JcZAur{mM9GoHfXXn5j|5k$|X5@r~A z-WBn%vzma5*(Kpu6fqQV`En23-Q8I(0;HmhStH>GTXP8}S>-f{7^2oZ z!Y&%bC?}vbgZMBUD6D6AJ^>#-O04$4Sr8Bo{S-Te$v5*7|rMn{u>R}Y{Oot|)WU%+3>!$hx zUANGcUnJ8(&szaQ4Wwnj1#ZBUWY|5B{3x%yfWchU#FB^3y-3jeM@$+24fDm%gz-zn z0SY=8TQC$^ZDf0aQDYAUTp(jK}G)1)(E-{RIVVYfsLtGSkWHp#ViX{BO-V#HLFOVf5 zS{kB6!q}UPLmn}32N{@pFt`O%_7;*r^ z&|CG?G)MU*anVJ<5e=Ys5l+AidQ9v5YxJ)OQ{+nXT6FXd^94A+yZ}=uno7tl_<%#t z(}VMrMb1K4-XuUwv=5>XOjoi3bp8DU37+A=K@uDcT-jTJ4{%BFgaaSngB9X7IER7I zw=N1Pav(=8Afj5mdP{3M#v;T9Bh!XRPV%;*c*rcn^!K+eZnNotjxlSnD;(o~KKN|uOHEbR?VKMlcjM_OAr5NvI# zzKR$Z`sqL9d>BFp{cZ3c`tx_<+28b+O86mP{GwVx6VNvU`V#w?EAlubj7P61b@Fwk zYKc)0pyeV!kDe4q`xF}c}uTYSYr7(KWDy6bqq~ePf85+Ao47n}D~! 
zOAVep(F)x--+uh%7ySgN>Fq~K{7L)>0EOA#(drt_($Dpjn67W6eWEK_!Y&tzUw7a+ zL_P^_5ErWRxiocs1z&||0^*WnN|0l5K|A$b9v8fkqYuv6hyDQNhL3_$LZX+{C7)=K5S(!*cZuw+_42P`I4Fbz{6Po9(9Y zD)!o-^-{*luvNdDuZ-$`t-l2<6mz3=HF7FvGC+~H5(|5|4o!R;*hK+1MyqKHxWtDz zc*^zxUqB8JbQWqFmAcekb0WAS^wuNLWdkvZi+?QBlBF=UQkbUZN%hT*{Nz0OLeH9l zLY6QzZf?_rq=s!R41ahMyyKS+?U&fNk9ZvqB6W;3izm*?b?a5Hnp zA6CkblpgI1$_#Ne<4&@SXJPHe)WgaV^>HAl(Vr-E&=sOsdWD3cqdvKVSo$V80W-Wr z!WxRWb}Y%|h@hNMH4X^^!l~u@of5;2isiL_GOCcE*aT zP>*`w&v-BaWzCD9i}GgVdFq3QDrfss}Ua$`Tq))jt=g2oxuv zKhPli&CAMa=WWT}C`F-%9e7TnK#-=VRxfAi#ggpRbJ@u>C3HMXvN}>cueJt&8%f9B z`PCZ`aqJm44GzFGVk@<}k$`FIXav2F$)fKx!@f%-@tt}RQX}Vq$csyW62?cN3y>_*+43`PdA(W7H4pTF_TI)!QS{16 zPk!d9vO1*#WTm?g@h)qbvn=Pn5>(V%P8U}Ote(*s7jr_gcY+vz9NaCiv&ehQjIUX$ z_SnT_K&@eQ7(jzPbg!lb3x_Pd&0(HjTq&bdcS4v7Kj?r<-4of_)Z-hekFH@bN2#ic zY(LA*(srYvMinv1JGz%#%Qx6|RWci8h{&pRi&o~?nlwg%%q=$Hi@%+0gG z-V#$DDNd1|EB!*4hW8k0?g32APw|31)i=IogziAo8b( z?#I@AdQ&oGwg~(DN_>x5@PGc-|K@R1?tyMQRXHtDQi(u8L8uy>QR0IYiKvm+|MS28 zHv=(z=|^izR=o)=ZN~IW5$`@b6hV%xSOx{dScYQ;dz!Pw#LpMVQV~^F%yk-z5;8~^ zFjy;6!Au8N#Tw$@Y#sR@T}w_GFH`bwMq9lxDCviR{+U`JszwgS!)L?HGqC_0t9T8) zxHUV3YDRiCd^QxHJR3e!Tc=sj2C&vpG0gb40B5wb+N{$?YHCUo3$3)lV2tQ&?JZQ%*+?Zd!@oX}Ow+F6j&vi1>(LL2-FDUQAKGPF*`1SwI_YZ=w3 z>3fM$R=ORyO?BI-a6gC2ZB)8)v+tb5k@nhP%x#mJ*9}`*A#v?ud`il$@sLjf&xN5C z%u~CmRnLVbrM;!L*)36~pTDvM%Q=u56HP@wE1Pg4%pAtERVv!Z)sMdl=2cithW6MV zntIUe;3{%kpsHH>8_R+oBOaM^!nliQ?q$4<{rxvjuYP&{?(*!_i<_&n^B4OC;HhYH zvkeq1Gx(_98!+h&*wQ^*090s8VGGy1P-|op0cC6FZ$)zw>rc{V9Je8%5-;-6E3vLl z2E*XukZ>iP1JJYBEe=@HgfD6+-~&dNgC(Q`4^J~x9;m7VRn>^hujv-cgeHwriKzGx z2Z>dMr%twd5nx`XSCVH3+RB$Zx=~>Ecm*CX z4AWXie8D)R9BG^kO2Wpn{FPRO8Xqmm(}=C6+d%-T@Z51ssgsO zOU`KGA0hTqF0*H`O_Eu8m9MjkODjDC<`yvK+lm!T!x~0?qR~+%mN89?4Rmgh(54sU z&Z00K#}vq%Rsp#aD=u+v5fxM7KN081S;&dfx>Hn@p)qq5#8u_lmF37jw1@Gt+M^|s z53kfrEev0ZiHy!v@uM=_T+4%QQuRh~!9U3~2BH5j)ny=QW-(pr22?K!4sP9*LWBUw z9(WlKl~Z2(PbMWkU@^*grd^zBQJ^FD-2)TCpLUrFDN4r>#YYyrRppAmC1Zw^_=qo1 zB<&mKP$)R1FSwvz@sYx)FFVukRj_z6ip~B5#vx}Gx)*>GAA%rC)`PG`A3};bO8p31 
zmBa788&wISX{*`*G+zYq%E?eyxABSuINs)}va93iG~Fn_z6(l zQW6y?gI412Ula2s^gMNHLyY>2cpkZnowTx#g?8gtf`eBX?WADJQw2-tO~BzW9$K&< z0imX=sXA%}X-x#2Wj1vtC=~oJ%w&MW_yO`z{(u@1Y*e{r}i8T0AP{tLou^Ofj(xUmb`M6l0cQZ zS?%!%?lo`~Nz}mx#3CP64?_|(`rp%ma}wRhor9b+lY}F9}U%ENd#R1eKgb^m2ok8l=QlUq8m=a z*si%pPQs#QK^{MlzB94fjuGhfs#aGrvjXwFDRgeto_}v|uSd}mi{;WFUxWMtYuqLgXdpXbhf14yTYI)=R<_WN?D7AV@!^iBA|k z8!$W%=7@Nr4Vl$IwV{E&R9MBEu6wZ|4336J=6t;9 zqh{wH)RReeTR6Q)+b|o1r5i9!f;c(E$UZhXAx6r`Y{=gbkSTFj!X&3Ag~Q45@u6Cy zrq;CTVc9_zy$CsTqyee-t>vas%J7z+ptVlDNMBBtkS2I1dm5$}yJUvXOf1}W9CoHo z4(W(A6(E*|3=$Hy@%zioGUL!iGK3=bfTfMOUI@S)(TFm2?0CYMhMt(>tM8# zYD`KIIbIWw?kLE*M?o^DFM1muVE%k3lkIwkA}u1h1{cD<&~` zhDO;aJRMSJV(?5v-B^X-?PWsnn!eA6BbDyyZXRruCsC!h@DaxwfxL%M2NsmL>8p(^ z9nyCF!`V52Oz7a2NvzU|c;Xy9bP641ai~NYtst4S7bhn@QD2OAZthoS$&JaQENvR0 z1N|NNzoa|xinxjXQszp+vMD34PN}O{02v<6G$>0y>!p;9Hik;x;5t1nEt} z9RMkXE5WbQl)H*N??xV#WkyxW(~%S}MPDhJW5(%9z?COEpvMaN$JVwxgoS6x<}KZj zWf~gv@@Pr!OZMi}x!y~8`e_fmKEH~kNoY{Do(LhPVhmbjNt2Tdo@N-#uu3soTFiRf z35D5gqFwe(;7nhOy7|(Q$McuY^!HpLY2lcTrv^323Z%Pr%s33&_9}Nq3j@{Ug`z^*(&C0G=65&V;3|w&41} z{#B(8O+eTt*iFpg;N!>MPl%g12@_Qb-dw)~)0N2{BVIFPyew=IxA-(596XgF8+9$k z;e@tOa{K$DRp~V)l!KpNzrK2R`OEVc@2-Bie*OK25A7idI06&oF;oZho2Np_Iot!5 z6>o0=(Nb|k@Z(1?M?47+w+Q$rBS8rH;`{ywn}x`+Whm&|;qNgzANv9t4jeCX5$I<| zFWrq<@t~NnhE#h++ zV~%{u0?LhV$7=x50sZ4+e_wi{oCN{>Uopd6#nBE1-)M9u-()#et|X;1LOa1zBDus+ z4uRb-fyoL$Z`Ol)WA~F$&*+kLJaQ|2a>0QQjqe!D&|TpiX&R(hKYFTD9c=~ zR!ih>G(NS5_Rt)A)TsFsYr7KPIoPSqN6}&gox!p zXf|9_Fi6jAvn&YX6L5ETXPfA1ha@|ZlHjRr8NqQZ{Xl@2CL0jhqGjeXjowWhpIQ2C z#!ZoJcLE0574mw4nOIY*ldVkpE0g>gcm`g-yaCuaz4ZD!R$s~Z0Kn5*tcnGyUK;x>Do(t@9Ree|`L+V%gt1@b~fna#t8Tm@0T{vQpGPRF_R|LJ%wBAQqynIGTCo=v`Q&uu9@ zu^fd-z_;P<@bAJj8wWft{~#miUoFRRlN`T3B-+r zQRelE9JQnt#(r!t=k5lud%Yi4g0f2$UZ@BmgX9?YLvIy_{b%yzrh=Meu3SMvKlV(K(F*)!Y5wp4z8%m0x{Z8l78Taud%^4m;$eV+t6?4_v_>MI3mfjr-4i-9x* zCUwp{WhvN-2dcheIt1pWZo*DE=JiZ<7T?Av-W8Ma#h1Q)_)t>iPw=mZ@R$?zf~p9` zhXI@;*P5;-8ON8oPNkHJCV;0U9rK;FN-41sLDg!9{nXLmqEbSxv@F5oOR&r4RHAwf~m>xO!Mx^?!fHc_`d2;6C 
zk+QS|mhy&4n_LFghr~5;Dmsk?X3CkQeN$_yotC3fTdQtxQc5h=No_`Kzh%?wj9`Mn z2Cyp!_F5jEFuMnogcQrUCB#2b5{1&BxXMQomCO!>Xd~Nz$czX>{u0r1;)h~3>dSjI zkF23sVZjhiv4=TE%>H|F`=Iq(S+Rw6~nyc9F>0_%0O2N?%?gQA47*?A26HJtwnJ?v&^CAV)21RL7p zQ+p^#_Eb!WR}{$!QN_)dZmP+6D4WCieqt8QAof82JafCAjxv>>%T6Zg5>=ysP_V%B zg|Mp|Xqj|V#s0k0y!y_BX_d;UiWss>!-Yx=i$QA+*Z1Lbdv3N;DItZpFseO!)v6|0OGO6*(DiBcxqsXIB@R_RRhnGSD{z0KISlM}5`e9yk*} zO@0l1Q~2#O21fR1+yj7fPajf0o5{=L1I!l4oq%CB7M^F4_Gd%;wlTVzq@^)=Rjimj zENn=eKaBeUaPptf_Z|Q_8OUcKcG#wf14?q{tjQtI5=d)vi3|sgV{A8|WB&MUf=kKC z-15-iwPEK7n8Uho)HKGZc-pf#!iCyx1~AF|w1%F&t{<~T)c zKNBNg=|L5;^WlS?q*3|!aUw>8!?}E;P6632o+Ftc?c+!A-#>iNzdwEiDSu}_*Ov?> zGGsQ1Rn{AJ<%3QVvt)srT-8ajiX>Jks27CEBLVB4v`BQ!!Gmhs`OpNLEu8|Eu{l~u zlwJ5#Uo$Lf)by*?_*F&c*G@w(&v~6LnY7p0lFocx`mQGVHBgQf4k%U!;uWmu(bLCfvI#H!bS)$g7g;?`0GVs%_BZ0a?;ELiU_Rt)?U=H-4 zx|hKiI5Vc=MO3hyO6RJhLO07p^KS zhF!bcj=Fm$AJw60`eIcPjHolEq{xa7L z${!&m|3nU#<7(-oEb>p6+y!@#x@T7xyPKFc*Z(a7zjbNS|D)rA{vVxo@gF-Wk30Ua zBJjJ@@1HvITc_(TiSJelt1*sSZ_aiG_9HJ3)Bg>D-?}u$e;4+Dj*rLP`JbJX$FKjZ z1HZdj;B&=&cLCpDOW9oiYoDt=&|fzNH0b}~>B(V!|9^bk<$vEv`HcEM*77;Y?-czf zQ1n%rZe96Lj(3`8%|3tz`5zCDkMj24=%`!&@1(4=X_OY^J#+u3yg5>(1IA2k0UiJ+ z&XN)5IZk-hWWuPw!ARcuh5`o-RJLqO7$kxG_m6=-;XCzE`eZ=rW}3Z_BH1*<9I=er zsXNr1Sf0Pee6G8Ho<7E$(NUj4njfGTjU+46OM%fsjcirdWcJP=6nj0>=fm@k1NFF& zbUYQNV#Xm?CCN&jeir{!cL^4&onOB=d;KEw=*`vh^rM0YLw%*Yh?E~Avk^W4KSHc7 z^k&#aFXV-UYyvJxpk{!Wme**B(Oten+0Hl~MgdpMGs*5&Cm6LcRny*5h zJmtxFnXQ&VLZ8k={;Esu{NFysOKoYI|Bp}e^Z#&k*v0?uq?FkIa`~@Qp2-`6GB4!S zh5Oq~=JN3QbG^xMW7oH1E+1#{BNVtH(Rc0Y$&a`w`K50vZFHS`m?ge*c@a&oeq|8#%J_xc?$eJCFYqQK3sYEEUe#MPX1qAw4n+BPPpmlMK>0P=TvG(qUy0xcidm}Sm zI%iFG_$r!Ca?>Q(6zruw*fRmwM}MtCHEY7O#kCf>YPz5$3d%V*a|$ns%G_1w2kNe- zGR)#(Devop8kU#dY()A;W>2-9|J&{Kl$WOcpW!g?{~H}0clm#IQa;c8U$&>Sdoy77 zId9h9_^7%Ett_proxWhJ+xh59n^Ld;lY0na+t-4wtM8p^I~g?X{~V9<>%YU(lao&W z@1oTCe_eguSM|=#c^u}rQ-^4R<@%mbCt{rVf{QK9#yyF5(@}?*7zwiDuRxKmFaPo` zxV1Duqy2Z1&;NCF)ZPEJo1*2P(9-)&ve*9A-G2BLstDQptdgj?1~N}8b@xZLM#+Ew 
zC&+4*_|D%72aq9r#zL=#zu$<^q5_den6$PlTPy49f9|wB_hm|x{U_dK_5b*&i~rwE z$tg7MgpXO3SF2yal`x;$p?zA1eDxfT83*;6vk7;?l!Y03;AsvG?u7dpvh32XTwnep zcPH}St^amXauOA}U#R>Gu6%&}=c{i~{v)?R{v-EGlK%~M0Gif+M~BCG|NrT*i~rq8 z`Qij!F=8Q$Q+rV_0(gk9SB6_PE*MiTCDnt(BUSCUA3Vg*m0QbgW7#h;R|{o^_$R1~Iq#>I{#NJByc@q=q$vQ@s8-Y4IeLnX&aSQMAN@g?`42 z6S{hdeath-p~^mJBBqKVy5SUZG}k%Ovd@|p7FhswRF(Nft`T%sI=yC9YOCC5&Dl0< zyw(p+i1}#~U)FI+`vjOdac&^U%Zxy$w@JI{2JAd##E23-?I zRQMY6xz^=vt>zF9&ycfnyc{*+#|=s@sh+UuXq<1l(5130vIBK3wwZM*$F%yc_{>eD zY0#FHyO-V4QG4HSusY5)TCfLeJ6@wUqN#`3LWdyM^+kV~?n6~@VkP`5$QSJ}R&C13 zACthsZbedC^ToyUvik9;3UyuqL4PAPei@9|@wv&U)miWrgTtM$3=QsthV<-VIF)Qq zt;ar*TZ&=iej*GbcU=rC&&iaaEu->NX`VlP09w|{@NRq1rVX|gfgM7-Q+`QYbP;ew z1L$3Zr7k$a_@z3-P}VO6#tMjwEUW=pScY#3QddYrY2y!3Fx98XOK>hG7x;eCI=e5S zyd6x-L6qYeb|6RJ4ZK*K16U#Ciwb1( zVB4XpP0CxeYOp8KJ*V)D&I|fpKpA`s&Kw7Y9NeN6leQd*xH2S}u=u5QCv%?ys1qK5 zZwDoi@C=}Tfu}jc^H1QRU>}Uj=p)S3+u}&e9*g#Bt9+jrxn=dsC{}n_nTcBzCzp~n zTU<)Q;!~5hHCv$TmVJ8p;`O^9E-s(HyLoZ_>qRcTd}G3IWr=3eduQ6L%wt;zuBsMT zFI#@Dvs+G-U)%JK6ZL~yrKWWME9?upIz@g~sK0#v?&|u*%U{l(m*G7DDShkVsrbLI zA=->=u5qPWco8g1)k5`RB&k-l7^%R-!%Gw(#;z!tqAa}^oQFRlo*x9AuR-hB{7XW4 z6*Fg=DxE3;V9o*w>i_)u^;PB-4lu{idyYKVWm|6Le1C8LZt}0ct;GCZR-=MPR$^>3^Zi=3R_s8#Dz(oOFJ6h=lXAaZ8D1>6aAXVuCl2u@ z;Pv@c?wwZo-&8E31njC+2>|8`y;0|)`+7iN#w*DxtlL{=H46pADRyesRf@*? 
zvql2fv`{YPRh0j93q{qT^j&6acBtLzaf7bai$$qxCocgxhfH~&%B5vDGEm<3dYoKO zr&0$~flgVs&}MXc^Zc>W$vE@Nx0qCL)NAd3wd30+D0rj8M&lh>oy;J|K8T_*?Zu9dh6Yr zSEHe3Gr|AhMKckh=e>ocHq`?$Xi3ziPRL>rCmFoI4tLCcprd3m^ z?VQ>jXHS*Z@&73F$jV3cH$9Y>ruhGp!~Fekc{JTB?5I5YN0O(Et z)DKfCUZJdO5hu)n9uDT&DUzbl!w#(R?92D(!g?wYVu$Arm^pFKYn&ImQwQCtgYMKp zck19XoH|&CfxANorSj1oI_M4^Z2!=~H%~JmVf)=-gKV3R>#)HZqxH!T8LWZJjt>`X zj@f5BRM5}k-0uz*bcYJMLj~QTg6>d3ZgZqNRFE5)?odH@sGtFpO%D}ppff>t=s=b3 z(81$ixb9FwcPODdl+YbY=nf@h9(IQkwz1Uf4kbLU$i(hYLU$;kJCx8JO6U$HG+@#l zO6U$HbcYg}bZB=dAt~LVgnyqy32o#57a`o}?)Sem-~TcmAD!mn|A)t;?*6Bpl=`rN zvY6?o+deB$yi7jT>|i;bA`Zv;IKa<@;y;8{=v@)_On-*xBfeX-Iyxtgp%-0v4e1>5 zc#o{?K!kW+I24hYR5l0@l2P6D@$YFSNPVvB!~3=BC!g#3%8fVa)kiU=VQ@*R^Q$*+ zIQH}<{a1)ODBy9ZqXqaXL=$kW*IANIX3=Hl6dh3+M#g%QfRhbxn;Crwf39G;y61*br^O!k*kj(jITLh2kRGp85vn9ly82OsBVevPe=KhjO zYyD4&kN5&bj7A<}Ypn&F^#Acm{{G*?@#whI|GOyl>%aUqe%(HN%plj1mjuV>^|wy~ z%n20}ghMDI0+yc~_rA(N$tdy7)oOE?LJs*{+4y9oR8e|!nuT! zqXaEaBWsXv0tRR3COMS+pd-mv`WSMJ=R}@ES0@eJi8^A^t2?-$BRKZJnfL^VNm}os zNrw@$d%0aT3p|Oplb*;Ydlq~yu(!B^%ftDDG55PC@X>RvXu%Mz=$lF(Ei-shg?nZ{$N2IG8Z z*FrlNjk;aDn=S<}4M^Qf1)wHPw+uElIJNk^6^0Farh6d%!0ebdqFoB6)ITw;_5unpOTj^o+l5IlVnPrfot@y z2vY?30wpn^fD`d$d;pLV!V~>0b*r*MC*aAei1BLy^^yeFgh)NfqA>K3j{*+8pJGF| zSVienNv+stvE0P{T4a%{EEXnxD-V@*>sbs}^Z?toDg?YP!@w)wbQpwV?Fm`cP|y zHwL=BUZ5pv*H|bAyR}P5FWC}Y6r8sihikFA`Ce(^-*Z@N7*B-y)AgjQ2_yF8_f#K1 zT;4v37<%%138N`oT1^JOe*wPQPf^yf|E4$)M*P}lVAKBJaG3Z1j>m_?&i>m)*}nbv zgW{4e$P6SN(t(#oHPv*sXM!-Lt-E$(ESmlkIb55a3ervHP)9bTbEAVGxed#*>(~FC zbX-@eOI!V~`7O@+w$3EbwEjOnIVr?{jJx=cos|0c50*>SS@TKE80&nSO8A|dY~m?J zE62=qlFk#}TQm1Z9S%vHgYgJgX7PQL;T)!K)i)9})~Qbwzi|vstT*C)Ruz5TFJImn zq{K(ymBc6W4kj#52w*v)3)*PP6 z^to?pRUe_re*cm{_Xp^~z(KUXzn_d*ut+`>f7EI2o*Eb4MD89=hz_96wS zJoSe5&ApvFRL25Ln20aq2(fvOI}d_2Zl9{+WWwu-u+%6KzTGFjWLKK3PN z!mYs*ju$bG=*5*nJO0_W#Z3LRf`j78SX+7*S1fBdURC*}dZ8*SVx7sCZ&Lp^o7OUI=A}We zyFe+G>=p}^@_MBUmIoM=`BsckxRym(1*FuZ{3@)-b?rYLdbmydZ+vo?kN-Iw9(MUZ zc2YKpN8O4gSOmWXo3I_w$73CCZjvQQjoKJx)pTh#&#LRLYpAt^v|+UL7L2!cKsT{D 
zsv&P~(5(Y=i%4$G$lIW~I>{|Dq@+8@%I0KN=G4*AVTpB=eEC((?QP1M^Z!O6zzy^N z$?-5h|BsG_-TmJ?DQ!K|TblK^Fvmac8U0J#oNHx5v^N+wH4ip72DUKuZ)>7|__Y4O zN&J&e*`LH8vu!`jI{uettQe9u%2_6TaqeLgCM7V|!Xb4l&Sl0UQSzb*mkL zsY_#OFNnEAZ@!)zpke(# zJR0Tme~(X3y7M19DaG_B*^>oThe7hIe{*=@5uEy1`^dysB+hG>HRWHY@7t>UAC61* z|At-s*G@`({QnyHA2ox#(hRa;{>PgPl%3^&%pt#N{>M5nkJ2Yuf%wDnKQ;kt&i@FK zBhqF=CoF`o03ajtm(W{tSeQd$h%R*HoB`Y(%z^~QoJ=g>)Wg(ecqQiST1~0P*GTdM z4(6!_3$NCTtg3FI{_?8LMJjtKTH*0#=og_E9FZ!uSw*9=dos^dIs0DMzQ0mv{dT#7zM=5IYfaD zqp0(T)&Xf_WEM9buK{hiHCbn0nZvvuO|yU4Ot#Os6O?9ne{iNL;xM z#W|ur4kX$?QRtwntSUMgem2+VTK>NcFM(~?{~ZlSdH>(hNf-aIo1!HoeJN|5U&&m| z`i0!f`tRc*th}W)*P-sF)~=+>disB}O8}en|KV_)KmT`l(&hi$Nhyi{sZRL5<}T4E zzalWD|8_o=WbO+ry-Cm9Pn5khDRn)PmA&UkP9@1bNa`k|_;w|^pChl7a-UNY=IBsY?UR#R{*Wx|+*e7ASwj_XPHmi%;aAsp|o| zX=__$E&adYB~Xp_|4H8dA02o3|94YL^ndyefUVC7kMa_!^_K}1^HAxHQc5>|k+-6l zj{V*#+^@8(t^YT;1g1&;r&cYk-$ z%$Hu)(*GMKfojnIRbr@bIvc|6PO8}L*|)(jfwuxAkTnHLAVd^+I6!vKetz@rh7*cg9htK03|ie;XZ*PCNhKPRbt041TZoHuL-Ur!pPsWH6Xx zzKEtWTT^0R4>BL`gA4UDmD2;v2nBzSD}aazlj8EHi_3oi$N&Ts3GIVGNXh+5u0uQm zT`<*tieB$E0TDw70J<*V3k2S0&3lc3?Rb!}36O}qPq`C?S+gP$jd6=N)$LwSnlsJ< zQVynsaK@A$6h^kq7W>A$ z0zM4j98tU1+uH+I6v=2WhB;!ro|Puya%7KA{whEF32|_ZLIN&=8R_-jzklzLfDsS% z1b9HjD?ztxcMJ+Cl=bK)0mu(G@buK|kOjlI3>z-iwLYSe97#tpiHp`+9+ZLwsK_sbUG1qpV=J1xgQ=;RCGR=;fr2e~Z*f7Q5GbcY_dk zn|b>C)4j~^eK18HxwGGyuhU&B@YTR24wFCRKkxK+gMBHGaW9Ay$tXXG#ydGAZvxzU zP@BmbKq=>Ll-0nf=@z_E`6^IlQnMvkb_>;15b`B;!RD*`Fizktk~HB-6T z^hppV7nnlt1I(W=5E7RGNX1}@$ykgkp@1SKW5~6^MF0qO5f#dOhVKDiKz;xiLJ|RV z?;y_;+Ii}cIS2{kV7dY?ao{qz0xx0U(iNCd;)8SIA_uz2Uro|ld;%^d@`5}yT~7$^ zBNs=$RBlcT(n5j+2vHo&)5=j2#)5%s{edX4Tn*d8ffdHEt+DgnXy}~4W8^;Dx7Db; z!gC?~!CYal^zT5y(`1)wi6Tiqen)^0aRBb%O0MU9Ss%%1R?2~b^S%twdr{$ijS_sX zL~PRQjco8Ep_*pJIbY23$dx=7kic>%=GcLr;RSY}Mh!BI7zaM$3*rJS5q_Hz&As?< zHNEW1YQ%Hn&WPc9n|xpLb%5>w36R0KfQlI3WDF!$;u;8KVK!2<#Z%nf)k1}@?ddrVqV zo(ITGfMp0UjzT77`fYne#qd&bn*>*)q0nP$Bx4?<0B(Nd15WndP&?pE*uhi7mU;^nmOmdH6ZDn}M3HUtyZLcTi 
z%kzkF;^RMz6)vY{@B^9=ij4J%gL%bNiUpq!qz(U0POE!P0?*;No<(Vti}PZ)Tsa=*y^w%?!HgVfLc136Vj zffk|+xngQf%50IeRU8Tpq%+L4v8ZDMkS)kvn~K`r-zz;5nw843PvS+J;wut(D=?MT zwA39SZdh~meA{FV*Gx61hP1;627M)b^nMTwaBP{hD1ozzyY zFVvihi|c3y1_0g5WC*FcIv41H5G01UR1fc~6F93mp3?}uw|l++B#{IwmUbza>7W`t z*=R7aHK$g!Wet@?=DfGTnJ42lI9?)ebs)w|=!l3Z`z_V>FCn6k6RMPm?6br?mlztz zT}Hv;jhN&TQoq;q&+1(H@BjCXamXVELO4eQ@we(QqP^Y~MN7E>uv8nGOwC)d_h12C z05fRF`u4Z)b0RtQh;w@Y?iSb)90dY$2gp>&B}c%GkfHdDcpkYEeN&UPv{Zr$=9u() zmaviE4(-v2JzC3YGnjg0I`E-br4G(zTOOI)zPmRZos9OCt+wP0JY8bSBj|yJu&>RO zuSm8JT*wwv0;vn`pjpMaF;?S4d$=vE#)m_VRcg2Zb!`2Oe>tCpu>L05EbP`18FJQ4d&_0Ay)E1MY{~c2+1@3a%~Q1MLrr zA*wGmZ@m}V7Q2e?)6#9Sp@{K^GJ%WGOcIEY7(!!dVF3<9#FZ8BHaqt_hEm%kwm^v@ zOADevL0%YTL_{l(R`Pul)bpZ^(+tjBGNLn~>qN&gZ0~7CMxdGeHIrjuY z>6^ohl2VhNm?V2?Ndd`wkV{LcLy*#q+0Y9YFc(oEMpXq4q>x4T&<{N%`d}y1-Pvz9 z;N>ZJ@$v^X-{0K41h121AibW>a#>&qv2kQo*bSGhflJMNlC=f@uB6zFw~xQGg+ZZu}U0nZ?pA<8Ig98 zlr!Wl3E6;CgbW|bT5OPowV!hLyTpe$xP(5+xb?*{?Y$eY_bP0t9m8gG#JYhXwr;s! z#hpp}6VNXg_@6!TQX&@A#)$c3qToOFt@nUNMmk9Wg$8qSP5@Uf$c!dv$Iz1v|E?_7PY3-X%+Cg#5@$RE4xtn|+*>7F=O%WaG|t7GryK+c0Z4$1!T;v^ zrTkv3;yprcg}D@kaIqIy8MS_To)qJkVQy~`&xe74`})I#4V!2XsxefCF0 zTbgy7bgFJ>Maz|g+ZVKaNGH$KT0=^g=%|!6=AZylOoT#Mz@^X!5U|k0+*)FECw;|g z7P*tIOd&>LH1)6p{t)`){POJ83qX-WsLLu8bgWKvH2MKXauLBmwvhO&5gRvR+R*gEMxpm9m)i${Y;s z@$p~POjWgB1|e~!Gle0!+iz|gQa&iD)kN3S-bt*Z-Y=ntUFkKwQ#fUo_V3esl`{mn zw4w+IDO$?CDH!j=Yu7xIW9qtU-cf2$?iSm#^HJhCPuwq(o-k$sZ|7Dx1^Dso;^m9yaS%J`|KFmdh};)L zg;fVy^lSoV7@8vTyuZJ{kMBveiI4fYK|bX<)cQt#{9f+cEl@mlQ>1^NA$mWu$0zpieb$er{FvAeg~}itgp@3?9Bde@l5=W`fCu3H!zlgUSW&vCA0~aEn1PpOY?RO|eXQehC)@h?D{&!rw z|8+F(?tk1#$=Xo*J|8*yH!|%OH1n{ThLtfDn~^5TS=KKk?Y~%e^m;%q@zm>xAxnn) zr5z{AhWgy(__r(VZHT?;v%NYh4MGKn@M_roa|6!KDDoye~Iy%bB|G2yVV>jg!%71MBf9^hZrMfi9 zKX*baaziUl*dFJ0Y_AcT*Z+C{-_ddB|JzO3(fl7T_KSQJr{(FT7X5Xfbq;_nmbUW$ zN!R~{{GX%Yu=D@zr0mB3-%0FaEp6ogA+rCMHu7Is{}0EbF8|+7$|I5gPVzrfsgQpJ zwfviuiJx-*9~~9_|Hobaznzqw$p7c=WLGLnll<$`l%LlAFW&z+Jn7c|J1N_ef6XVk zx7xSgDThZ}n&dxZJFx#p`St&BJnrWIos`|!|Ap(L#CKUO@wo}{Z(5qgI${2v~6 
z^1qXkmH(Ef^g2E8)s&X<|DZ77CiySM|Bp{c-Tm)7DY^N73(>yaVZSe_G|K-L0stE4 z|HAqI(YV|H-%Z&AFVT#HsRx5wnS};19w(SlV_us9vYXPs?9wRzTQ~sRB>zI#^YVYx z$^TBuX7hh{+V^WIjq<;R1Hg^;|2V(@Kkn}T-BH<^{okGT{US@F{BI!uu+jcM$?yM< z#;2Y9@1$%d|6R1-*HRkge+vl!8s&dni2pr0?)-l{Dckq|eeNE0Wu4L}|62$EUf=&W zJUZ;o|L&x$;oIxN_`br@DF0hX0MfYrFP#57IvscMe>*9kWc}Y|`}kr?Yx)0_{{L}t z|8LmY|2rw4K>j<4{ens>`Tx}ReaRp0Bn^1V*dY= zZvDTTvfljPo%Z|MN^AN5l>2{0|Nr=;lmDHR?aTk??on5|(v_}srA_(&0{{U3|BgkY IS^$Uw0M)fXSpWb4 literal 0 HcmV?d00001 diff --git a/assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.801.tgz b/assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.801.tgz new file mode 100755 index 0000000000000000000000000000000000000000..1b913828438a33e7ae62bfe03b47df89a74808e1 GIT binary patch literal 4315 zcmV<15G3y(iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH+#bK5qy{mfr+Cg~m9b1jOpWvAtAcCO`;zyP3ZZPx$&&H+e@q$E3Ove~;kMrUFPI4>NW7apo!;L%`FPH|9Zb+a0tXH460 z#d7h@7i&=zMW^Gj`x`}3|8I1BI{s!fjz*`^Y4l`t{7p0(kH*nA5PdljcDK@4roV|k zn^%8w|0azU{1?=QD;dLb6tJRbeGNz7hu;M$nnd%$y3Pw}EE58LET;$)GMO+~;ObiG z)WpF6)}|EXj7!V7MD0c}XI#XPkb#;1(M%>8>M&7x03cb8F{+Ql-aQwiq6Vw{Cr}cC$FOhEQr}(_y8reZ7KNOP$#M)_TGTU^ zpaCXR*o)&1z-bHMAeeHQo_XFeRP;!!iy2s5A|wKtEDPX;Wm8d+Nv5ug<@zYfv-W(o_XW(;xz@;FYbADsZOJWSEBVsx+2lpKD|!pv{1e4k{PUmQT)lYr>)FLyFqT=Qc#srtJ>t1g27hJT#xTkvS`DIn zkqYdPMnj&+X?-`?wAJo~?QnRpMvdWU?3o0Q;gupUQcxQACq;{i1=6Lc-6rszKT#in zd%HXXQlXl`0#{F6UW9^iZOG0n_^N1%FhgcbjWA~xO%;oZj1BRoK+SWMmWdk3#6k7x z2ZCOQlU9(t<~D;uY1@R$44JSQBpD`)Em3nz(2PgX(aGsOq4T#m{(TEzCxnJ@g%&U` z?8+&R8w!+R;6?iTvn>JElcb4dD2gVhR5^nB{IoA-Ae9_A+J`<4xh0}_XRPLO4$J^g zQCdFZiH`|Y0HRfKks+}q6Obumsa_0&`qP^wYR&yh*3sQN{lyGQL&RMoqJ?XszmxC~ zuE^d=8J(cI_(%b0V9ISh$=P+z^a4!5 z5@*5gTfveAS_+5nOYW5A#3tie2S)lFwwPD($eF>C z36djgVl)wJ1omk%VUoBtk(t&FWm+gMt+zGf6=$gWCpq>poQJS?V)hPUZO) zzE{VecJN*cR>kY>g7t)Ax-HQi40omx0MnA~aEAst~PpaX=UE zs-$WajGWBW$D~xH^j<-!%447D;uDN!`Oh?%T8MUvzqz+o2&ZJD6i3UNC2#(U8H&cqC zYL5G}QX5+*{OgQUg~7yvX6jNaB_l17l66y920))3Ce^c!&bgA-Rq15MYUUy>8f(_I 
zdyO4>^a|nasz9%Y|D9ZYx*YEd6T^wapHxLOLHe53RnH!sl{QnF|GmzOGo^F?dEYv- zcNxl+j1IuKYG%5E*62*Ixzq2OLkzFhy8hW$RfeYswgO+C zaoL*5LEvZkkwsRH6g(x^# z9~j!c3|RU{wcy?2k`hgV#)Qp8cGf|IQ4~doK>z+8D3xCk;j{FQ7zW>e|9$(60A*8~ z&A6cXP$hVc)AcHnjSN$qmh)jjV30AH3W^I2QXB+ML?0M_b&JzVi(Vr!?3D(!EA;kY zUx^g;0VoZ7b#1c;`!i?Mb@$Tjg-uyG6-+J;t122{ipCDul5ycut7o^Llv<;-V$~E( z2WtwQ;?5(uz#-h(K!4|%sp~_Sme!$}m9d!b8Bc3LE-OgLhFok}=Prli`M0{Nk@ z`I)$8D+4nogn=n-eyj6?Mkih(SAx(cEqDx<^#Kgk{m!2a>`!sdB^mVqWlDb!f$D8i z0Xe+V&_MjRn#~MlJua!H=Sw^wKX4no$_o-DO_^g#!A0rRhlce57MXUnPjD`DF@yb5 zdcWhOvgnXFMuX^pCL=2$>th$qWU6wpf+SN0CC!Zh+`JmgFElQ>DjO9#v-b|ZQYBn7 zpN|ZcH5pG+lyx+7BE6wKiEY!#xm3EL#*On!F6XmS9D=eL>L!ekoK&n%oxiGUOv6el zpKu8KFFE@|>A!}5adiKB2oxw{*c*kT(>)&;ic%NKppWU?w2*VQa3@=ac0x3rtUi`d z1*xKx+=oV~EGe6NE0uU-L8CxskS0wrH%&R^IwtDr9};2B+=N__HbobZN?mAfF!klB z&2Z(6vOt|P>77Gku9kk){c)J*H6}dKGzb83M}c1J}QCM zg;-+)3oIwU@X`u3y>iOyNBHMf3=>UiHhxT|c(xi76X)hz3yREK@;5Zn-dbt1j8tun*^^`Fx*U=xcF(D|_Ke*n1c-!s&?=lnxY7^SxGp~a0@&D22 z=JC18UeQMj((m#T0F}QQe|7vr8XuY;O{`=MZ5-)-7iT@|Z zqyGD!)8p~+L;U|5Z7>)Fy*GdqK50Kz836r>;U9wlz#VfppzG*TK`y>CAy>oY$X$Ss z975*_&W8_80BQBZKj3|-EW+)rL)kdq5B%vqOA;(>>y*7qaU&#c!uL4=O#PAnU1S`{ z8Y5|)^52Qq0^H==K5+l=!5=@RJJUe8Nx)qb+{ONVVc_-_Dvh?D>vWKs{uYM{XX6n~ z8?iUoEM5D74Yl@*moC1C3)cYaxw-cz+o8E!gW05LUX?V58(Yz~KyGT(CPnKB$-M+O zVf!RE9+k96^~UED{j9((eRbh~ z@%+;C@RufRk8bL!KjY<}n(c0^jhQxg_!|_~?*KNWtZwNqCtI-Up)FW3@j&H8M24c0#I?X%ei-(|DlGL7BEt9#*p-ag%R z0NZXSTWmIm`|Fwa1EMb^@-4AYR#%kk8$pnz5N@naWSHuT?hLHe-F>cOCJ! 
z$=m_gCt0S&MJjOH>a-T7htzpA82AMnRCk&1?E+Va@U6cQjh{B;+DM(1CIq;>b-XLI zYPjC2RbQ{U^cQ(eaB{il`PF3%GbRjH-4gA!(u>#bCmW9*)gJ49tMLmjT%WfoZ%VIN z5#(rDU1K3vFLBoG?QZUuqu!uHnlPKiPGupbWeWHWnE$yD<&=71?l@5n;r6y&mk@5a z{+$tJr;TCD=Ib<8@tc%FOqAfsDu#>MYh@=I4NBWUZL!8GJ7H34ZddMn;KsIfQE0y8 z0_T{dmApvo7WErrMndl`J+$SA&DjV3!8XIagHjn+r4z^nyd}#JI{vpv>l8{2T7)Tf3^>!<=sBmt5c*eeNFa^f+ztsylgY zcd;GX==xk)q|D-#)y(32Ft0iZ^WhL-XWQ-n zRn5V?3O^?j+~@zJ2yS}#gC9M#hxX7O+P{1IzW@LL|Np+V JW?2AE003}@e<1(> literal 0 HcmV?d00001 diff --git a/assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022301.tgz b/assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022301.tgz new file mode 100755 index 0000000000000000000000000000000000000000..4881b140e09c92286b211178cb4923a8d9b648cc GIT binary patch literal 5378 zcmV+d75(ZTiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH<$bK5wQ^O?V*SKh5WE~FlQP3bCE=XJ8Xu9+mNIFqTZ)YKRv zTN2hFzyUzp8r%2VuK@5(z3e#7W>)xMi)5qGXaJ2yqtTG(IPhcCl7`AxC@)ZUk7q>c zU`}H8-GiiFuh%=++q3_By>-G10{oZ%bdzj)^r&MVo zzw3Q=S83Yk|x@|dgD3@oZ5~8Q~`mIrAnjp zFKL88rc;SiqJ;!80f|baGDJufnIc3)lfava7>6*^I#I)JcS`jv9S5O^yXWZ`B}a|Q zr#wu@21~czHNA73UkVwiq31*6Q%oq=gmRSjE5?MfA&B6T>e>IQL`W@3M5HOo1K`9^ zAsEI)o~NqJa2^n%2w#@t963ilvoM}}wO|N&uHKfAd)D+KZ-zn8?+5)}&mX6hMT6d; z-x~~edwuUg+JDjbzaT6{^>6}U&G_FR?Dd-Cf3Vlzj{nCr z<$76B#EO&>Zj-`x;KK~X^v9@4Kgh;daQ?jtsDPu<31qBk z*VU42(fw1Tpyvq<31f4(B&Jp~ssJ6A|77Yl7b!$knHM#*78w=X0?fz-f)*BOOt=xp zRD(#hGGg2Ek17bw%nuVG0fQ;bgrvU#|E^NF2v& zZRO2`(r-~4@C9tDi=PTR%+NF~K{-?~KrAAxv*^<1koa3lCDu4(@SFOl3V|?F>EZ&@zC?U1DOn(YR9H*Ry28peAo%R5Z3 zLjamghj3x%^7;&7hHJ{$NH7}Chj21^EA&XBLaws{M=4{c7)sP?2%moOcEAUTQ>wI_ zyZ3KNjGjm2R1LkIjOwh;ld{_>WD{S}DaQyxl$uT~1Jw&qcz;!32fzp4H=qo)Pr1S{ zmFR1xy!xj^;sm9{$R|-GQ7K=~6I3rxxa|+@WaN2;m>a_Nt>sh+#f5)(#4qK44-JiOA99E(C7vG9qAek5aLSqX`VjyM-OM|pi~YGfqZ zL@*{UO`jws08CRN3D=0A!Wl*>%R6l-sW$QfNEG>k+fE!9TmYwt5DSSwbjjwDw*eL1 zNQA1Zi8@zZ62+8LtIK6NUAHJS^2<{FyhOuJ!F^L;*A$q6pbQ90W+bSWROCuTG=wKl zJa1oXv9?Rw-q50q$Yw9`Z*=zbVTn3C+5&$LIxeRB9x} 
z*p6@0NPkB=O#&p2$Qcr*XTQ5wLgr0rpAE|m;(vVjFfxL}^hiqBD`dnBQNbfMgh9{r zPZH6`DawvR({6%mys`xl)1ya{UQmWpd~IeC;#L$+zyX9LA!Ev@rbcTvYH7c(nZDc7FGE+I|K+;wJ}J=CmNDp5WWXAWIsjAQ^gMbI%scNT%V zj@NlpE7d@{cH7%C0^Z2EAsjobM$9E5dPS8??RxI7X*9)5C0i>UWXgS#SRrTfc0<7@ zbZP`{5{P&%EXv3^U1a4E1Zy&>G#t zv@|VR|K(b)>b`yV;q_^tuk>e?vDW^xci33}9qjjax9h*hNJims&7Ui~GooOO$RWan zi8)3Ac-PlPp$k4b!$7&_39@z6?X7phfxrYW&9F~3Dy#mLs(h|MGpckhDyu83T<7tO zhD~@h^gJi=jNF`}2H%(D5;|Qodv@!KscyE$>KeU3bqux0l%>ja;Gtb2SQ(uyldJ?^ zyjC(ARc$Q;C5Mz}vRqQe>{@d1;s;XBP`TEwKzXmP62jfksTy6f-kQPr5aEP!gid2+Y_fE0U(SHcR1bmM zYcSezAzlpRy9<=k42&D2w($@ScI{6ZpQe)uy@HNkBFhJfBlFmaBvOd4(#BG|j>EZ?s$@(> zX6GLZ#^x|f0XE{i5wrQjpKUFMFE?-83Y`T*9G2jHCVp+^g4WvGI}?~l%Jl>~e^LHl zRHuQ>m3x=&w(NJU8&=xa44%xYZ`#(jan@lRlwDA)Z)XeKSz4 z(D|)5=xl^!@yIE; z;nsw|W;lP%4UY?X;8v2s!e!)wfzs{VEoV)NbXH4(uRWpPF|F4B^6gxV3K)qbDlLS* z6b#qx6t2<#`h#9W{~PS@?(J{&zsE?8fh5Nytcs9x%A>-7^j)asY-TO8 zWGb7@RM!?VfEh}v$+Q8HEshH;OSx23(bvMipm4Irs zl?1M+qHudV%w~*wH~bdo0i!0p$BEry%EapmhI#<)iMKI}Hh7}a@xL(CnFx2e>+Ud? z(Df~_Xw^|L!emzo2^X-!zHTb_U%Pj2)4H@;{@+E*U!K;<|K0tD|L>r;d$5)NkCMKa z{J)?6Uzh*iq@-V@NZYJFbUF?7*>f%;zIECu9;_-|f9fhtbmd(xa8*U$F1}7t7r%|_ zT|8L*+XGnC=%wqXtLg&s@k%2%eL;CS)r^MoMRjsD${ax~7|m$S0qvYo0OzhDk$M1-HCu0`Q4&}tdalw`@QD=?{4pKEB_xQ)m8gMm9qR* z(b!u%&>HQ(Y6Xdj4rf1B>T>tmtR=2IY+Nyq{HMQgQtzY-pKR}0(e%j;jZrYC6in69`fWuM$}oj)TP=muF|t?-k^*%VLE^7vnJCj zL6zQe#DdSDla~5a*pptxOMtrD$_5Bc~_9H4^c6FSzzA`b@Lg@%)J7*RuA`w!* z{N5dvE>pX=6FF6NiVd22|6bZsH4}WEx9C;Qpx&avHEwzM-lbN!fO^4=9#(^H?-KL} zbt>Bt0#;(LgKvVqvFW`6a~^SMEz$*{23DeOz^^ahq8e4A)`G5unjNniT%!K1HC45u zR@6(bQeP#o*9NO~xLV{AZv`)^fNe%p#!?Q3iC_fL0P|7CM-c8+P|HQ<`{UvK}gdH!SfaB#R? 
z|2;<9_%PWjM}gd^bhmZl3gglrbjA2@XboFE{*&ll2LDD{>;Jj8--`dy+m8Q7N$mxC zGd@tUYHCUW=l$2%`5m#jCI%ZO{bOP)e?ZT5zT6N^7(1xg;u|)=^Lk z-jvZ$59qX(iUla@bUdgfts-o{_8|K=PHW`9O1`}O?;Q@B@xKrD_qOq$9wn`J1>Z#E zpJwZ7XWUt!2j0)YJC~3)d>}^0jTnJGJbC`a1DIrS|D426jVOAb@*{P`=N?pwUJwFs zaTbL?Ql4Hd`Cwrnf)ew5GCHn3*A0?cp4^1wgFUs| z&m`p45DxbDcMrV#E=E_6e<>LHW4Dw4<<9@~51Rhpz1^+<-{Yi*_5XO7gP->Ff2wEY zx&MkjWkaYsL-x0*PTtcmq~;J(oWm(Y$y$CP*I9JVoO4@8kZoF>R?B~lO1r;)<`()z z{Qp|SbG=@aai+OnFz$p19a4E||XC&hW$R$CFDN0`<3d z<#jUG*RVl6ug1=8)+o6R+vpk{E@#NWUKT?66cQ4ilPTIu zB|glkvI}6d9tAZ+#$d|C*ct06pT00sm}q)|_D+Gyd%`1c2RKe$pyj8D#0kB^$nC}c z=~)2pIGcmuHm4y85|qFw$H05{^V`?&PrcyP>940+Nc48#Sj4g5@bUN*A}W;^OsVeL zzpfTuF#cV3?ce;xY}z${@<(;SyCtGA3D48ShQL+c^FUoD-t%Bg&b{Y>juY?s|9Ly` zkw_|11t+gwD=$c-Fxs^j&I*6kODr3#~x0n6Z$N%2$ z?soh?O4@09ka?f#m;C+m^nUmAJiFKJfbc$lnMX?}5{po|p@g zYI)@9>sF3LNw{6s{4eg_slGvdd|s;;dHo;v+}vNQxNs3{wS15lJ*kr}-{qyP21}_Ot4anHYCtVz22@nv5BC zlLD7d9Ij`9y=CV(i$ia#+I|IyV9w zFI(YuOIj9$qcw<5+wpm`7~Dbg-neDdP*u=!$E)NFd1u~P>)@>+tP)RngKOlf(XcJx#C z4aTOng_x4~TWakgHYBwQOwQ}uGw-b8u4eN7NR>A7jT_#YWuXg+ok1Zy%`Sse)v(w9 z^RgctJS&K)I_DXjZw;<2wnTPQ9T27Hl$%Nvl;mD`MV5+No`c9w30R0U9%Ckov$F#? 
zaMnNb&RV{S2K>nGz06Hr5-Bc>KzBPF&g;=ta^}W$UXQA4-AWt_bJsjrT@z;O?7|h+ zpt0_!r-)FwxYRo<4infmDeh_-BakR^0mbcoe>rFBM+Ypim6HT3b2c6EP3BSMPBRG2 z7X#nv*fyxT&erQyd|O_z1u;MtP)njHXK6)1SbFQRN|v*ya%o9%tJM-zMrc<>sTJZ{ z0A3|RS@6BR-3q*(C${1r=7|lHp5xUbm*qEbR@__VT4$MKN<~tVMgAvM`L@nG!r;oz z!$aw+K)KR{hp7HLTOmc+?Vi%gc`g0 zC9Gnuf_$ufZ9R0GzD`=S{tIb0r2fO*0=>@uGw3(%KL>-sHvZ$Iq#Zauu@}^)oEsfg zaQOQVuV29BjD|BH%*;n~P&1mS)?K&O7-Vnp@OJE7SB!>Q85CybC^gzbg$kPYCFi#U gyDI~~HZQhmo3?42{*%)G2LJ&7|2o11{s4vm0Q?`EyZ`_I literal 0 HcmV?d00001 diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/Chart.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/Chart.yaml new file mode 100755 index 0000000..1005637 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +appVersion: v3.13.3 +description: Install Canal Network Plugin. +home: https://www.projectcalico.org/ +keywords: +- canal +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-canal +sources: +- https://github.com/rancher/rke2-charts +version: v3.13.300-build2021022301 diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/NOTES.txt b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/NOTES.txt new file mode 100755 index 0000000..12a30ff --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/NOTES.txt @@ -0,0 +1,3 @@ +Canal network plugin has been installed. + +NOTE: It may take few minutes until Canal image install CNI files and node become in ready state. 
diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/_helpers.tpl b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/_helpers.tpl new file mode 100755 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/config.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/config.yaml new file mode 100755 index 0000000..37f28ef --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/config.yaml @@ -0,0 +1,67 @@ +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Canal installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ .Release.Name }}-config + namespace: kube-system +data: + # Typha is disabled. + typha_service_name: {{ .Values.calico.typhaServiceName | quote }} + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + canal_iface: {{ .Values.flannel.iface | quote }} + + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: {{ .Values.calico.masquerade | quote }} + + # Configure the MTU to use + veth_mtu: {{ .Values.calico.vethuMTU | quote }} + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. 
+ cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + + # Flannel network configuration. Mounted into the flannel container. + net-conf.json: | + { + "Network": {{ .Values.podCidr | quote }}, + "Backend": { + "Type": {{ .Values.flannel.backend | quote }} + } + } diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/crd.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/crd.yaml new file mode 100755 index 0000000..0351759 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/crd.yaml @@ -0,0 +1,197 @@ +--- +# Source: calico/templates/kdd-crds.yaml + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BlockAffinity + plural: blockaffinities + singular: 
blockaffinity + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: 
v1 + names: + kind: IPAMConfig + plural: ipamconfigs + singular: ipamconfig + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMHandle + plural: ipamhandles + singular: ipamhandle + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkSet + plural: networksets + singular: networkset diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/daemonset.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/daemonset.yaml new file mode 100755 index 0000000..1431df8 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/daemonset.yaml @@ -0,0 +1,262 @@ +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the canal container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. 
+kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Release.Name | quote }} + namespace: kube-system + labels: + k8s-app: canal +spec: + selector: + matchLabels: + k8s-app: canal + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: canal + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure canal gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: canal + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {{ template "system_default_registry" . }}{{ .Values.calico.cniImage.repository }}:{{ .Values.calico.cniImage.tag }} + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-canal.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: cni_network_config + # Set the hostname based on the k8s node name. 
+ - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: {{ template "system_default_registry" . }}{{ .Values.calico.flexvolImage.repository }}:{{ .Values.calico.flexvolImage.tag }} + command: ['/usr/local/bin/flexvol.sh', '-s', '/usr/local/bin/flexvol', '-i', 'flexvoldriver'] + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true + containers: + # Runs canal container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + command: + - "start_runit" + image: {{ template "system_default_registry" . }}{{ .Values.calico.nodeImage.repository }}:{{ .Values.calico.nodeImage.tag }} + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: {{ .Values.calico.datastoreType | quote }} + # Configure route aggregation based on pod CIDR. + - name: USE_POD_CIDR + value: {{ .Values.calico.usePodCIDR | quote }} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: {{ .Values.calico.waitForDatastore | quote }} + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Don't enable BGP. 
+ - name: CALICO_NETWORKING_BACKEND + value: {{ .Values.calico.networkingBackend | quote }} + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: {{ .Values.calico.clusterType | quote}} + # Period, in seconds, at which felix re-applies all iptables state + - name: FELIX_IPTABLESREFRESHINTERVAL + value: {{ .Values.calico.felixIptablesRefreshInterval | quote}} + - name: FELIX_IPTABLESBACKEND + value: {{ .Values.calico.felixIptablesBackend | quote}} + # No IP address needed. + - name: IP + value: "" + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: {{ .Values.calico.felixDefaultEndpointToHostAction | quote }} + # Disable IPv6 on Kubernetes. 
+ - name: FELIX_IPV6SUPPORT + value: {{ .Values.calico.felixIpv6Support | quote }} + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: {{ .Values.calico.felixLogSeverityScreen | quote }} + - name: FELIX_HEALTHENABLED + value: {{ .Values.calico.felixHealthEnabled | quote }} + # enable promentheus metrics + - name: FELIX_PROMETHEUSMETRICSENABLED + value: {{ .Values.calico.felixPrometheusMetricsEnabled | quote }} + - name: FELIX_XDPENABLED + value: {{ .Values.calico.felixXDPEnabled | quote }} + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + host: localhost + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # This container runs flannel using the kube-subnet-mgr backend + # for allocating subnets. + - name: kube-flannel + image: {{ template "system_default_registry" . }}{{ .Values.flannel.image.repository }}:{{ .Values.flannel.image.tag }} + command: + - "/opt/bin/flanneld" + {{- range .Values.flannel.args }} + - {{ . 
| quote }} + {{- end }} + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: FLANNELD_IFACE + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: canal_iface + - name: FLANNELD_IP_MASQ + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: masquerade + volumeMounts: + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + # Used by canal. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used by flannel. + - name: flannel-cfg + configMap: + name: {{ .Release.Name }}-config + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/rbac.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/rbac.yaml new file mode 100755 index 0000000..cd39730 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/rbac.yaml @@ -0,0 +1,163 @@ +--- +# Source: calico/templates/rbac.yaml + +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. 
+ - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only requried for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + +--- +# Flannel ClusterRole +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: + - apiGroups: [""] + resources: + - pods + verbs: + - get + - apiGroups: [""] + resources: + - nodes + verbs: + - list + - watch + - apiGroups: [""] + resources: + - nodes/status + verbs: + - patch +--- +# Bind the flannel ClusterRole to the canal ServiceAccount. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: canal-flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: canal-calico +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/serviceaccount.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/serviceaccount.yaml new file mode 100755 index 0000000..582d55b --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: canal + namespace: kube-system diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/values.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/values.yaml new file mode 100755 index 0000000..8730b96 --- /dev/null +++ 
b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/values.yaml @@ -0,0 +1,74 @@ +--- + +# The IPv4 cidr pool to create on startup if none exists. Pod IPs will be +# chosen from this range. +podCidr: "10.42.0.0/16" + +flannel: + # kube-flannel image + image: + repository: rancher/hardened-flannel + tag: v0.13.0-rancher1-build20210223 + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + iface: "" + # kube-flannel command arguments + args: + - "--ip-masq" + - "--kube-subnet-mgr" + # Backend for kube-flannel. Backend should not be changed + # at runtime. + backend: "vxlan" + +calico: + # CNI installation image. + cniImage: + repository: rancher/hardened-calico + tag: v3.13.3-build20210223 + # Canal node image. + nodeImage: + repository: rancher/hardened-calico + tag: v3.13.3-build20210223 + # Flexvol Image. + flexvolImage: + repository: rancher/hardened-calico + tag: v3.13.3-build20210223 + # Datastore type for canal. It can be either kuberentes or etcd. + datastoreType: kubernetes + # Wait for datastore to initialize. + waitForDatastore: true + # Configure route aggregation based on pod CIDR. + usePodCIDR: true + # Disable BGP routing. + networkingBackend: none + # Cluster type to identify the deployment type. + clusterType: "k8s,canal" + # Disable file logging so `kubectl logs` works. + disableFileLogging: true + # Disable IPv6 on Kubernetes. + felixIpv6Support: false + # Period, in seconds, at which felix re-applies all iptables state + felixIptablesRefreshInterval: 60 + # iptables backend to use for felix, defaults to auto but can also be set to nft or legacy + felixIptablesBackend: auto + # Set Felix logging to "info". + felixLogSeverityScreen: info + # Enable felix healthcheck. 
+ felixHealthEnabled: true + # Enable prometheus metrics + felixPrometheusMetricsEnabled: true + # Disable XDP Acceleration as we do not support it with our ubi7 base image + felixXDPEnabled: false + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: true + # Set Felix endpoint to host default action to ACCEPT. + felixDefaultEndpointToHostAction: ACCEPT + # Configure the MTU to use. + vethuMTU: 1450 + # Typha is disabled. + typhaServiceName: none + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/.helmignore b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/.helmignore new file mode 100755 index 0000000..7c04072 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/Chart.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/Chart.yaml new file mode 100755 index 0000000..369939b --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +appVersion: 1.6.9 +description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS + Services +home: https://coredns.io +icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png +keywords: +- coredns +- dns +- kubedns +maintainers: +- email: hello@acale.ph + name: Acaleph +- email: shashidhara.huawei@gmail.com + name: shashidharatd +- email: andor44@gmail.com + name: andor44 +- email: manuel@rueg.eu + name: mrueg +name: rke2-coredns +sources: +- https://github.com/coredns/coredns +version: 1.10.101-build2021022302 diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/README.md b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/README.md new file mode 100755 index 0000000..0d41d40 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/README.md @@ -0,0 +1,138 @@ +# CoreDNS + +[CoreDNS](https://coredns.io/) is a DNS server that chains plugins and provides DNS Services + +# TL;DR; + +```console +$ helm install --name coredns --namespace=kube-system stable/coredns +``` + +## Introduction + +This chart bootstraps a [CoreDNS](https://github.com/coredns/coredns) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. This chart will provide DNS Services and can be deployed in multiple configuration to support various scenarios listed below: + + - CoreDNS as a cluster dns service and a drop-in replacement for Kube/SkyDNS. 
This is the default mode and CoreDNS is deployed as cluster-service in kube-system namespace. This mode is chosen by setting `isClusterService` to true. + - CoreDNS as an external dns service. In this mode CoreDNS is deployed as any kubernetes app in user specified namespace. The CoreDNS service can be exposed outside the cluster by using using either the NodePort or LoadBalancer type of service. This mode is chosen by setting `isClusterService` to false. + - CoreDNS as an external dns provider for kubernetes federation. This is a sub case of 'external dns service' which uses etcd plugin for CoreDNS backend. This deployment mode as a dependency on `etcd-operator` chart, which needs to be pre-installed. + +## Prerequisites + +- Kubernetes 1.10 or later + +## Installing the Chart + +The chart can be installed as follows: + +```console +$ helm install --name coredns --namespace=kube-system stable/coredns +``` + +The command deploys CoreDNS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists various ways to override default configuration during deployment. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete coredns +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Configuration + +| Parameter | Description | Default | +|:----------------------------------------|:--------------------------------------------------------------------------------------|:------------------------------------------------------------| +| `image.repository` | The image repository to pull from | coredns/coredns | +| `image.tag` | The image tag to pull from | `v1.6.9` | +| `image.pullPolicy` | Image pull policy | IfNotPresent | +| `replicaCount` | Number of replicas | 1 | +| `resources.limits.cpu` | Container maximum CPU | `100m` | +| `resources.limits.memory` | Container maximum memory | `128Mi` | +| `resources.requests.cpu` | Container requested CPU | `100m` | +| `resources.requests.memory` | Container requested memory | `128Mi` | +| `serviceType` | Kubernetes Service type | `ClusterIP` | +| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | {} | +| `prometheus.monitor.namespace` | Selector to select which namespaces the Endpoints objects are discovered from. | `""` | +| `service.clusterIP` | IP address to assign to service | `""` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `[]` | +| `service.annotations` | Annotations to add to service | `{prometheus.io/scrape: "true", prometheus.io/port: "9153"}`| +| `serviceAccount.create` | If true, create & use serviceAccount | false | +| `serviceAccount.name` | If not set & create is true, use template fullname | | +| `rbac.create` | If true, create & use RBAC resources | true | +| `rbac.pspEnable` | Specifies whether a PodSecurityPolicy should be created. | `false` | +| `isClusterService` | Specifies whether chart should be deployed as cluster-service or normal k8s app. 
| true | +| `priorityClassName` | Name of Priority Class to assign pods | `""` | +| `servers` | Configuration for CoreDNS and plugins | See values.yml | +| `affinity` | Affinity settings for pod assignment | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `tolerations` | Tolerations for pod assignment | [] | +| `zoneFiles` | Configure custom Zone files | [] | +| `extraSecrets` | Optional array of secrets to mount inside the CoreDNS container | [] | +| `customLabels` | Optional labels for Deployment(s), Pod, Service, ServiceMonitor objects | {} | +| `podDisruptionBudget` | Optional PodDisruptionBudget | {} | +| `autoscaler.enabled` | Optionally enabled a cluster-proportional-autoscaler for CoreDNS | `false` | +| `autoscaler.coresPerReplica` | Number of cores in the cluster per CoreDNS replica | `256` | +| `autoscaler.nodesPerReplica` | Number of nodes in the cluster per CoreDNS replica | `16` | +| `autoscaler.image.repository` | The image repository to pull autoscaler from | k8s.gcr.io/cluster-proportional-autoscaler-amd64 | +| `autoscaler.image.tag` | The image tag to pull autoscaler from | `1.7.1` | +| `autoscaler.image.pullPolicy` | Image pull policy for the autoscaler | IfNotPresent | +| `autoscaler.priorityClassName` | Optional priority class for the autoscaler pod. `priorityClassName` used if not set. 
| `""` | +| `autoscaler.affinity` | Affinity settings for pod assignment for autoscaler | {} | +| `autoscaler.nodeSelector` | Node labels for pod assignment for autoscaler | {} | +| `autoscaler.tolerations` | Tolerations for pod assignment for autoscaler | [] | +| `autoscaler.resources.limits.cpu` | Container maximum CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.limits.memory` | Container maximum memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.resources.requests.cpu` | Container requested CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.requests.memory` | Container requested memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.configmap.annotations` | Annotations to add to autoscaler config map. For example to stop CI renaming them | {} | + +See `values.yaml` for configuration notes. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install --name coredns \ + --set rbac.create=false \ + stable/coredns +``` + +The above command disables automatic creation of RBAC rules. + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install --name coredns -f values.yaml stable/coredns +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + + +## Caveats + +The chart will automatically determine which protocols to listen on based on +the protocols you define in your zones. This means that you could potentially +use both "TCP" and "UDP" on a single port. +Some cloud environments like "GCE" or "Azure container service" cannot +create external loadbalancers with both "TCP" and "UDP" protocols. So +When deploying CoreDNS with `serviceType="LoadBalancer"` on such cloud +environments, make sure you do not attempt to use both protocols at the same +time. 
+ +## Autoscaling + +By setting `autoscaler.enabled = true` a +[cluster-proportional-autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler) +will be deployed. This will default to a coredns replica for every 256 cores, or +16 nodes in the cluster. These can be changed with `autoscaler.coresPerReplica` +and `autoscaler.nodesPerReplica`. When cluster is using large nodes (with more +cores), `coresPerReplica` should dominate. If using small nodes, +`nodesPerReplica` should dominate. + +This also creates a ServiceAccount, ClusterRole, and ClusterRoleBinding for +the autoscaler deployment. + +`replicaCount` is ignored if this is enabled. diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/NOTES.txt b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/NOTES.txt new file mode 100755 index 0000000..3a1883b --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/NOTES.txt @@ -0,0 +1,30 @@ +{{- if .Values.isClusterService }} +CoreDNS is now running in the cluster as a cluster-service. +{{- else }} +CoreDNS is now running in the cluster. +It can be accessed using the below endpoint +{{- if contains "NodePort" .Values.serviceType }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "coredns.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "$NODE_IP:$NODE_PORT" +{{- else if contains "LoadBalancer" .Values.serviceType }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status by running 'kubectl get svc -w {{ template "coredns.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "coredns.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo $SERVICE_IP +{{- else if contains "ClusterIP" .Values.serviceType }} + "{{ template "coredns.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local" + from within the cluster +{{- end }} +{{- end }} + +It can be tested with the following: + +1. Launch a Pod with DNS tools: + +kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools + +2. Query the DNS server: + +/ # host kubernetes diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/_helpers.tpl b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/_helpers.tpl new file mode 100755 index 0000000..cfdbef7 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/_helpers.tpl @@ -0,0 +1,158 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "coredns.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "coredns.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.servicePorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq .use_tcp true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := set $ports $port 
$innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- if index $innerdict "isudp" -}} + {{- printf "- {port: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- printf "- {port: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.containerPorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq .use_tcp true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- $innerdict := set $innerdict 
"istcp" true -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := set $ports $port $innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- if index $innerdict "isudp" -}} + {{- printf "- {containerPort: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- printf "- {containerPort: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} + {{- end -}} + {{- end -}} +{{- end -}} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "coredns.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "coredns.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole-autoscaler.yaml new file mode 100755 index 0000000..b40bb0a --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole-autoscaler.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list","watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] +# Remove the configmaps rule once below issue is fixed: +# kubernetes-incubator/cluster-proportional-autoscaler#16 + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole.yaml new file mode 100755 index 0000000..4203a02 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole.yaml @@ -0,0 +1,38 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . 
}} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +{{- if .Values.rbac.pspEnable }} +- apiGroups: + - policy + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "coredns.fullname" . }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding-autoscaler.yaml new file mode 100755 index 0000000..d1ff736 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding-autoscaler.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }}-autoscaler +subjects: +- kind: ServiceAccount + name: {{ template "coredns.fullname" . }}-autoscaler + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding.yaml new file mode 100755 index 0000000..7ae9d4f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "coredns.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap-autoscaler.yaml new file mode 100755 index 0000000..0712e0d --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap-autoscaler.yaml @@ -0,0 +1,34 @@ +{{- if .Values.autoscaler.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + {{- if .Values.customLabels }} + {{- toYaml .Values.customLabels | nindent 4 }} + {{- end }} + {{- if .Values.autoscaler.configmap.annotations }} + annotations: + {{- toYaml .Values.autoscaler.configmap.annotations | nindent 4 }} + {{- end }} +data: + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. 
+ linear: |- + { + "coresPerReplica": {{ .Values.autoscaler.coresPerReplica | float64 }}, + "nodesPerReplica": {{ .Values.autoscaler.nodesPerReplica | float64 }}, + "preventSinglePointFailure": true + } +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap.yaml new file mode 100755 index 0000000..b5069d3 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +data: + Corefile: |- + {{ range .Values.servers }} + {{- range $idx, $zone := .zones }}{{ if $idx }} {{ else }}{{ end }}{{ default "" $zone.scheme }}{{ default "." 
$zone.zone }}{{ else }}.{{ end -}} + {{- if .port }}:{{ .port }} {{ end -}} + { + {{- range .plugins }} + {{ .name }} {{ if .parameters }} {{if eq .name "kubernetes" }} {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDomain }} {{ end }} {{.parameters}}{{ end }}{{ if .configBlock }} { +{{ .configBlock | indent 12 }} + }{{ end }} + {{- end }} + } + {{ end }} + {{- range .Values.zoneFiles }} + {{ .filename }}: {{ toYaml .contents | indent 4 }} + {{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment-autoscaler.yaml new file mode 100755 index 0000000..6ddd209 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment-autoscaler.yaml @@ -0,0 +1,77 @@ +{{- if .Values.autoscaler.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}}-autoscaler + template: + metadata: + labels: + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.customLabels }} + {{ toYaml .Values.customLabels | nindent 8 }} + {{- end }} + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/configmap-autoscaler.yaml") . | sha256sum }} + {{- if .Values.isClusterService }} + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' + {{- end }} + spec: + serviceAccountName: {{ template "coredns.fullname" . }}-autoscaler + {{- $priorityClassName := default .Values.priorityClassName .Values.autoscaler.priorityClassName }} + {{- if $priorityClassName }} + priorityClassName: {{ $priorityClassName | quote }} + {{- end }} + {{- if .Values.autoscaler.affinity }} + affinity: +{{ toYaml .Values.autoscaler.affinity | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.tolerations }} + tolerations: +{{ toYaml .Values.autoscaler.tolerations | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.nodeSelector }} + nodeSelector: +{{ toYaml .Values.autoscaler.nodeSelector | indent 8 }} + {{- end }} + containers: + - name: autoscaler + image: {{ template "system_default_registry" . }}{{ .Values.autoscaler.image.repository }}:{{ .Values.autoscaler.image.tag }} + imagePullPolicy: {{ .Values.autoscaler.image.pullPolicy }} + resources: +{{ toYaml .Values.autoscaler.resources | indent 10 }} + command: + - /cluster-proportional-autoscaler + - --namespace={{ .Release.Namespace }} + - --configmap={{ template "coredns.fullname" . }}-autoscaler + - --target=Deployment/{{ template "coredns.fullname" . 
}} + - --logtostderr=true + - --v=2 +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment.yaml new file mode 100755 index 0000000..0ed3c52 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment.yaml @@ -0,0 +1,127 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + {{- if not .Values.autoscaler.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 10% + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + template: + metadata: + labels: + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} + {{- if .Values.isClusterService }} + scheduler.alpha.kubernetes.io/critical-pod: '' + {{- end }} + spec: + serviceAccountName: {{ template "coredns.serviceAccountName" . }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.isClusterService }} + dnsPolicy: Default + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if or (.Values.isClusterService) (.Values.tolerations) }} + tolerations: + {{- if .Values.isClusterService }} + - key: CriticalAddonsOnly + operator: Exists + {{- end }} + {{- if .Values.tolerations }} +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + containers: + - name: "coredns" + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns +{{- range .Values.extraSecrets }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: true +{{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + ports: +{{ include "coredns.containerPorts" . | indent 8 }} + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + volumes: + - name: config-volume + configMap: + name: {{ template "coredns.fullname" . 
}} + items: + - key: Corefile + path: Corefile + {{ range .Values.zoneFiles }} + - key: {{ .filename }} + path: {{ .filename }} + {{ end }} +{{- range .Values.extraSecrets }} + - name: {{ .name }} + secret: + secretName: {{ .name }} + defaultMode: 400 +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/poddisruptionbudget.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/poddisruptionbudget.yaml new file mode 100755 index 0000000..1fee2de --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/poddisruptionbudget.yaml @@ -0,0 +1,28 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}}
+{{ toYaml .Values.podDisruptionBudget | indent 2 }}
+{{- end }}
diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/podsecuritypolicy.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/podsecuritypolicy.yaml
new file mode 100755
index 0000000..4e7a36f
--- /dev/null
+++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/podsecuritypolicy.yaml
@@ -0,0 +1,57 @@
+{{- if .Values.rbac.pspEnable }}
+{{ if .Capabilities.APIVersions.Has "policy/v1beta1" }}
+apiVersion: policy/v1beta1
+{{ else }}
+apiVersion: extensions/v1beta1
+{{ end -}}
+kind: PodSecurityPolicy
+metadata:
+  name: {{ template "coredns.fullname" . }}
+  labels:
+    app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+    app.kubernetes.io/instance: {{ .Release.Name | quote }}
+    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+    {{- if .Values.isClusterService }}
+    k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }}
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "CoreDNS"
+    {{- else }}
+    app.kubernetes.io/name: {{ template "coredns.name" . }}
+    {{- end }}
+spec:
+  privileged: false
+  # Required to prevent escalations to root.
+  allowPrivilegeEscalation: false
+  # Add back CAP_NET_BIND_SERVICE so that coredns can run on port 53
+  allowedCapabilities:
+  - NET_BIND_SERVICE
+  # Allow core volume types.
+  volumes:
+    - 'configMap'
+    - 'emptyDir'
+    - 'projected'
+    - 'secret'
+    - 'downwardAPI'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    # Require the container to run without root privileges.
+    rule: 'RunAsAny'
+  seLinux:
+    # This policy assumes the nodes are using AppArmor rather than SELinux.
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service-metrics.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service-metrics.yaml new file mode 100755 index 0000000..1657cd7 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service-metrics.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "coredns.fullname" . }}-metrics + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + selector: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + ports: + - name: metrics + port: 9153 + targetPort: 9153 +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service.yaml new file mode 100755 index 0000000..95c858f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "coredns.fullname" . 
}} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + selector: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{ else }} + clusterIP: {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDNS }} + {{- end }} + {{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} + {{- end }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: +{{ include "coredns.servicePorts" . | indent 2 -}} + type: {{ default "ClusterIP" .Values.serviceType }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount-autoscaler.yaml new file mode 100755 index 0000000..1b218d2 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount-autoscaler.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount.yaml new file mode 100755 index 0000000..23f29a1 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.serviceAccountName" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/servicemonitor.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/servicemonitor.yaml new file mode 100755 index 0000000..ca0b691 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "coredns.fullname" . }} + {{- if .Values.prometheus.monitor.namespace }} + namespace: {{ .Values.prometheus.monitor.namespace }} + {{- end }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics + endpoints: + - port: metrics +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/values.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/values.yaml new file mode 100755 index 0000000..a1703d6 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/values.yaml @@ -0,0 +1,202 @@ +# Default values for coredns. +# This is a YAML-formatted file. 
+# Declare variables to be passed into your templates. + +image: + repository: rancher/hardened-coredns + tag: "v1.6.9-build20210223" + pullPolicy: IfNotPresent + +replicaCount: 1 + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +serviceType: "ClusterIP" + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + +service: +# clusterIP: "" +# loadBalancerIP: "" +# externalTrafficPolicy: "" + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9153" + +serviceAccount: + create: true + # The name of the ServiceAccount to use + # If not set and create is true, a name is generated using the fullname template + name: coredns + +rbac: + # If true, create & use RBAC resources + create: true + # If true, create and use PodSecurityPolicy + pspEnable: false + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + # name: + +# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app. +isClusterService: true + +# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set. +priorityClassName: "system-cluster-critical" + +# Default zone is what Kubernetes recommends: +# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options +servers: +- zones: + - zone: . 
+ port: 53 + plugins: + - name: errors + # Serves a /health endpoint on :8080, required for livenessProbe + - name: health + configBlock: |- + lameduck 5s + # Serves a /ready endpoint on :8181, required for readinessProbe + - name: ready + # Required to query kubernetes API for data + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + # Serves a /metrics endpoint on :9153, required for serviceMonitor + - name: prometheus + parameters: 0.0.0.0:9153 + - name: forward + parameters: . /etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance + +# Complete example with all the options: +# - zones: # the `zones` block can be left out entirely, defaults to "." +# - zone: hello.world. # optional, defaults to "." +# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS) +# - zone: foo.bar. +# scheme: dns:// +# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol +# # Note that this will not work if you are also exposing tls or grpc on the same server +# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS) +# plugins: # the plugins to use for this server block +# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it! 
+# parameters: foo bar # list of parameters after the plugin +# configBlock: |- # if the plugin supports extra block style config, supply it here +# hello world +# foo bar + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core +# for example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core +# for example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] + +# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +podDisruptionBudget: {} + +# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/ +zoneFiles: [] +# - filename: example.db +# domain: example.com +# contents: | +# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600 +# example.com. IN NS b.iana-servers.net. +# example.com. IN NS a.iana-servers.net. +# example.com. IN A 192.168.99.102 +# *.example.com. IN A 192.168.99.102 + +# optional array of secrets to mount inside coredns container +# possible usecase: need for secure connection with etcd backend +extraSecrets: [] +# - name: etcd-client-certs +# mountPath: /etc/coredns/tls/etcd +# - name: some-fancy-secret +# mountPath: /etc/wherever + +# Custom labels to apply to Deployment, Pod, Service, ServiceMonitor. Including autoscaler if enabled. 
+customLabels: {} + +## Configue a cluster-proportional-autoscaler for coredns +# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler +autoscaler: + # Enabled the cluster-proportional-autoscaler + enabled: false + + # Number of cores in the cluster per coredns replica + coresPerReplica: 256 + # Number of nodes in the cluster per coredns replica + nodesPerReplica: 16 + + image: + repository: k8s.gcr.io/cluster-proportional-autoscaler-amd64 + tag: "1.7.1" + pullPolicy: IfNotPresent + + # Optional priority class to be used for the autoscaler pods. priorityClassName used if not set. + priorityClassName: "" + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core + affinity: {} + + # Node labels for pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core + tolerations: [] + + # resources for autoscaler pod + resources: + requests: + cpu: "20m" + memory: "10Mi" + limits: + cpu: "20m" + memory: "10Mi" + + # Options for autoscaler configmap + configmap: + ## Annotations for the coredns-autoscaler configmap + # i.e. strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed + annotations: {} +k8sApp : "kube-dns" + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/.helmignore b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/.helmignore new file mode 100755 index 0000000..50af031 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/Chart.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/Chart.yaml new file mode 100755 index 0000000..dc27305 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +appVersion: 0.35.0 +description: Ingress controller for Kubernetes using NGINX as a reverse proxy and + load balancer +home: https://github.com/kubernetes/ingress-nginx +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +kubeVersion: '>=1.16.0-0' +maintainers: +- name: ChiefAlexander +name: rke2-ingress-nginx +sources: +- https://github.com/kubernetes/ingress-nginx +version: 3.3.001 diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/OWNERS b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/OWNERS new file mode 100755 index 0000000..7aadb8d --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/OWNERS @@ -0,0 +1,5 @@ +approvers: + - ChiefAlexander + +reviewers: + - ChiefAlexander diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/README.md b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/README.md new file mode 100755 index 0000000..1ab152a --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/README.md @@ -0,0 +1,221 @@ +# ingress-nginx + +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +To use, add the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. 
+ +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes v1.16+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo add stable https://kubernetes-charts.storage.googleapis.com/ +helm repo update +``` + +## Install Chart + +```console +# Helm 3 +$ helm install [RELEASE_NAME] ingress-nginx/ingress-nginx + +# Helm 2 +$ helm install --name [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +# Helm 3 +$ helm uninstall [RELEASE_NAME] + +# Helm 2 +# helm delete --purge [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +# Helm 3 or 2 +$ helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever it's pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: + +1. 
For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. [Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +# Helm 2 +$ helm inspect values ingress-nginx/ingress-nginx + +# Helm 3 +$ helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. 
+ +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. 
+``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/master/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. 
+ +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + # Any other annotation can be declared here. +``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB + cloud.google.com/load-balancer-type: "Internal" + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. + +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. +**This feature is enabled by default since 0.31.0.** + +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. 
+ +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customconfig-values.yaml new file mode 100755 index 0000000..e12b534 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,9 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + + config: + use-proxy-protocol: "true" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customnodeport-values.yaml new file mode 100755 index 0000000..cfc545f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,18 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-headers-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-headers-values.yaml new file mode 100755 index 0000000..ff82cd9 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-headers-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-internal-lb-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-internal-lb-values.yaml new file mode 100755 index 0000000..443e39d --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-internal-lb-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-nodeport-values.yaml new file mode 100755 index 0000000..6d6605f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,6 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-configMapNamespace-values.yaml new file mode 100755 index 0000000..afb5487 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,16 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-values.yaml new file mode 100755 index 0000000..7b4d7cb --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-values.yaml @@ -0,0 +1,12 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: 
false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-values.yaml new file mode 100755 index 0000000..a359a6a --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-default-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-default-values.yaml new file mode 100755 index 0000000..e63a7f5 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-default-values.yaml @@ -0,0 +1,6 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-metrics-values.yaml new file mode 100755 index 0000000..1e5190a --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-metrics-values.yaml @@ -0,0 +1,8 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-psp-values.yaml new file mode 100755 index 0000000..017b60a --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-psp-values.yaml @@ -0,0 +1,9 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + 
+podSecurityPolicy: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-and-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-and-psp-values.yaml new file mode 100755 index 0000000..88aafc6 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-and-psp-values.yaml @@ -0,0 +1,9 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-values.yaml new file mode 100755 index 0000000..6e3b371 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-values.yaml @@ -0,0 +1,6 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-autoscaling-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-autoscaling-values.yaml new file mode 100755 index 0000000..5314cec --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-autoscaling-values.yaml @@ -0,0 +1,7 @@ +controller: + autoscaling: + enabled: true + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customconfig-values.yaml new file mode 100755 index 0000000..f232531 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customconfig-values.yaml @@ -0,0 +1,7 @@ +controller: + config: + use-proxy-protocol: "true" + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git 
a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customnodeport-values.yaml new file mode 100755 index 0000000..9eda282 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,16 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-default-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-default-values.yaml new file mode 100755 index 0000000..93a393c --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-default-values.yaml @@ -0,0 +1,4 @@ +# Left blank to test default values +controller: + service: + type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-headers-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-headers-values.yaml new file mode 100755 index 0000000..665fd48 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-headers-values.yaml @@ -0,0 +1,9 @@ +controller: + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-internal-lb-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-internal-lb-values.yaml new file mode 100755 index 0000000..892f6de --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-internal-lb-values.yaml @@ -0,0 +1,9 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: 
true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-metrics-values.yaml new file mode 100755 index 0000000..887ed0f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-metrics-values.yaml @@ -0,0 +1,7 @@ +controller: + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-nodeport-values.yaml new file mode 100755 index 0000000..84f1f75 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-nodeport-values.yaml @@ -0,0 +1,5 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-psp-values.yaml new file mode 100755 index 0000000..e339c69 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-psp-values.yaml @@ -0,0 +1,6 @@ +controller: + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-configMapNamespace-values.yaml new file mode 100755 index 0000000..141e06b --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,15 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: 
"default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-values.yaml new file mode 100755 index 0000000..bc29abe --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-values.yaml @@ -0,0 +1,11 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-values.yaml new file mode 100755 index 0000000..b7f54c0 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-values.yaml @@ -0,0 +1,7 @@ +controller: + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-and-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-and-psp-values.yaml new file mode 100755 index 0000000..a829c36 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-and-psp-values.yaml @@ -0,0 +1,8 @@ +controller: + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-values.yaml new file mode 100755 index 0000000..4f18a70 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-values.yaml @@ -0,0 +1,5 @@ +controller: + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git 
a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/NOTES.txt b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/NOTES.txt new file mode 100755 index 0000000..60fb2c1 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/NOTES.txt @@ -0,0 +1,71 @@ +The ingress-nginx controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." + echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." +{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . 
}}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." +{{- end }} + +An example Ingress that makes use of the controller: + + apiVersion: networking.k8s.io/v1beta1 + kind: Ingress + metadata: + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + name: example + namespace: foo + spec: + rules: + - host: www.example.com + http: + paths: + - backend: + serviceName: exampleService + servicePort: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. 
##### +################################################################################# +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/_helpers.tpl b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/_helpers.tpl new file mode 100755 index 0000000..61aadf0 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/_helpers.tpl @@ -0,0 +1,132 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ingress-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ingress-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.controller.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) "controller" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. 
+ +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "ingress-nginx.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) "defaultbackend" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "ingress-nginx.labels" -}} +helm.sh/chart: {{ include "ingress-nginx.chart" . }} +{{ include "ingress-nginx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "ingress-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "ingress-nginx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "ingress-nginx.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Check the ingress controller version tag is at most three versions behind the last release +*/}} +{{- define "isControllerTagValid" -}} +{{- if not (semverCompare ">=0.27.0-0" (trimPrefix "nginx-" .Values.controller.image.tag)) -}} +{{- fail "Controller container image tag should be 0.27.0 or higher" -}} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100755 index 0000000..7eb5738 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ include "ingress-nginx.fullname" . }}-admission +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100755 index 0000000..9793125 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . 
}}-admission + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-createSecret.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100755 index 0000000..04a3e10 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,60 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: create + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{ template "system_default_registry" . 
}}{{.repository}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100755 index 0000000..43d9bf5 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,62 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-patch + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: admission-webhook +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-patch + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: patch + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{ template "system_default_registry" . }}{{.repository}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - patch + - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . 
}}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/psp.yaml new file mode 100755 index 0000000..e8c8da9 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: admission-webhook +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/role.yaml new file mode 100755 index 0000000..fe1c2ee --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100755 index 0000000..391e5e9 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100755 index 0000000..5dfdd34 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/validating-webhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/validating-webhook.yaml new file mode 100755 index 0000000..5d338e2 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,33 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + name: {{ include "ingress-nginx.fullname" . }}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1beta1 + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1beta1 + clientConfig: + service: + namespace: {{ .Release.Namespace }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-admission + path: /networking/v1beta1/ingresses +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrole.yaml new file mode 100755 index 0000000..2035f54 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrole.yaml @@ -0,0 +1,76 @@ +{{- if and .Values.rbac.create (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + name: {{ include "ingress-nginx.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrolebinding.yaml new file mode 100755 index 0000000..a341f52 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and 
.Values.rbac.create (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + name: {{ include "ingress-nginx.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-addheaders.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-addheaders.yaml new file mode 100755 index 0000000..c064589 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-addheaders.yaml @@ -0,0 +1,10 @@ +{{- if .Values.controller.addHeaders -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers +data: {{ toYaml .Values.controller.addHeaders | nindent 2 }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-proxyheaders.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-proxyheaders.yaml new file mode 100755 index 0000000..5a1b252 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-proxyheaders.yaml @@ -0,0 +1,15 @@ +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + name: {{ include "ingress-nginx.fullname" . 
}}-custom-proxy-headers +data: +{{- if .Values.controller.proxySetHeaders }} +{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} +{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-tcp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-tcp.yaml new file mode 100755 index 0000000..bc97251 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-tcp.yaml @@ -0,0 +1,13 @@ +{{- if .Values.tcp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- if .Values.controller.tcp.annotations }} + annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-tcp +data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-udp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-udp.yaml new file mode 100755 index 0000000..a9dc388 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-udp.yaml @@ -0,0 +1,13 @@ +{{- if .Values.udp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- if .Values.controller.udp.annotations }} + annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-udp +data: {{ tpl (toYaml .Values.udp) . 
| nindent 2 }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap.yaml new file mode 100755 index 0000000..5b0d371 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- if .Values.controller.configAnnotations }} + annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} +data: +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers +{{- end }} +{{- if .Values.controller.config }} + {{ toYaml .Values.controller.config | nindent 2 }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-daemonset.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-daemonset.yaml new file mode 100755 index 0000000..4c6a1e2 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-daemonset.yaml @@ -0,0 +1,252 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: {{ toYaml .Values.controller.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + containers: + - name: controller + {{- with .Values.controller.image }} + image: "{{ template "system_default_registry" . 
}}{{.repository}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + {{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ include "ingress-nginx.defaultBackend.fullname" . }} + {{- end }} + {{- if .Values.controller.publishService.enabled }} + - --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} + {{- end }} + - --election-id={{ .Values.controller.electionID }} + - --ingress-class={{ .Values.controller.ingressClass }} + - --configmap={{ .Release.Namespace }}/{{ include "ingress-nginx.controller.fullname" . }} + {{- if .Values.tcp }} + - --tcp-services-configmap={{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-tcp + {{- end }} + {{- if .Values.udp }} + - --udp-services-configmap={{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . 
}}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} + - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + {{- end }} + {{- if .Values.controller.maxmindLicenseKey }} + - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} + {{- end }} + {{- if not (eq .Values.controller.healthCheckPath "/healthz") }} + - --health-check-path={{ .Values.controller.healthCheckPath }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- /* Accept keys without values or with false as value */}} + {{- if eq ($value | quote | len) 2 }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ .Values.controller.image.runAsUser }} + allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.livenessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ 
.Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.readinessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }} + volumeMounts: + {{- if 
.Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraInitContainers }} + initContainers: {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . 
}} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} + volumes: + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-deployment.yaml new file mode 100755 index 0000000..f0b7afd --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-deployment.yaml @@ -0,0 +1,256 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: controller + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + strategy: + {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: {{ toYaml .Values.controller.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + containers: + - name: controller + {{- with .Values.controller.image }} + image: "{{ template "system_default_registry" . 
}}{{.repository}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + {{- if .Values.defaultBackend.enabled }} + - --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }} + {{- end }} + {{- if .Values.controller.publishService.enabled }} + - --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} + {{- end }} + - --election-id={{ .Values.controller.electionID }} + - --ingress-class={{ .Values.controller.ingressClass }} + - --configmap=$(POD_NAMESPACE)/{{ include "ingress-nginx.controller.fullname" . }} + {{- if .Values.tcp }} + - --tcp-services-configmap=$(POD_NAMESPACE)/{{ include "ingress-nginx.fullname" . }}-tcp + {{- end }} + {{- if .Values.udp }} + - --udp-services-configmap=$(POD_NAMESPACE)/{{ include "ingress-nginx.fullname" . 
}}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }} + {{- end }} + {{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} + - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + {{- end }} + {{- if .Values.controller.maxmindLicenseKey }} + - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} + {{- end }} + {{- if not (eq .Values.controller.healthCheckPath "/healthz") }} + - --health-check-path={{ .Values.controller.healthCheckPath }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- /* Accept keys without values or with false as value */}} + {{- if eq ($value | quote | len) 2 }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ .Values.controller.image.runAsUser }} + allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: {{ .Values.controller.healthCheckPath }} + port: {{ .Values.controller.livenessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ 
.Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: {{ .Values.controller.healthCheckPath }} + port: {{ .Values.controller.readinessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }} + volumeMounts: + 
{{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraInitContainers }} + initContainers: {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . 
}} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} + volumes: + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-hpa.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-hpa.yaml new file mode 100755 index 0000000..4923cf8 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-hpa.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + name: {{ include "ingress-nginx.controller.fullname" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ingress-nginx.controller.fullname" . 
}} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscalingTemplate }} +{{- toYaml . | nindent 2 }} + {{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-poddisruptionbudget.yaml new file mode 100755 index 0000000..9dc8789 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,15 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (gt (.Values.controller.replicaCount | int) 1) -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + name: {{ include "ingress-nginx.controller.fullname" . }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: controller + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-prometheusrules.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-prometheusrules.yaml new file mode 100755 index 0000000..c0b7e89 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-prometheusrules.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.prometheusRule.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "ingress-nginx.name" . }} + rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-psp.yaml new file mode 100755 index 0000000..bcf588c --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-psp.yaml @@ -0,0 +1,86 @@ +{{- if .Values.podSecurityPolicy.enabled -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +spec: + allowedCapabilities: + - NET_BIND_SERVICE +{{- if .Values.controller.sysctls }} + allowedUnsafeSysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - {{ $sysctl }} + {{- end }} +{{- end }} + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. + volumes: + - 'configMap' + #- 'emptyDir' + #- 'projected' + - 'secret' + #- 'downwardAPI' +{{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- end }} +{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.hostPort.enabled }} +{{- range $key, $value := .Values.controller.hostPort.ports }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-role.yaml new file mode 100755 index 0000000..f2e3927 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-role.yaml @@ -0,0 +1,96 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + name: {{ include "ingress-nginx.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }}-{{ .Values.controller.ingressClass }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "ingress-nginx.fullname" . 
}}] +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-rolebinding.yaml new file mode 100755 index 0000000..5031350 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + name: {{ include "ingress-nginx.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-internal.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-internal.yaml new file mode 100755 index 0000000..0bdae23 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-internal.yaml @@ -0,0 +1,44 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.internal.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-internal +spec: + type: "{{ .Values.controller.service.type }}" +{{- if .Values.controller.service.internal.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-metrics.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-metrics.yaml new file mode 100755 index 0000000..b01f460 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-metrics.yaml @@ -0,0 +1,43 @@ +{{- if .Values.controller.metrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.service.labels }} + {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-metrics +spec: + type: {{ .Values.controller.metrics.service.type }} +{{- if .Values.controller.metrics.service.clusterIP }} + clusterIP: {{ .Values.controller.metrics.service.clusterIP }} +{{- end }} +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }} +{{- end }} + ports: + - name: metrics + port: {{ .Values.controller.metrics.service.servicePort }} + targetPort: metrics + {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }} + {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }} + nodePort: {{ .Values.controller.metrics.service.nodePort }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-webhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-webhook.yaml new file mode 100755 index 0000000..7a4dd51 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-webhook.yaml @@ -0,0 +1,33 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.admissionWebhooks.service.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + name: {{ include "ingress-nginx.controller.fullname" . }}-admission +spec: + type: {{ .Values.controller.admissionWebhooks.service.type }} +{{- if .Values.controller.admissionWebhooks.service.clusterIP }} + clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: https-webhook + port: 443 + targetPort: webhook + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service.yaml new file mode 100755 index 0000000..dce18c5 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service.yaml @@ -0,0 +1,83 @@ +{{- if .Values.controller.service.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.service.annotations }} + annotations: {{ toYaml .Values.controller.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} +spec: + type: {{ .Values.controller.service.type }} +{{- if .Values.controller.service.clusterIP }} + clusterIP: {{ .Values.controller.service.clusterIP }} +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} +{{- end }} +{{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} + 
ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ $key }}-tcp + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-serviceaccount.yaml new file mode 100755 index 0000000..4358507 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + name: {{ template "ingress-nginx.serviceAccountName" . }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-servicemonitor.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-servicemonitor.yaml new file mode 100755 index 0000000..68b1c92 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} +{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }} +{{ else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- range .Values.controller.metrics.serviceMonitor.targetLabels }} + - {{ . }} + {{- end }} +{{- end }} + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-deployment.yaml new file mode 100755 index 0000000..58bf7a6 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-deployment.yaml @@ -0,0 +1,97 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend + replicas: {{ .Values.defaultBackend.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: default-backend + {{- if .Values.defaultBackend.podLabels }} + {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: {{ .Values.defaultBackend.priorityClassName }} + {{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - name: {{ template "ingress-nginx.name" . }}-default-backend + {{- with .Values.defaultBackend.image }} + image: "{{ template "system_default_registry" . 
}}{{.repository}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + {{- if .Values.defaultBackend.extraArgs }} + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- /* Accept keys without values or with false as value */}} + {{- if eq ($value | quote | len) 2 }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + {{- end }} + securityContext: + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + {{- if .Values.defaultBackend.extraEnvs }} + env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + {{- if .Values.defaultBackend.resources }} + resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: {{ toYaml 
.Values.defaultBackend.nodeSelector | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + {{- if .Values.defaultBackend.tolerations }} + tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-poddisruptionbudget.yaml new file mode 100755 index 0000000..b6c9c44 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,15 @@ +{{- if gt (.Values.defaultBackend.replicaCount | int) 1 -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: default-backend + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-psp.yaml new file mode 100755 index 0000000..055f434 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-psp.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-backend + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: default-backend +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-role.yaml new file mode 100755 index 0000000..23498de --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-role.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + name: {{ include "ingress-nginx.fullname" . }}-backend +rules: + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "ingress-nginx.fullname" . }}-backend] +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-rolebinding.yaml new file mode 100755 index 0000000..45558aa --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: default-backend + name: {{ include "ingress-nginx.fullname" . }}-backend +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-service.yaml new file mode 100755 index 0000000..e74714d --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-service.yaml @@ -0,0 +1,34 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + name: {{ include "ingress-nginx.defaultBackend.fullname" . 
}} +spec: + type: {{ .Values.defaultBackend.service.type }} +{{- if .Values.defaultBackend.service.clusterIP }} + clusterIP: {{ .Values.defaultBackend.service.clusterIP }} +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: default-backend +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-serviceaccount.yaml new file mode 100755 index 0000000..96419cf --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/values.yaml new file mode 100755 index 0000000..ef766cc --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/values.yaml @@ -0,0 +1,666 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md +## +controller: + image: + repository: rancher/nginx-ingress-controller + tag: "nginx-0.35.0-rancher2" + digest: sha256:fc4979d8b8443a831c9789b5155cded454cb7de737a8b727bc2ba0106d2eae8b + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # Configures the ports the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + ## Annotations to be added to the controller config configuration configmap + ## + configAnnotations: {} + + # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # Optionally customize the pod dnsConfig. + dnsConfig: {} + + # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
+ dnsPolicy: ClusterFirstWithHostNet + + # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: true + + ## Use host ports 80 and 443 + ## Disabled by default + ## + hostPort: + enabled: false + ports: + http: 80 + https: 443 + + ## Election ID to use for status update + ## + electionID: ingress-controller-leader + + ## Name of the ingress class to route through this controller + ## + ingressClass: nginx + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Security Context policies for controller pods + ## + podSecurityContext: {} + + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ### + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + + ## Allows customization of the source of the IP address or FQDN to report + ## in the ingress status field. By default, it reads the information provided + ## by the service. If disable, the status field reports the IP address of the + ## node or nodes where an ingress controller pod is running. 
+ publishService: + enabled: true + ## Allows overriding of the publish service to bind to + ## Must be / + ## + pathOverride: "" + + ## Limit the scope of the controller + ## + scope: + enabled: false + namespace: "" # defaults to .Release.Namespace + + ## Allows customization of the configmap / nginx-configmap namespace + ## + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the tcp-services-configmap + ## + tcp: + configMapNamespace: "" # defaults to .Release.Namespace + ## Annotations to be added to the tcp config configmap + annotations: {} + + ## Allows customization of the udp-services-configmap + ## + udp: + configMapNamespace: "" # defaults to .Release.Namespace + ## Annotations to be added to the udp config configmap + annotations: {} + + ## Additional command line arguments to pass to nginx-ingress-controller + ## E.g. to specify the default SSL certificate you can use + ## extraArgs: + ## default-ssl-certificate: "/" + extraArgs: {} + + ## Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + ## DaemonSet or Deployment + ## + kind: Deployment + + ## Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + ## Labels to be added to the controller Deployment or DaemonSet + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + + # The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # minReadySeconds to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: 
"NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + ## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: failure-domain.beta.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: ingress-nginx-internal + + ## terminationGracePeriodSeconds + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + + ## Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + + # Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + # Define requests resources to avoid probe issues due to CPU utilization in busy nodes + # ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + # Ideally, there should be no limits. 
+ # https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + # limits: + # cpu: 100m + # memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + ## Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: false + + annotations: {} + labels: {} + # clusterIP: "" + + ## List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + # Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + # sessionAffinity: "" + + # specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, + # the service controller allocates a port from your cluster’s NodePort range. 
+ # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + # type: NodePort + # nodePorts: + # http: 32080 + # https: 32443 + # tcp: + # 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + ## Enables an additional internal load balancer (besides the external one). + ## Annotations are mandatory for the load balancer to come up. Varies with the cloud service. + internal: + enabled: false + annotations: {} + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + extraContainers: [] + ## Additional containers to be added to the controller pod. + ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + extraVolumeMounts: [] + ## Additional volumeMounts to the controller main container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the controller pod. + # - name: copy-portal-skins + # emptyDir: {} + + extraInitContainers: [] + ## Containers, which are run before the app containers are started. 
+ # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + admissionWebhooks: + enabled: true + failurePolicy: Fail + port: 8443 + + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + patch: + enabled: true + image: + repository: docker.io/jettech/kube-webhook-certgen + tag: v1.3.0 + pullPolicy: IfNotPresent + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + tolerations: [] + runAsUser: 2000 + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + # clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9913 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + namespace: "" + namespaceSelector: {} + # Default: scrape .Release.Namespace only + # To scrape all, use the following: + # namespaceSelector: + # any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + metricRelabelings: [] + + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config 
reloads to resume + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less then a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX, this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX, this requires your attention + + ## Improve connection draining when ingress controller pod is deleted using a lifecycle hook: + ## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds + ## to 300, allowing the draining of connections up to five minutes. + ## If the active connections end before that, the pod will terminate gracefully at that time. + ## To effectively take advantage of this feature, the Configmap feature + ## worker-shutdown-timeout new value is 240s instead of 10s. 
+ ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + + priorityClassName: "" + +## Rollback limit +## +revisionHistoryLimit: 10 + +# Maxmind license key to download GeoLite2 Databases +# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases +maxmindLicenseKey: "" + +## Default 404 backend +## +defaultBackend: + ## + enabled: false + + image: + repository: rancher/nginx-ingress-controller-defaultbackend + tag: "1.5-rancher1" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + + extraArgs: {} + + serviceAccount: + create: true + name: + ## Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + 
minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + service: + annotations: {} + + # clusterIP: "" + + ## List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + +## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 +rbac: + create: true + scope: false + +# If true, create & use Pod Security Policy resources +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: + +## Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# TCP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# UDP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp +## +udp: {} +# 53: "kube-system/kube-dns:53" + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/Chart.yaml new file mode 100755 index 0000000..26ac286 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.19.8 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.19.801 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/NOTES.txt new file mode 100755 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/_helpers.tpl new file mode 100755 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/config.yaml new file mode 100755 index 0000000..536a12a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ 
.Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + 
showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/daemonset.yaml new file mode 100755 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . 
}}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . }}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/rbac.yaml new file mode 100755 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/serviceaccount.yaml new file mode 100755 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/values.yaml new file mode 100755 index 0000000..e362472 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/values.yaml @@ -0,0 +1,142 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.19.8 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. 
+healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). 
+ kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. 
Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/.helmignore b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/.helmignore new file mode 100755 index 0000000..37ea1d7 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +OWNERS +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/Chart.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/Chart.yaml new file mode 100755 index 0000000..f1cebfe --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +appVersion: 0.3.6 +description: Metrics Server is a cluster-wide aggregator of resource usage data. 
+home: https://github.com/kubernetes-incubator/metrics-server +keywords: +- metrics-server +maintainers: +- email: o.with@sportradar.com + name: olemarkus +- email: k.aasan@sportradar.com + name: kennethaasan +name: rke2-metrics-server +sources: +- https://github.com/kubernetes-incubator/metrics-server +version: 2.11.100-build2021022301 diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/README.md b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/README.md new file mode 100755 index 0000000..678f084 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/README.md @@ -0,0 +1,39 @@ +# metrics-server + +[Metrics Server](https://github.com/kubernetes-incubator/metrics-server) is a cluster-wide aggregator of resource usage data. Resource metrics are used by components like `kubectl top` and the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale) to scale workloads. To autoscale based upon a custom metric, see the [Prometheus Adapter chart](https://github.com/helm/charts/blob/master/stable/prometheus-adapter). + +## Configuration + +Parameter | Description | Default +--- | --- | --- +`rbac.create` | Enable Role-based authentication | `true` +`rbac.pspEnabled` | Enable pod security policy support | `false` +`serviceAccount.create` | If `true`, create a new service account | `true` +`serviceAccount.name` | Service account to be used. 
If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | `` +`apiService.create` | Create the v1beta1.metrics.k8s.io API service | `true` +`hostNetwork.enabled` | Enable hostNetwork mode | `false` +`image.repository` | Image repository | `k8s.gcr.io/metrics-server-amd64` +`image.tag` | Image tag | `v0.3.2` +`image.pullPolicy` | Image pull policy | `IfNotPresent` +`imagePullSecrets` | Image pull secrets | `[]` +`args` | Command line arguments | `[]` +`resources` | CPU/Memory resource requests/limits. | `{}` +`tolerations` | List of node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`nodeSelector` | Node labels for pod assignment | `{}` +`affinity` | Node affinity | `{}` +`replicas` | Number of replicas | `1` +`extraVolumeMounts` | Ability to provide volume mounts to the pod | `[]` +`extraVolumes` | Ability to provide volumes to the pod | `[]` +`livenessProbe` | Container liveness probe | See values.yaml +`podLabels` | Labels to be added to pods | `{}` +`podAnnotations` | Annotations to be added to pods | `{}` +`priorityClassName` | Pod priority class | `""` +`readinessProbe` | Container readiness probe | See values.yaml +`service.annotations` | Annotations to add to the service | `{}` +`service.labels` | Labels to be added to the metrics-server service | `{}` +`service.port` | Service port to expose | `443` +`service.type` | Type of service to create | `ClusterIP` +`podDisruptionBudget.enabled` | Create a PodDisruptionBudget | `false` +`podDisruptionBudget.minAvailable` | Minimum available instances; ignored if there is no PodDisruptionBudget | +`podDisruptionBudget.maxUnavailable` | Maximum unavailable instances; ignored if there is no PodDisruptionBudget | +`extraContainers` | Add additional containers | `[]` diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/ci/ci-values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/ci/ci-values.yaml new file 
mode 100755 index 0000000..a9d81b4 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/ci/ci-values.yaml @@ -0,0 +1,5 @@ +# CI is running on GKE, which already ships metrics-server. This cause +# conflicts on the apiService resource. + +apiService: + create: false diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/NOTES.txt b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/NOTES.txt new file mode 100755 index 0000000..1034c12 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/NOTES.txt @@ -0,0 +1,11 @@ +The metric server has been deployed. +{{ if .Values.apiService.create }} +In a few minutes you should be able to list metrics using the following +command: + + kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" +{{ else }} +NOTE: You have disabled the API service creation for this release. The metrics +API will not work with this release unless you configure the metrics API +service outside of this Helm chart. +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/_helpers.tpl b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/_helpers.tpl new file mode 100755 index 0000000..b59ca03 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/_helpers.tpl @@ -0,0 +1,59 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "metrics-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "metrics-server.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "metrics-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a service name that defaults to app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "metrics-server.service.fullname" -}} +{{- .Values.service.nameOverride | default .Chart.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "metrics-server.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "metrics-server.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/aggregated-metrics-reader-cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/aggregated-metrics-reader-cluster-role.yaml new file mode 100755 index 0000000..e91a3d8 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/aggregated-metrics-reader-cluster-role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:{{ template "metrics-server.name" . }}-aggregated-reader + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: ["pods","nodes"] + verbs: ["get", "list", "watch"] +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/auth-delegator-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/auth-delegator-crb.yaml new file mode 100755 index 0000000..e82fca0 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/auth-delegator-crb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "metrics-server.fullname" . }}:system:auth-delegator + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/cluster-role.yaml new file mode 100755 index 0000000..8763acd --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/cluster-role.yaml @@ -0,0 +1,34 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:{{ template "metrics-server.fullname" . }} + labels: + app: {{ template "metrics-server.name" . 
}} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + - namespaces + verbs: + - get + - list + - watch + {{- if .Values.rbac.pspEnabled }} + - apiGroups: + - extensions + - policy + resources: + - podsecuritypolicies + resourceNames: + - privileged-{{ template "metrics-server.fullname" . }} + verbs: + - use + {{- end -}} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metric-server-service.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metric-server-service.yaml new file mode 100755 index 0000000..0d64cd1 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metric-server-service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- with .Values.service.labels -}} + {{ toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.service.annotations | trim | nindent 4 }} +spec: + ports: + - port: {{ .Values.service.port }} + protocol: TCP + targetPort: https + selector: + app: {{ template "metrics-server.name" . 
}} + release: {{ .Release.Name }} + type: {{ .Values.service.type }} + diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-api-service.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-api-service.yaml new file mode 100755 index 0000000..552ffea --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-api-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.apiService.create -}} +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.metrics.k8s.io + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + service: + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + group: metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-crb.yaml new file mode 100755 index 0000000..eb04c6f --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-crb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:{{ template "metrics-server.fullname" . }} + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:{{ template "metrics-server.fullname" . 
}} +subjects: + - kind: ServiceAccount + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-deployment.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-deployment.yaml new file mode 100755 index 0000000..2e54f27 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-deployment.yaml @@ -0,0 +1,88 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "metrics-server.name" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.replicas }} + template: + metadata: + labels: + app: {{ template "metrics-server.name" . }} + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + {{- with .Values.podAnnotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "metrics-server.serviceAccountName" . }} +{{- if .Values.hostNetwork.enabled }} + hostNetwork: true +{{- end }} + containers: + {{- if .Values.extraContainers }} + {{- ( tpl (toYaml .Values.extraContainers) . ) | nindent 8 }} + {{- end }} + - name: metrics-server + image: {{ template "system_default_registry" . 
}}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /metrics-server + - --cert-dir=/tmp + - --logtostderr + - --secure-port=8443 + {{- range .Values.args }} + - {{ . }} + {{- end }} + ports: + - containerPort: 8443 + name: https + livenessProbe: + {{- toYaml .Values.livenessProbe | trim | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | trim | nindent 12 }} + resources: + {{- toYaml .Values.resources | trim | nindent 12 }} + securityContext: + {{- toYaml .Values.securityContext | trim | nindent 12 }} + volumeMounts: + - name: tmp + mountPath: /tmp + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 10 }} + {{- end }} + nodeSelector: + {{- toYaml .Values.nodeSelector | trim | nindent 8 }} + affinity: + {{- toYaml .Values.affinity | trim | nindent 8 }} + tolerations: + {{- toYaml .Values.tolerations | trim | nindent 8 }} + volumes: + - name: tmp + emptyDir: {} + {{- with .Values.extraVolumes }} + {{- toYaml . | nindent 6}} + {{- end }} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-serviceaccount.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-serviceaccount.yaml new file mode 100755 index 0000000..4d748ed --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/pdb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/pdb.yaml new file mode 100755 index 0000000..3831097 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.podDisruptionBudget.enabled -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "metrics-server.name" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/psp.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/psp.yaml new file mode 100755 index 0000000..b5cb7da --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/psp.yaml @@ -0,0 +1,26 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: privileged-{{ template "metrics-server.fullname" . 
}} +spec: + allowedCapabilities: + - '*' + fsGroup: + rule: RunAsAny + privileged: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - '*' + hostPID: true + hostIPC: true + hostNetwork: true + hostPorts: + - min: 1 + max: 65536 +{{- end }} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/role-binding.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/role-binding.yaml new file mode 100755 index 0000000..3169f24 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/role-binding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "metrics-server.fullname" . }}-auth-reader + namespace: kube-system + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/tests/test-version.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/tests/test-version.yaml new file mode 100755 index 0000000..3648e6d --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/tests/test-version.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ template "metrics-server.fullname" . }}-test + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['/bin/sh'] + args: + - -c + - 'wget -qO- https://{{ include "metrics-server.fullname" . }}:{{ .Values.service.port }}/version | grep -F {{ .Values.image.tag }}' + restartPolicy: Never + diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/values.yaml new file mode 100755 index 0000000..52e0fdd --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/values.yaml @@ -0,0 +1,113 @@ +rbac: + # Specifies whether RBAC resources should be created + create: true + pspEnabled: false + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +apiService: + # Specifies if the v1beta1.metrics.k8s.io API service should be created. + # + # You typically want this enabled! If you disable API service creation you have to + # manage it outside of this chart for e.g horizontal pod autoscaling to + # work with this release. + create: true + +hostNetwork: + # Specifies if metrics-server should be started in hostNetwork mode. + # + # You would require this enabled if you use alternate overlay networking for pods and + # API server unable to communicate with metrics-server. 
As an example, this is required + # if you use Weave network on EKS + enabled: false + +image: + repository: rancher/hardened-k8s-metrics-server + tag: v0.3.6-build20210223 + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - registrySecretName + +args: +# enable this if you have self-signed certificates, see: https://github.com/kubernetes-incubator/metrics-server +# - --kubelet-insecure-tls + - --kubelet-preferred-address-types=InternalIP + +resources: {} + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +replicas: 1 + +extraContainers: [] + +podLabels: {} + +podAnnotations: {} +# The following annotations guarantee scheduling for critical add-on pods. +# See more at: https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ +# scheduler.alpha.kubernetes.io/critical-pod: '' + +## Set a pod priorityClassName +priorityClassName: system-node-critical + +extraVolumeMounts: [] +# - name: secrets +# mountPath: /etc/kubernetes/secrets +# readOnly: true + +extraVolumes: [] +# - name: secrets +# secret: +# secretName: kube-apiserver + +livenessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + +readinessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["all"] + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + +service: + annotations: {} + labels: {} + # Add these labels to have metrics-server show up in `kubectl cluster-info` + # kubernetes.io/cluster-service: "true" + # kubernetes.io/name: "Metrics-server" + port: 443 + type: ClusterIP + +podDisruptionBudget: + # https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + enabled: false + minAvailable: + maxUnavailable: + +global: + systemDefaultRegistry: "" diff --git a/index.yaml b/index.yaml old mode 100644 new mode 100755 index dee5a53..9170c07 --- a/index.yaml +++ 
b/index.yaml @@ -1,6 +1,23 @@ apiVersion: v1 entries: rke2-canal: + - apiVersion: v1 + appVersion: v3.13.3 + created: "2021-02-25T18:49:47.253718-08:00" + description: Install Canal Network Plugin. + digest: 8be6ab96961a079b3ba56ba79bb49bcc14792b7b1235688384b51bc4f4c818b5 + home: https://www.projectcalico.org/ + keywords: + - canal + maintainers: + - email: charts@rancher.com + name: Rancher Labs + name: rke2-canal + sources: + - https://github.com/rancher/rke2-charts + urls: + - assets/rke2-canal/rke2-canal-v3.13.300-build2021022301.tgz + version: v3.13.300-build2021022301 - apiVersion: v1 appVersion: v3.13.3 created: "2021-02-25T17:59:12.931728-08:00" @@ -81,6 +98,33 @@ entries: urls: - assets/rke2-coredns/rke2-coredns-1.10.101.tgz version: 1.10.101 + - apiVersion: v1 + appVersion: 1.6.9 + created: "2021-02-25T18:49:47.256208-08:00" + description: CoreDNS is a DNS server that chains plugins and provides Kubernetes + DNS Services + digest: a6bcceac244eb1f4161ab29474dfed5464da49b7fd7af4df37e1ab2ebdd67ddd + home: https://coredns.io + icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png + keywords: + - coredns + - dns + - kubedns + maintainers: + - email: hello@acale.ph + name: Acaleph + - email: shashidhara.huawei@gmail.com + name: shashidharatd + - email: andor44@gmail.com + name: andor44 + - email: manuel@rueg.eu + name: mrueg + name: rke2-coredns + sources: + - https://github.com/coredns/coredns + urls: + - assets/rke2-coredns/rke2-coredns-1.10.101-build2021022302.tgz + version: 1.10.101-build2021022302 - apiVersion: v1 appVersion: 1.6.9 created: "2021-02-25T17:59:12.933187-08:00" @@ -109,6 +153,26 @@ entries: - assets/rke2-coredns/rke2-coredns-1.10.101-build2021022301.tgz version: 1.10.101-build2021022301 rke2-ingress-nginx: + - apiVersion: v1 + appVersion: 0.35.0 + created: "2021-02-25T18:49:47.265982-08:00" + description: Ingress controller for Kubernetes using NGINX as a reverse proxy + and load balancer + digest: 
2c4d09aa9c99c62ec2141e1067ee8d37485cebe3531e711a3d736520939cfba6 + home: https://github.com/kubernetes/ingress-nginx + icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png + keywords: + - ingress + - nginx + kubeVersion: '>=1.16.0-0' + maintainers: + - name: ChiefAlexander + name: rke2-ingress-nginx + sources: + - https://github.com/kubernetes/ingress-nginx + urls: + - assets/rke2-ingress-nginx/rke2-ingress-nginx-3.3.001.tgz + version: 3.3.001 - apiVersion: v1 appVersion: 0.35.0 created: "2021-02-25T17:59:12.938912-08:00" @@ -168,6 +232,22 @@ entries: urls: - assets/rke2-kube-proxy/rke2-kube-proxy-v1.20.2.tgz version: v1.20.2 + - apiVersion: v1 + appVersion: v1.19.8 + created: "2021-02-25T18:49:47.27395-08:00" + description: Install Kube Proxy. + digest: 41c6625db0dec5b21d98fb601b26abb3a23cd5ce598553ee87461c7c566f70ad + keywords: + - kube-proxy + maintainers: + - email: charts@rancher.com + name: Rancher Labs + name: rke2-kube-proxy + sources: + - https://github.com/rancher/rke2-charts + urls: + - assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.801.tgz + version: v1.19.801 - apiVersion: v1 appVersion: v1.19.8 created: "2021-02-25T17:59:12.951821-08:00" @@ -364,6 +444,25 @@ entries: urls: - assets/rke2-metrics-server/rke2-metrics-server-2.11.100.tgz version: 2.11.100 + - apiVersion: v1 + appVersion: 0.3.6 + created: "2021-02-25T18:49:47.27561-08:00" + description: Metrics Server is a cluster-wide aggregator of resource usage data. 
+ digest: 15ddecb18f303eb0ba7c9278246a332320a3a211f3db7690295a1a684a1dd65e + home: https://github.com/kubernetes-incubator/metrics-server + keywords: + - metrics-server + maintainers: + - email: o.with@sportradar.com + name: olemarkus + - email: k.aasan@sportradar.com + name: kennethaasan + name: rke2-metrics-server + sources: + - https://github.com/kubernetes-incubator/metrics-server + urls: + - assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022301.tgz + version: 2.11.100-build2021022301 - apiVersion: v1 appVersion: 0.3.6 created: "2021-02-25T17:59:12.952919-08:00" From 3d34983ce4cc315395cd3bc822295ece0e7dab9e Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Thu, 25 Feb 2021 18:50:59 -0800 Subject: [PATCH 08/10] Revert "test make sync" This reverts commit 6c7ccf31139ce4716dfc82dfd238dad895ed0348. --- .../rke2-canal-v3.13.300-build2021022301.tgz | Bin 5915 -> 0 bytes .../rke2-coredns-1.10.101-build2021022302.tgz | Bin 10575 -> 0 bytes .../rke2-ingress-nginx-3.3.001.tgz | Bin 20585 -> 0 bytes .../rke2-kube-proxy-v1.19.801.tgz | Bin 4315 -> 0 bytes ...etrics-server-2.11.100-build2021022301.tgz | Bin 5378 -> 0 bytes .../v3.13.300-build2021022301/Chart.yaml | 13 - .../templates/NOTES.txt | 3 - .../templates/_helpers.tpl | 7 - .../templates/config.yaml | 67 -- .../templates/crd.yaml | 197 ------ .../templates/daemonset.yaml | 262 ------- .../templates/rbac.yaml | 163 ----- .../templates/serviceaccount.yaml | 6 - .../v3.13.300-build2021022301/values.yaml | 74 -- .../1.10.101-build2021022302/.helmignore | 22 - .../1.10.101-build2021022302/Chart.yaml | 23 - .../1.10.101-build2021022302/README.md | 138 ---- .../templates/NOTES.txt | 30 - .../templates/_helpers.tpl | 158 ----- .../templates/clusterrole-autoscaler.yaml | 35 - .../templates/clusterrole.yaml | 38 - .../clusterrolebinding-autoscaler.yaml | 28 - .../templates/clusterrolebinding.yaml | 24 - .../templates/configmap-autoscaler.yaml | 34 - .../templates/configmap.yaml | 30 - 
.../templates/deployment-autoscaler.yaml | 77 -- .../templates/deployment.yaml | 127 ---- .../templates/poddisruptionbudget.yaml | 28 - .../templates/podsecuritypolicy.yaml | 57 -- .../templates/service-metrics.yaml | 33 - .../templates/service.yaml | 40 -- .../templates/serviceaccount-autoscaler.yaml | 21 - .../templates/serviceaccount.yaml | 16 - .../templates/servicemonitor.yaml | 33 - .../1.10.101-build2021022302/values.yaml | 202 ------ .../rke2-ingress-nginx/3.3.001/.helmignore | 22 - .../rke2-ingress-nginx/3.3.001/Chart.yaml | 16 - .../rke2-ingress-nginx/3.3.001/OWNERS | 5 - .../rke2-ingress-nginx/3.3.001/README.md | 221 ------ .../ci/daemonset-customconfig-values.yaml | 9 - .../ci/daemonset-customnodeport-values.yaml | 18 - .../3.3.001/ci/daemonset-headers-values.yaml | 10 - .../ci/daemonset-internal-lb-values.yaml | 10 - .../3.3.001/ci/daemonset-nodeport-values.yaml | 6 - ...set-tcp-udp-configMapNamespace-values.yaml | 16 - .../3.3.001/ci/daemonset-tcp-udp-values.yaml | 12 - .../3.3.001/ci/daemonset-tcp-values.yaml | 10 - .../3.3.001/ci/deamonset-default-values.yaml | 6 - .../3.3.001/ci/deamonset-metrics-values.yaml | 8 - .../3.3.001/ci/deamonset-psp-values.yaml | 9 - .../ci/deamonset-webhook-and-psp-values.yaml | 9 - .../3.3.001/ci/deamonset-webhook-values.yaml | 6 - .../ci/deployment-autoscaling-values.yaml | 7 - .../ci/deployment-customconfig-values.yaml | 7 - .../ci/deployment-customnodeport-values.yaml | 16 - .../3.3.001/ci/deployment-default-values.yaml | 4 - .../3.3.001/ci/deployment-headers-values.yaml | 9 - .../ci/deployment-internal-lb-values.yaml | 9 - .../3.3.001/ci/deployment-metrics-values.yaml | 7 - .../ci/deployment-nodeport-values.yaml | 5 - .../3.3.001/ci/deployment-psp-values.yaml | 6 - ...ent-tcp-udp-configMapNamespace-values.yaml | 15 - .../3.3.001/ci/deployment-tcp-udp-values.yaml | 11 - .../3.3.001/ci/deployment-tcp-values.yaml | 7 - .../ci/deployment-webhook-and-psp-values.yaml | 8 - .../3.3.001/ci/deployment-webhook-values.yaml | 5 
- .../3.3.001/templates/NOTES.txt | 71 -- .../3.3.001/templates/_helpers.tpl | 132 ---- .../job-patch/clusterrole.yaml | 27 - .../job-patch/clusterrolebinding.yaml | 20 - .../job-patch/job-createSecret.yaml | 60 -- .../job-patch/job-patchWebhook.yaml | 62 -- .../admission-webhooks/job-patch/psp.yaml | 36 - .../admission-webhooks/job-patch/role.yaml | 20 - .../job-patch/rolebinding.yaml | 20 - .../job-patch/serviceaccount.yaml | 12 - .../validating-webhook.yaml | 33 - .../3.3.001/templates/clusterrole.yaml | 76 -- .../3.3.001/templates/clusterrolebinding.yaml | 16 - .../controller-configmap-addheaders.yaml | 10 - .../controller-configmap-proxyheaders.yaml | 15 - .../templates/controller-configmap-tcp.yaml | 13 - .../templates/controller-configmap-udp.yaml | 13 - .../templates/controller-configmap.yaml | 20 - .../templates/controller-daemonset.yaml | 252 ------- .../templates/controller-deployment.yaml | 256 ------- .../3.3.001/templates/controller-hpa.yaml | 36 - .../controller-poddisruptionbudget.yaml | 15 - .../templates/controller-prometheusrules.yaml | 21 - .../3.3.001/templates/controller-psp.yaml | 86 --- .../3.3.001/templates/controller-role.yaml | 96 --- .../templates/controller-rolebinding.yaml | 17 - .../controller-service-internal.yaml | 44 -- .../templates/controller-service-metrics.yaml | 43 -- .../templates/controller-service-webhook.yaml | 33 - .../3.3.001/templates/controller-service.yaml | 83 --- .../templates/controller-serviceaccount.yaml | 9 - .../templates/controller-servicemonitor.yaml | 42 -- .../templates/default-backend-deployment.yaml | 97 --- .../default-backend-poddisruptionbudget.yaml | 15 - .../templates/default-backend-psp.yaml | 33 - .../templates/default-backend-role.yaml | 14 - .../default-backend-rolebinding.yaml | 17 - .../templates/default-backend-service.yaml | 34 - .../default-backend-serviceaccount.yaml | 9 - .../rke2-ingress-nginx/3.3.001/values.yaml | 666 ------------------ .../rke2-kube-proxy/v1.19.801/Chart.yaml | 12 - 
.../v1.19.801/templates/NOTES.txt | 2 - .../v1.19.801/templates/_helpers.tpl | 21 - .../v1.19.801/templates/config.yaml | 69 -- .../v1.19.801/templates/daemonset.yaml | 78 -- .../v1.19.801/templates/rbac.yaml | 12 - .../v1.19.801/templates/serviceaccount.yaml | 5 - .../rke2-kube-proxy/v1.19.801/values.yaml | 142 ---- .../2.11.100-build2021022301/.helmignore | 22 - .../2.11.100-build2021022301/Chart.yaml | 15 - .../2.11.100-build2021022301/README.md | 39 - .../ci/ci-values.yaml | 5 - .../templates/NOTES.txt | 11 - .../templates/_helpers.tpl | 59 -- ...ggregated-metrics-reader-cluster-role.yaml | 18 - .../templates/auth-delegator-crb.yaml | 19 - .../templates/cluster-role.yaml | 34 - .../templates/metric-server-service.yaml | 25 - .../templates/metrics-api-service.yaml | 20 - .../templates/metrics-server-crb.yaml | 19 - .../templates/metrics-server-deployment.yaml | 88 --- .../metrics-server-serviceaccount.yaml | 12 - .../templates/pdb.yaml | 23 - .../templates/psp.yaml | 26 - .../templates/role-binding.yaml | 20 - .../templates/tests/test-version.yaml | 21 - .../2.11.100-build2021022301/values.yaml | 113 --- index.yaml | 99 --- 134 files changed, 6098 deletions(-) delete mode 100755 assets/rke2-canal/rke2-canal-v3.13.300-build2021022301.tgz delete mode 100755 assets/rke2-coredns/rke2-coredns-1.10.101-build2021022302.tgz delete mode 100755 assets/rke2-ingress-nginx/rke2-ingress-nginx-3.3.001.tgz delete mode 100755 assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.801.tgz delete mode 100755 assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022301.tgz delete mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/Chart.yaml delete mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/NOTES.txt delete mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/_helpers.tpl delete mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/config.yaml delete mode 100755 
charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/crd.yaml delete mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/daemonset.yaml delete mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/rbac.yaml delete mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/serviceaccount.yaml delete mode 100755 charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/values.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/.helmignore delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/Chart.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/README.md delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/NOTES.txt delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/_helpers.tpl delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole-autoscaler.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding-autoscaler.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap-autoscaler.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment-autoscaler.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/poddisruptionbudget.yaml delete mode 100755 
charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/podsecuritypolicy.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service-metrics.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount-autoscaler.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/servicemonitor.yaml delete mode 100755 charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/.helmignore delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/Chart.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/OWNERS delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/README.md delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customconfig-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customnodeport-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-headers-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-internal-lb-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-nodeport-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-configMapNamespace-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-values.yaml delete mode 100755 
charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-default-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-metrics-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-psp-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-and-psp-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-autoscaling-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customconfig-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customnodeport-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-default-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-headers-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-internal-lb-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-metrics-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-nodeport-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-psp-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-configMapNamespace-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-and-psp-values.yaml delete mode 100755 
charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-values.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/NOTES.txt delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/_helpers.tpl delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrole.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrolebinding.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-createSecret.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-patchWebhook.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/psp.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/role.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/rolebinding.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/serviceaccount.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/validating-webhook.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrole.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrolebinding.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-addheaders.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-proxyheaders.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-tcp.yaml delete mode 
100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-udp.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-daemonset.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-deployment.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-hpa.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-poddisruptionbudget.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-prometheusrules.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-psp.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-role.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-rolebinding.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-internal.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-metrics.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-webhook.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-serviceaccount.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-servicemonitor.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-deployment.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-poddisruptionbudget.yaml delete mode 
100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-psp.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-role.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-rolebinding.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-service.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-serviceaccount.yaml delete mode 100755 charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/values.yaml delete mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/Chart.yaml delete mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/NOTES.txt delete mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/_helpers.tpl delete mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/config.yaml delete mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/daemonset.yaml delete mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/rbac.yaml delete mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/serviceaccount.yaml delete mode 100755 charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/values.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/.helmignore delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/Chart.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/README.md delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/ci/ci-values.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/NOTES.txt delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/_helpers.tpl delete mode 100755 
charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/aggregated-metrics-reader-cluster-role.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/auth-delegator-crb.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/cluster-role.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metric-server-service.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-api-service.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-crb.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-deployment.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-serviceaccount.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/pdb.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/psp.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/role-binding.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/tests/test-version.yaml delete mode 100755 charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/values.yaml mode change 100755 => 100644 index.yaml diff --git a/assets/rke2-canal/rke2-canal-v3.13.300-build2021022301.tgz b/assets/rke2-canal/rke2-canal-v3.13.300-build2021022301.tgz deleted file mode 100755 index 39ab3a7c92d01db64d6e45d8212ac31837770de8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5915 zcmV+$7v$(4iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc 
zVQyr3R8em|NM&qo0PKBjbK5wQ@O;*<=u+ybd@&{YCC-eVtE+QlIZ=%*>nu4lTU%Qi zh-^t%g8&x*Woxs!-+l#v@1kTov7Ol~^FtDmKzF0jXf*l-kvDkY`GgbJ?k$Pbtu+bR zPfz=FI-Sn((UJYT)9Fac1r6{t{)&7hwZ{6NBN(y@PWyQw{ej7vzQ#vGmb`N=aEzy^LoDMmvGm zb{#B^#;P*rPREUlA6gdIkk>&-y<)e0Nw~|A+7P zU)TRjJf7z@8gF2-L>P=#N8r;yLL>wOEx?yZG=kt@n55$f=p4A<2zW=8RxKEb00tuk zx0ErMVdD+>OQDd%T#69%k}8mdFR;~!M9`yw>_T(D(>gk6by}VF{&BO>m@~pTvTg(5 z4cx>t^pZC~L$bgIxWCPJ0Ev-MR13N8Lb{NbL4o>X_Bs@sgJN=0d_!C=LJN{ipkfFTyb9)F(z6wk&|b92m|u%oyP}d!U!d z%^wAzRHaOb73B;2m5Tu1DhnXMIfe zcRXl%9*w+^sDCum-?qDoXB@Q`E~JC`f%rGb13SPX{8siwEk(=%;6lR;jV9&`3~XPb zLB?Fu(9$|{?Yq!i-7&(OmiO*yIGyk2wKp69bz6t!otskw!RK`F!1;9m3)AbAbj=B95l4+q41Xh3TcAOXM*!tiQ^M#!?bvP!)n`IxlypJM7#+t2 zpVLJw(aAB8#X{o3&Gn3!WhFR7B7olD^vb}B6^=w;eifjcQblGA;lsy~g+uwmPMtF| zC|`U?<;`dzhnU4mqjXF$6bHyPov-aO3ov40ZB~l?ZspCjcRXm`{H*pY7n>#gbIK5y zSS-wXRRaDn%Q5T+b0oDuO+r92EM32Wro(yW1u8m#!m7^hgvjiVBQ5Mk8%-DWj-0=HT7iE1XMI z%YjizE5f>P+_8AFUdb{x{EL;7Eel{T)lSNMBC!@Qi=DI)ri2kWh1z`Mb8Tde;ez<< zD*kNN3`O%61=GOx?{ha#7Q_+>(@TGe z{!P~TlYQr!1%*jTI!022sFxTk2vJMwtF+@N`*PtvY{Ez<_V?2focKO6lupozf?I?@ zSiPy-yaLs5OZ5^&EFqrJ_jWmCdz;1i`{^jdl1ssFOSG$5NDEI3Fq%C^Ndg2d$$UH|&CFl6ap5tLj6wDBLYO#ja9g#zZPq@Ll!vmfDkf z0`FP>hwt9MtFHgUgM-)g{}PYqd5t$PwjT2?IB!Hz66_1FAQ0ABNL6CZUy#U{=+?FKNK8q88Od2mA~E5esby__O*DE%IsNNi2*g zb@bUU#c{mUiF1>?1r$|!ww)N01LHK*aUI0;Fu4KGOq4N4Q*$>JSCXD=Q=3(AK;kbg zi(6p(sR(^an1$maCB}ZW9&QIF^?_R_M@TgF31e%BL=>B~6eP~hX@Y#}dUfGnp7s8H zNnEE*BZW=pn(`tMcuD)_UfRIt=|VBhI<3Rje*V!}@T%E`zh=JJF9ok`Q?DZn-tj8y zXumPBm@-@;GYzaWrt~@U3{F!Zxeu}2((o-E$8`GZ^@sk|us`XKr^CzB{&aY9(VtGs zJwhEfyD**hhJ)$FLwY__RAvGYZ7tv08&Y0fi@NDpLdNw%kk4`3^rqdGeMh%+U>5PQbA&JUN zRh*cCYZ+tdy-y-Cql{|m=(N2rNEA`NP)Ub}qn;5S+|ocVo4b+Z?KUtZ|N2|Q{YpIR zF6L3YSX*!*Vs5q;%C)fSA&-$HvE;;aZ1;%P-<8={{#9OTN%*>1QX)y9s-Qfw#g+Im zOHoQZyWL0@n__}^>4kf`(W6H_!N35#^&LAhC#Ht$lD(u>9@!UMKF|8k|i)81-$+a=+?JP62 z?P8Zg{oK5=&ZH#D{K9DQ-wdpZ_mveb3(RT%L_ARFgm%gTVaVSIW-lQ zWFKCFA5I1S0EYBZuVk4c2|YC|N!*w(z>_860eflPYecrVjVuyVu`8C2b$x9`k_J^7yV 
zf35!~AQ%d+@QJE{cKd%04&NPD0I|nQX#2cgYuUc#D*hQVDyq`pjv@U6uSZ<(fmTW zvg$a6K<5T5u25RofGg@Jp*r^@kWsb>ih{2-O8fd3=|AH>ZX~;b#fDRu(cwmgeDt+#sk}rmN46BPH8r-4o_YAyhsr4{$z5 zUw2_B#!1^k>_$WelaO8L+d7{TI(f5m#%#n}Vm>8BtT(vs)(FW%P!~z=-=QDt2T+_u z_D_6Y#9X_C;i7E6feXrMNdJith+A9L0)Z1Zk}ficqlJx53kd--G&Au3K_g>DMWh7; z0thtcLi(72Pdha8s>t%}2g0}cHa@6{R41VV)Di+(GwzP7CwwO2LfYPBq((w6zv|{koYQkQN-Z^12-O`n9{G zp(cwwc4wZX466%&ZMF+^w$!ruw?Z@It8OU-{KguEAm$m%iu}k5?pmczDMNb0!L)Zd zJhS2RN@Uq4B-qkAXUnw=Tt<~{R`AznKx|EY*hes1@Ue{4`CPmn-e1HVs z1&Mye^%c%H8$2l4r}|j6vYA^^8T%Nu<-1X^21#C*WZvHVtelBqKGhGrE|1~iL!VP* z!Im-GZzH0YUAuj>3?XLJl@XYkyO>-jc`>jelA5i4>nvSNuKyHMlP2p_hBK15vPF#6 zcFgpOEge-1F*4eBE=(&~KO^Jwet)!OE!aYuqWrG}i$k<|1FBmA7n-+j0@_AC+9J|z zpVRh?@Odk#<7Nx(=nz+gwkv$aLwr*oQjZ84rAfpxQZ^qn7D;gdSfi{ zqE5GYerXOHI^)mP4he?@!5!scyyiZfj0T(9J*krP0!!G_D)?dr=DkY3)Od%Yf7_0g zY}*tgBNOeUUrR0D?!jA6nLq2B*zeT*nY?gge*3ovh-N*zY*uAS8a8BZgRkErOn$8u za20b^J?Uk;M!_P=a_4AKc=9fh6bng0l^35lp)n?d&0|z4)IxVNxJ_v)oKkBhuf)%$ ztz@6dnA6hc_yls76&T+uba~4SiL0?0t`!#fdNLZMr|B8-Z;X!267%Y{Q5(s8Er8gHi;~i3vcEyrPMvT@OP8G>v4ZNx;&kl77wK^FCX4cT2|6*OM!Ru=(m%> zWO{aaHO-jyP)e#r(c9@U2FPP=4r6G%xSeq-_*)Ig(^~8;cHqhRpm&*?f`j45>4%fv zul?caLpirm*1bc@9#Q38Pwe^ic+$Um)Cwq+il>xq5cUM6+MC3l_0I>tPY0vP$%pg) z_^N+))gONv3@81o&nM>(rG8fh_?F?+VFQW#3T1U<4!xBr3BLU{LhF6lof4 zv-9eXIB7n-z0%~{Z!;+a-g2`E^p=FX5_a>_k_BY0^AoaDh8F0Otg2wSNIV-Ycfd}r z-k2L%ae4Ldhvy}-==n5|f3zwfDl2j@`g}CHygc7jsBemA#a|9u`^P_L7qOJvJ@zJq z%?V40IUS6R6xjsl>G|cykAq?Dd9e#kV|rBS_{PHYO>&$}pZ3pAuFog^;pymdFq}*- zKV6O|C%wtwa`-6u-cj?uU5V|!5Mmq2gVE>X@%3nQc{O?D63p(F*u5ofxrbo~N#~az z$NkU!tHIb@OJ)xi-03dsS^aFUkol$a%P*A= zMpbsfo=P{4jk|{VP;UvNvvw$gm|>M9C!>tcy!!8ED3_eAHzcTvLdLv(=j@4AmTFYR zlQi&Z8sv!5%T=~vS`lfn6>ep!;#m>t$(Vb!O&*8IuK$e^}ngl(%f&+)~($e!qIo)tXq zKA4xAZS@0{T?y7n=sZ2PcTn6jzkKnWfw+N#$b~9{_pFYtSHo$g`;+MA4evtAd2{@J zmSbiba&GbJF|&$#JUZz;GkzP)uir_}Plm((`RR0U_9!ZzGoF!`@ZLl6XnJun{_j5p z(RnfN1GbV!X!(0|{B~`adrHXp zm6z-Gebl>e&)Era%Vy@?x4qm8v}WJ(cF)jGkUJ;dWx$+KhL^Hu5AZd!kZRTqa_VP2 
zlmBjwj6Fc+6U2p}X>V8)8R~x1r#Amn&WQhn1;F>!|8@?KtM$L{k2|mVpD*#;lmD4XLZk5p2Ha`>$|JAO>(q|%hAg2g>e|J1jAA;rzMEQdMPCRr@IOu z%j~WzfGiyTrP*7%rHeq4`|dbmU{kmjs38)8+H(nWdoD*cwH)n<#RC%2k5a^u>cU@} zNy5*D)5EiHvL6;)qo}1EMEh!ma#rZLuxZ{8##637GH()&)&S47X+V{4!}UpH>-Vxo6?sXfnBe51WZlh;<7=(v`rLmRemH!?FMHhpj?0!*?v3rV@gAX(&$ zi3&`813fFNV1tMXlnAkMK~13u7L3%I)l3PWXoMu#{lkF6_y3Rrsan7}E#mDAb z=`w&EPIfy&spS?J#WE=OyHrS+L#-3Fjjsv|T5(&Cy0+Wu(#^NcicdSFnxg+&t+P|K z(kYHrnx|!VAh({KO0ct?$1uNtwyP}PHO1TbQW>dVG@VQ8=utW@$&yGjQ6HzTzi|d8 zOH}qm6jEgi$870<;B0Lp7?LK6K1~X@(cslV>)6=}K5@68%@8DpVue8x3N9p(3l26I zUkVS`>5NBCs`o*4P^u#f%v?n4#DF2RpuyC{*ecZO@pPwOu(+ z`&9V$|E;j}m3%Fpvou5YwDw=>OwMe-$L}9cU^_f`b9o|@B;7UKBz~Z_CMLfjtH9gb zAm@a^xKu7tV~QJ4lrO%|BIkrDc zVQyr3R8em|NM&qo0PKDHbK5x5;Qo62EBd2nH*wCQB)^i;SGiip&hEN4aeQ%RxAwBB z7$PAFV-ny1plnU-``f?5i!V`>PHUclZ9#>F(_xcK-mKwPG>j%7jGx z51p0AsuTB}Tqq^~z+4bUdob>LC{6P}yTM`bqZeTja*|4Y>zHwTa&`ek00Q+xU_Qni z3)Rg7xWIf&LM*%yOK=ZHQl_HUZrha@ z5ay9kjX3?FO%gQ3qHS^ce9U5&@y~x_oczkDL~)SPp?8HR*NjJ^=e5B0^;%&5f0gz1 z-w7gAB0@3O&2WN9+=CI0WAAn91ZLoE!G{qMM%@4F=P9Xf0b-I6seb{4Y1V^or<3Tv5}at1y1Osl5{>3B85Z|x>j`tPeVC-U2mW!K z35ogXx$k)?X9<=goM|u#ql#_=5mUvws0RZS3#@OUC?e|6Me!Td$FbV1A6cCFb!y3VUuO z1JjQ2CxrvNe|2;W`RZw+rQT1Wsd}EX=mLisCvsw#tyZi^U+XC^X8fMa2)+(CwhTW! 
zZwrX9i`I@0BE=yY5G>$&q~@9{O^-_uBbLRH+UpTcV>ZDNAmJ8a%g#P94pa$^7_MFj zKxrCy^${!+y)Af`nk5e@Croh+!WaoBQZgk_lzdWD*qMWrMIwNo+#{4pCX}kifecE2 z1rFk|0(hp#w;T+-sYgBV#Y9M)v>Y`dCz6CH_C2qVn7=ZrnS8CZ-;##QambPcQ>A@w z3V6eWZbGmIIo9Z}H=Ga*^h(%W5Y*`mE=iEf3A4#+S;X^Ux& z3C}iBYM(+&?_cw?2Wrijt$DDgei{Ycv@qKMbI!QX>p;y3R`R|*!YGy_z%)u3p%NH{ z-iywQ&W;v`gjqT1qbz!IY!Z> zHgNYvw>Ge9MVdy|-8IPfuDxXNONRLbidlf8^HU8cLK2mF7cGp0LmVYoD&D%a9mFg| zF%a59QN#npQv@VE)Ia9Xt5zxyDwHt5_TB)+u^e%h4M%g}0Z19cUZ=*U1WQgrQDu}8 zzBNYKt@zfQI3-dG={-eiC(!?CaHSpt#;*~NrkV%P#xiUx0U3`2MSrPNh{6$`>Y~YU zO=8B<$_%QrtMVL0eRGtl9eB)=G{zDGzCr32ToXA06vtXL*gc^(pW6*kzqWMCOa1f` z`f92mL{y2H7!M?{Oai8oa2!u|ATm2%3nD*I$7r#;w7nQyGag5Q2F$DwYwiXh!Vy+9 z$ykYn9G@B8e0X|2BH;+|FBysj_z_jzd?3`B?Ss83K0EQ*xS*W#!yML@6GS*aiC$-Rw(8QxLU9^^fwZHj*XLf6#geF{ zE=hvbo);O-y?;ROpZuQ1)6@EDibF*ZO*08X@+_2!awH(d z+$kra)>`MCwHyyH$27$4Tqi0uYoV01$K9aYZEq>Pm=ZZ@jl0?e@wW6lT2a%}NV+yX zi!x;qKC0czh44wn2_0U9BOGNhp~KT5W%=#bH#p3+cdghW3fcvZaVQ!8fO#TH&svZm z8IE4xq#O(3>|M?Jo@2GShX(^3${w6CyLzx!9t%Ex^6sNy@{G1ns!d_d zT(cuHj$3KeZ*LjCO$Z&fP%C9oLbuBnJgXUo}ecfW%5g zG1JoE!rkq3pSL>)?aqs~RdbbAi>c%Ui`IbgRzXYKQy~AMAZmiQ6noVGR&_NK5k!5% zvxp^#(Bgi`67wKrRAMSc4{qJ^F11ld_vsm2ygLFxMWFEv5vArCWnul+Zl`dl1#i8sD6zng1PF2 zG4PjGFsf=+G3qy=WK?wuea>k$Qe+S*?`?PTa02gCI!7D|L#%^{ zMeL%FV|JZ8JUxrORXqjx>c}`}T>3dhrFfh|0<`;xOE`OX^7`}nyZ0Yn{_s@o^H9bx z#1dMqJirEDzCiFE#~2A5oaJ}#?x4lsXTfNSUCEOkoJvU01SGmrD(wJYLqcdKad8Bc!NKXd)qcL&NMj0`TvFYA}n=K#aqn z5+N~4!+^6y+j!NDZZM^h8oTtfwEZO(5=X{7)NxA|$LyNWVbAlr0lYyO4M%{KPhQ6ZJo=#JzlZri?piwY0+jlF$L`$LyvZQDFe+Q&58Y+w*n< z_`f=UCboErA1^GXFhX)6>^Z&5( ze0OvHKgRXtOZyp&Nz&87p8<)noTT_=qSz9Sa1Wlf^#*M}^IqSih#I@UoTi|Y0Rqpq zYe9qugqFmI5go1Ksg9ioCf8USt4<=RmhZ-xa}r^=1<5lS!r`9&Ns^0fFd#SJw+diN zW~ko=zhj$sk&mGS*93maP)r5{M}X2)V;*?_#0I)P&okqkD~QQ;a3j>3-%$CH$9Pmwf3@RX&$H2(IsUXCP=4Z#1nJ0=(FF1 z*EElbNr;%qczf8 zeG}NnC>*)nQVs-)DoBp#5Lb?8%ta14hklGw?T^sfiDZDrj6@(pmTLFn_0=`v5yhee zy@IsDS+WbsRdcPi8rD|^(FsNrH)B0Q0uoH6wzh4+Sb>>fX>>cvr0z=OG!E7&^>l>9 z-|@upQU9}DA$FS6NXiJT?oCP^y-a 
zf~=#fBc^t}4t3gCwsm#Dkc=@kq))S2gV9iWFM-eq-&Ap*;(7kZ8^*4bR4OkUou^=O zy+$~^0u!Wm2&pC$Gf6p=BNG=+n3}1o{-i~i4cOvHY0TC%>*c+FIyui%2Hd?5$LB6- zVMhnu%MYg*U{|w2ylGy^Krf`HPX72l1SVNw8=mT2?@}G6*F`>a?@GA|+S9D_5h`JK zWyQJ#g$kB6WbiK#*t`gn7SYhOd3N=>Ebh4VsDU(No~ygd0N2aA5qL$EF2Qdzxjy)I zc4u;a6%I7-!Un{f1d&-zHehLUTk;eRB@NAz0Y;(El-&@f@F$&=(jwE zBy4ZbX`m)zdxp?jX%3fT?rbLhlM{QIY6yK4UYW#foh_9~C-0gMjAq$e!l+d`#3~4s z24|Vf(}o0~Fl1bZ!z>cY0)vpnF%FeTLVY&IO{b$Cd#A=Ai&vc&WB{{ft&!_auW7*- zlQ#6n@lGycdrBnk*cf6KrL8}TyMI&PF(Vi0i&pszr1Xp~fz*fN94Q$-7^&Y<4a>vD z8%1oRh;0tTqem>t+S2`M!Tjbe>yt@MwD$JWWB}M_GgH+aFUg#zr8`I&DF}$>I2HVSeT| zi=&Zvllc)erhPQ{MBDzudRCmW{(Sf7SMdMOi2v9i9cM8vrQ7^z+(5TBOO@nG4UVSb=Q(A{V#M0jZKT9Jdank_! zMq;Yp{!#5vnu?|&qdY&|jk5y|4}s-92j7DCIAPW(&l4#HC@un{q07o5LS*`V=mELG zQP10gW%A9px`+4UXMxEK0*svUKb2?xw-&eG+_m`pU%ed6qWWKl`}O?K!{?j)&&RoH z61H~0Klb6iInf__qSuiA_KtIR*p?-_7Vx^_Oy!M*R-V`^^j|J}R{xQ*NaO_pbk3pr z)5v^%U!BSNhd!0-s)Js=dD#`Myw(v)U3Hcg6vyi{jtUW5&qenjbwo_b8rMswNSX)t z*#OO1kza5BFP8s(LL>cZ=+b83BKiOPdENfo-QURn$GLt(`|p)mSQ|5NV+Q_4W+034 zdpuCf#vKK7*02XKt>rAaVnx>f8HZ9Iiw0=5*w^wqp;R4iR-%68V7a&aU%?DmB>xZg zJ2m;=+1uUJe|(hdYw74F}pAyDqz3yYnJ@a3knx?YU8^Sy%9?YMY1R!Y@mq*={k!M7CYeAO5!wjjtRwx=QT~g z*z}dDfh)_i!jVclxP?K?kUUhJ*m1z>h|B_3Yu5}c<#>##ydZQK<8wx+{0ky+#<8E5 zZU*?SPVDtwi_ZUbd<?%vaA9sBLsMS`)y=QSP^Vskb+ghA3uBByBXvBUEFx`_@t`Wl;~FJh^ALO2OPG zAHVolc;ff`V+-vliWn8|Xgu+Ez;`A1{ht5&h6pMAyH8JMaoC9s{d+1hw8R+)`CENc zg`3L-nJg!#8#SEP(~auls9~_YNkeVuFJGE^G|IXuL!eoqa0Q-A3pf8_#`I2T>X-}m z7fT^fS|LUA%C{TlqEYU?A|~a^(?$DAJ?iKx_zm*U)Io>p%AWbs zTwv_U1~fICpeUv8Ozl}MB^s9HguZ!RDw{s9Z&6;o45F00IhUbDW$nV^?UVt9EJ+ZJ z3IVGaxUhJBxpApj=*u~(ZMj1(h+i%xV;mTX_z=sNg`(cRpBJwTBW6R%gp4rfFLTFv z@p$}l_dnN({vBTn?f*3d0Os5O-A=dDsr&yAHuXOr=lXhP{l-fF{Y3%3vRz&(OEljh z1@2QWqQc$s=boqh!;pVB!YoLsztH@i#WA77pY+R)`aUn;amRgvZhoS98S)-K;ylB|HS*fLmZ4*7}@; z&junMY)Q}V_+W#2HUlFnoSkHgjojBTeVAdsj5W?UJe@_scJZiWVsFnd(U(|P_G^sO zo}iKqQ~D;O_6Nt2 zgNN0Xo1Z*+fulzFszLr|wH3!GQj6W@yEUxmxjvGGjuBn+OZS{A{#38=8O&Jzl~2f* 
zJuD+B)4yU_z6FpPj+VYJUq+<|FFG$emD}zU*nj)*;k@)f*C<8t369a^0*8!7q6dfN zu975}Wio$wP6dCwcQuprK#X~nm=$ByM<6knu%_kh~JEDL^h+^xfl?*szGVlOU9JA|l2jz9%s0{8X zicB^CJ>XRkkeesy|w^4Wfu7X&k3FtJGM58U#M0<#}2IoNZ<;HbROOxd_5keb*n zI8uEySnFxgQB9$+u6SNr@FaJvJaKpQ7j;eu&u^rh{jAIW=XZ>EApbLB*Yq0k=;-`Z z-5W6>&#=5^{HoxEx_NqjT)lCAdQ#lr89fp|32gPXd@fvlS>rO)GmugJWnY#_JsZVb z3iBmsp2fHaPu?qtM|AQ;D}!$c&2Aj*59Xsd+6Rn;P%JT3^YzEH5y$pj=%X|};tAux zksPVfM~K43YYl7mSXr9JIKfn+xTqwUH+`E4shb-HY8OtflWxIZ81It^P!zeZn`;_} z3Q&-H+Y>^|&b9>I^x*K|VDG>K7zk5Gej&LS|gi#_{CGDp1WpS%nEcx!2U0@r9 z>|0aFR`>ce_YGN+GK#6}ft&lUqTuOAgw4&oxx3IlE}c&7VO9zLA(y6o24oFd$($ znXmCtBXx6t+y{r4q39Kg)#}MPRK3?YYE!q$aB~vpER5;2xm{b9$h^Ezu6=aurfKR6 zi615@o~eS(KmYA_Bv-5fv*7&i?suoY|99Bgy#Mtm*IK{-P*)&at>sNwm~T_@*p!8N zybtYsqu0IU|B7bdBKhAvsMr5|zQ1?4k^hfzHS0XJFFh}C{XS64&w1BsBfo!#hU0&# zX#Fm(`SRbzfWE5y-`(3kn3Dg`H}d~cuBGFD_A#MDUDch5AY@6qel)x_t(NvAE=G@5 zEr6PxaPNKEeg>=fLO{WxDs(sfWMmUaP7_+_+UxW^Kfa4fRGRFE9*V27NBX0{R z9_ly_JWV+skQ*H73{Uv4?Ev1Zf`1@RSSH}~bvuX8>-)d6yV3t2<=O(PZ+YHF`|H!wk(8;lXO zK5Rz&2oWm4eGmiD2vW|*B*H?sy>Op!SF+>Hf1kYnZ_m5DybKu?EXLjsTEhjc*2J-` zkeR2+mkVZ=qKhD;vPklB8kd)sp0~9Hr&MwlWuX!;o~I?1)|;Wvm=uzuR8cTZ<&elx z*4O5X+X(jGZ7Av>0;8(;-*dgh0O+;b6;)+B|7e%VE0-e-AOD8qq!`aC_1jLcf-xv~ z041ESNgO*?Rye7793l#RY_Y=;5DG~aOOnReR#3=H!5Jmgt+pRQ(kClleG; zK91RS&-0+=76Tx~Do2T$3#-2C(Gf(Pr7c38rJ?yVU>p>8+80-old}s;4KeBVPK``h zWMH^~h$yH)0&3QRBCI^~`FgO>H8{;PCe)EN0fD7de7~&gyO)X;%Jc~8bkaPOT&6UD zQwlO4WCp&7PD)mc(p(kFnZR7vp&|oOa5?G;S zer-4@d4%oOw)35jp|BmWIA+(P_m!lh9^4c#w!9KKCzX&GVJMN5e|C^o_HIS6n5F!& za_`gAt;*lqAh4FkTF{7G09~VrW;PpR&ec+H908(?D;}0g(R$whfoIP?ko4KJ9=st! 
zg5nD|X0ZE4;-zMq8qpjJKT*;=Aq+D2s2$-LOS9)LlS#{hyetjgI5oE0OkQ)Gu(6U0 zaXiIjXVMBlLa>k^>2zV4FBLOivF@PX)v*@8ChWM(Ezi4!bN8Vqn19{E2^JwI`UUU> zZ69*oy1H}A_4aOit$SYgZ~3rmHMqClEnLa$1vb5S#l0) zI?O}oz)>L4a0coP6i75&9rYaU<+vLh20zXpepc^-f};A{%sDvK*lV=yJA&)DZsBxr z#^gE2LY+tt?X4&%SF)YMhg&$yl0N3@EOixeS47^z?XJ?>P+;rE1u;p86hW9~4V2s} zF=pTROwbKUvILILe_9233-_|y=_E_7p;;6pIBD3>O%#}xt5dN1;w@Q@gc=1(eZgYJ zER-p*4RN%FSy<~FH*hX`t;4T&-NI#_8TxQ4GzGI` zf|u2iF0eEflVotIOf79f*c_5-Kv3)Q=Q-fK{Ax1=+6q}=4(<`Mxzr;a<#%li(0VzH z#-1tjOt3mB+EiE8I1!;zX_ZAiDHC@KU+$Jdel7*M_J3KO16$rz8UFgWGjUvVqe;GY z!4qKppK&P7yYM9v)G;2E=D_!t+{e{@P^6kSgS&;(b3jqVvC!)X2|k-yoLo{7P z3B-tHaik70LueE%qG07SdD`80ZwicWq`-Ne1DSE4j3+3Ds~6fo8#uTt&%snN|5+!R zPMTl?IJX@@_b8nWDT^MyjMj7>VBRP}t0PrhO%*?^j`-N<&%-IWz!=6R<7AS=OHklG zMzDZ{TR3uGEwKJ>p{Yn&Wal7R!cvEIXm;hA{PQh+7GbBAt^;=mcV!AnpEsBr zgRo8tK7LxZucaU_<-N$`<@Nyv^JWSBKSr_199SNObx=?)2tIcey-TfqgE&VM6Cg5W z#3eHy6aYf=_>Nx+6%?E%ASht9d*F%I4f@}(m$Q#&tjQKiqQm{g6f_4zW<@}jMm>jn*$tit-SzB)dBKpT2#DII zY9W=VSb~C;BOvp>ZC&wr<p@ep-t%Cdnz~RoeK77{gMxW#>O$RgX$n@>i<-Vs-4Ifn6-QHuFFKg#Q*gPvbO>iL z1*KrloH*J_MCd5gvLT!~6ikQIX2;Q1n}x462h$<7xpA}=h*<9&6d#|Qb_37y{FZPu z-9hz-zc3E?1|?~XwSQQ$6pl}UV~P@+FO*ogtH~no0%N#zz7CYcmxiAe#|mWmY?BV;oO* z0KvbG-oDYcwh79K+%KqDVmMnI`g+)$cfy76Y^q`e*CPd}mKmlUi%T=9HGtCmHgcqt zzV|AO)6yT?p2_+`V~nIIClu-cWX@4-l|_o`givgwe<^2@g)G)v zLZ|)e>f?Q+Gft_eFeutgSf=n%-3e8Harl){wKPoTGsMRu#v3^@C@Gh9*V9c@zFdY-l^m!{FCRZ5zzaGE#fpPGU ze$`HwTsT?xuCG`~ne?oGK2Il&8H!w_O_;2@Muc*j1Gr$`KS!8aZ?akv&V16{%Ypot zemT|Fzf^RIEF2j^yfPs>kf19JLZPqO%CwS+q{K<8w`k6+g8xYBB_U9PJxLOq?{{QH z7@Ct8-69o;V$j5FNVQ$$1_CPtE&013Lh{0`mM< zmr@!dK6EAHpXy-}#<8d2Z|}h6l&itUCL)#)iX;x;bYMY=L`e<}YR9{rvheF2?RyL; z7A}!O0IJp9*Z~_!dCy|JW3uau8?Ok}Z*dJ6_iE-)C9}4caz=!G8BE(n(q?blowoo60RY=Sw>1C& diff --git a/assets/rke2-ingress-nginx/rke2-ingress-nginx-3.3.001.tgz b/assets/rke2-ingress-nginx/rke2-ingress-nginx-3.3.001.tgz deleted file mode 100755 index ff3dd0e27987c6430a07dc8dc82474aed9d7f9ec..0000000000000000000000000000000000000000 GIT binary patch literal 0 
HcmV?d00001 literal 20585 zcmV*RKwiHeiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMZ{dK)+LAUc2ZDX>)b*vhDz7u_80FLPFrokTmfq9fVK{EkmP z8|(&2jJg{=0Gblxc)xv(eZ75>^WoO_8!4KWJ&pOs5xX0OLZPZqC=?3x7L6?&%qe2b z3g$StADk~B<@O5t-e0y;hQs0T zzkuPE+OI7U-(nxR7}|u+2jW*K zehnPr`y^lkzKHzkz!^BlgG&MBok!-xX3P2DcsLC2t?WZP4CcLCw7MhIWs{x-GzL8j zC>#WGF|D5b?fa2EIlN2lYXy!;;zPe+~n@1pGODRveTJ;U>e zLLunAy*+S^W-WzsF!jiE;6ug{9VD884HQ3RAlts%+uQ3UUrl-dU?0wr_yYinLc%a7 zbTt7K2F?Oe1twJz20EM0Hhmmj!zju@W+wt(Z~lgZ3E`tI~Q_u2H> z(a|A%b~tjrJAL-u^muf9?6}B1IzDoyr!G1@JcZAur{mM9GoHfXXn5j|5k$|X5@r~A z-WBn%vzma5*(Kpu6fqQV`En23-Q8I(0;HmhStH>GTXP8}S>-f{7^2oZ z!Y&%bC?}vbgZMBUD6D6AJ^>#-O04$4Sr8Bo{S-Te$v5*7|rMn{u>R}Y{Oot|)WU%+3>!$hx zUANGcUnJ8(&szaQ4Wwnj1#ZBUWY|5B{3x%yfWchU#FB^3y-3jeM@$+24fDm%gz-zn z0SY=8TQC$^ZDf0aQDYAUTp(jK}G)1)(E-{RIVVYfsLtGSkWHp#ViX{BO-V#HLFOVf5 zS{kB6!q}UPLmn}32N{@pFt`O%_7;*r^ z&|CG?G)MU*anVJ<5e=Ys5l+AidQ9v5YxJ)OQ{+nXT6FXd^94A+yZ}=uno7tl_<%#t z(}VMrMb1K4-XuUwv=5>XOjoi3bp8DU37+A=K@uDcT-jTJ4{%BFgaaSngB9X7IER7I zw=N1Pav(=8Afj5mdP{3M#v;T9Bh!XRPV%;*c*rcn^!K+eZnNotjxlSnD;(o~KKN|uOHEbR?VKMlcjM_OAr5NvI# zzKR$Z`sqL9d>BFp{cZ3c`tx_<+28b+O86mP{GwVx6VNvU`V#w?EAlubj7P61b@Fwk zYKc)0pyeV!kDe4q`xF}c}uTYSYr7(KWDy6bqq~ePf85+Ao47n}D~! 
zOAVep(F)x--+uh%7ySgN>Fq~K{7L)>0EOA#(drt_($Dpjn67W6eWEK_!Y&tzUw7a+ zL_P^_5ErWRxiocs1z&||0^*WnN|0l5K|A$b9v8fkqYuv6hyDQNhL3_$LZX+{C7)=K5S(!*cZuw+_42P`I4Fbz{6Po9(9Y zD)!o-^-{*luvNdDuZ-$`t-l2<6mz3=HF7FvGC+~H5(|5|4o!R;*hK+1MyqKHxWtDz zc*^zxUqB8JbQWqFmAcekb0WAS^wuNLWdkvZi+?QBlBF=UQkbUZN%hT*{Nz0OLeH9l zLY6QzZf?_rq=s!R41ahMyyKS+?U&fNk9ZvqB6W;3izm*?b?a5Hnp zA6CkblpgI1$_#Ne<4&@SXJPHe)WgaV^>HAl(Vr-E&=sOsdWD3cqdvKVSo$V80W-Wr z!WxRWb}Y%|h@hNMH4X^^!l~u@of5;2isiL_GOCcE*aT zP>*`w&v-BaWzCD9i}GgVdFq3QDrfss}Ua$`Tq))jt=g2oxuv zKhPli&CAMa=WWT}C`F-%9e7TnK#-=VRxfAi#ggpRbJ@u>C3HMXvN}>cueJt&8%f9B z`PCZ`aqJm44GzFGVk@<}k$`FIXav2F$)fKx!@f%-@tt}RQX}Vq$csyW62?cN3y>_*+43`PdA(W7H4pTF_TI)!QS{16 zPk!d9vO1*#WTm?g@h)qbvn=Pn5>(V%P8U}Ote(*s7jr_gcY+vz9NaCiv&ehQjIUX$ z_SnT_K&@eQ7(jzPbg!lb3x_Pd&0(HjTq&bdcS4v7Kj?r<-4of_)Z-hekFH@bN2#ic zY(LA*(srYvMinv1JGz%#%Qx6|RWci8h{&pRi&o~?nlwg%%q=$Hi@%+0gG z-V#$DDNd1|EB!*4hW8k0?g32APw|31)i=IogziAo8b( z?#I@AdQ&oGwg~(DN_>x5@PGc-|K@R1?tyMQRXHtDQi(u8L8uy>QR0IYiKvm+|MS28 zHv=(z=|^izR=o)=ZN~IW5$`@b6hV%xSOx{dScYQ;dz!Pw#LpMVQV~^F%yk-z5;8~^ zFjy;6!Au8N#Tw$@Y#sR@T}w_GFH`bwMq9lxDCviR{+U`JszwgS!)L?HGqC_0t9T8) zxHUV3YDRiCd^QxHJR3e!Tc=sj2C&vpG0gb40B5wb+N{$?YHCUo3$3)lV2tQ&?JZQ%*+?Zd!@oX}Ow+F6j&vi1>(LL2-FDUQAKGPF*`1SwI_YZ=w3 z>3fM$R=ORyO?BI-a6gC2ZB)8)v+tb5k@nhP%x#mJ*9}`*A#v?ud`il$@sLjf&xN5C z%u~CmRnLVbrM;!L*)36~pTDvM%Q=u56HP@wE1Pg4%pAtERVv!Z)sMdl=2cithW6MV zntIUe;3{%kpsHH>8_R+oBOaM^!nliQ?q$4<{rxvjuYP&{?(*!_i<_&n^B4OC;HhYH zvkeq1Gx(_98!+h&*wQ^*090s8VGGy1P-|op0cC6FZ$)zw>rc{V9Je8%5-;-6E3vLl z2E*XukZ>iP1JJYBEe=@HgfD6+-~&dNgC(Q`4^J~x9;m7VRn>^hujv-cgeHwriKzGx z2Z>dMr%twd5nx`XSCVH3+RB$Zx=~>Ecm*CX z4AWXie8D)R9BG^kO2Wpn{FPRO8Xqmm(}=C6+d%-T@Z51ssgsO zOU`KGA0hTqF0*H`O_Eu8m9MjkODjDC<`yvK+lm!T!x~0?qR~+%mN89?4Rmgh(54sU z&Z00K#}vq%Rsp#aD=u+v5fxM7KN081S;&dfx>Hn@p)qq5#8u_lmF37jw1@Gt+M^|s z53kfrEev0ZiHy!v@uM=_T+4%QQuRh~!9U3~2BH5j)ny=QW-(pr22?K!4sP9*LWBUw z9(WlKl~Z2(PbMWkU@^*grd^zBQJ^FD-2)TCpLUrFDN4r>#YYyrRppAmC1Zw^_=qo1 zB<&mKP$)R1FSwvz@sYx)FFVukRj_z6ip~B5#vx}Gx)*>GAA%rC)`PG`A3};bO8p31 
zmBa788&wISX{*`*G+zYq%E?eyxABSuINs)}va93iG~Fn_z6(l zQW6y?gI412Ula2s^gMNHLyY>2cpkZnowTx#g?8gtf`eBX?WADJQw2-tO~BzW9$K&< z0imX=sXA%}X-x#2Wj1vtC=~oJ%w&MW_yO`z{(u@1Y*e{r}i8T0AP{tLou^Ofj(xUmb`M6l0cQZ zS?%!%?lo`~Nz}mx#3CP64?_|(`rp%ma}wRhor9b+lY}F9}U%ENd#R1eKgb^m2ok8l=QlUq8m=a z*si%pPQs#QK^{MlzB94fjuGhfs#aGrvjXwFDRgeto_}v|uSd}mi{;WFUxWMtYuqLgXdpXbhf14yTYI)=R<_WN?D7AV@!^iBA|k z8!$W%=7@Nr4Vl$IwV{E&R9MBEu6wZ|4336J=6t;9 zqh{wH)RReeTR6Q)+b|o1r5i9!f;c(E$UZhXAx6r`Y{=gbkSTFj!X&3Ag~Q45@u6Cy zrq;CTVc9_zy$CsTqyee-t>vas%J7z+ptVlDNMBBtkS2I1dm5$}yJUvXOf1}W9CoHo z4(W(A6(E*|3=$Hy@%zioGUL!iGK3=bfTfMOUI@S)(TFm2?0CYMhMt(>tM8# zYD`KIIbIWw?kLE*M?o^DFM1muVE%k3lkIwkA}u1h1{cD<&~` zhDO;aJRMSJV(?5v-B^X-?PWsnn!eA6BbDyyZXRruCsC!h@DaxwfxL%M2NsmL>8p(^ z9nyCF!`V52Oz7a2NvzU|c;Xy9bP641ai~NYtst4S7bhn@QD2OAZthoS$&JaQENvR0 z1N|NNzoa|xinxjXQszp+vMD34PN}O{02v<6G$>0y>!p;9Hik;x;5t1nEt} z9RMkXE5WbQl)H*N??xV#WkyxW(~%S}MPDhJW5(%9z?COEpvMaN$JVwxgoS6x<}KZj zWf~gv@@Pr!OZMi}x!y~8`e_fmKEH~kNoY{Do(LhPVhmbjNt2Tdo@N-#uu3soTFiRf z35D5gqFwe(;7nhOy7|(Q$McuY^!HpLY2lcTrv^323Z%Pr%s33&_9}Nq3j@{Ug`z^*(&C0G=65&V;3|w&41} z{#B(8O+eTt*iFpg;N!>MPl%g12@_Qb-dw)~)0N2{BVIFPyew=IxA-(596XgF8+9$k z;e@tOa{K$DRp~V)l!KpNzrK2R`OEVc@2-Bie*OK25A7idI06&oF;oZho2Np_Iot!5 z6>o0=(Nb|k@Z(1?M?47+w+Q$rBS8rH;`{ywn}x`+Whm&|;qNgzANv9t4jeCX5$I<| zFWrq<@t~NnhE#h++ zV~%{u0?LhV$7=x50sZ4+e_wi{oCN{>Uopd6#nBE1-)M9u-()#et|X;1LOa1zBDus+ z4uRb-fyoL$Z`Ol)WA~F$&*+kLJaQ|2a>0QQjqe!D&|TpiX&R(hKYFTD9c=~ zR!ih>G(NS5_Rt)A)TsFsYr7KPIoPSqN6}&gox!p zXf|9_Fi6jAvn&YX6L5ETXPfA1ha@|ZlHjRr8NqQZ{Xl@2CL0jhqGjeXjowWhpIQ2C z#!ZoJcLE0574mw4nOIY*ldVkpE0g>gcm`g-yaCuaz4ZD!R$s~Z0Kn5*tcnGyUK;x>Do(t@9Ree|`L+V%gt1@b~fna#t8Tm@0T{vQpGPRF_R|LJ%wBAQqynIGTCo=v`Q&uu9@ zu^fd-z_;P<@bAJj8wWft{~#miUoFRRlN`T3B-+r zQRelE9JQnt#(r!t=k5lud%Yi4g0f2$UZ@BmgX9?YLvIy_{b%yzrh=Meu3SMvKlV(K(F*)!Y5wp4z8%m0x{Z8l78Taud%^4m;$eV+t6?4_v_>MI3mfjr-4i-9x* zCUwp{WhvN-2dcheIt1pWZo*DE=JiZ<7T?Av-W8Ma#h1Q)_)t>iPw=mZ@R$?zf~p9` zhXI@;*P5;-8ON8oPNkHJCV;0U9rK;FN-41sLDg!9{nXLmqEbSxv@F5oOR&r4RHAwf~m>xO!Mx^?!fHc_`d2;6C 
zk+QS|mhy&4n_LFghr~5;Dmsk?X3CkQeN$_yotC3fTdQtxQc5h=No_`Kzh%?wj9`Mn z2Cyp!_F5jEFuMnogcQrUCB#2b5{1&BxXMQomCO!>Xd~Nz$czX>{u0r1;)h~3>dSjI zkF23sVZjhiv4=TE%>H|F`=Iq(S+Rw6~nyc9F>0_%0O2N?%?gQA47*?A26HJtwnJ?v&^CAV)21RL7p zQ+p^#_Eb!WR}{$!QN_)dZmP+6D4WCieqt8QAof82JafCAjxv>>%T6Zg5>=ysP_V%B zg|Mp|Xqj|V#s0k0y!y_BX_d;UiWss>!-Yx=i$QA+*Z1Lbdv3N;DItZpFseO!)v6|0OGO6*(DiBcxqsXIB@R_RRhnGSD{z0KISlM}5`e9yk*} zO@0l1Q~2#O21fR1+yj7fPajf0o5{=L1I!l4oq%CB7M^F4_Gd%;wlTVzq@^)=Rjimj zENn=eKaBeUaPptf_Z|Q_8OUcKcG#wf14?q{tjQtI5=d)vi3|sgV{A8|WB&MUf=kKC z-15-iwPEK7n8Uho)HKGZc-pf#!iCyx1~AF|w1%F&t{<~T)c zKNBNg=|L5;^WlS?q*3|!aUw>8!?}E;P6632o+Ftc?c+!A-#>iNzdwEiDSu}_*Ov?> zGGsQ1Rn{AJ<%3QVvt)srT-8ajiX>Jks27CEBLVB4v`BQ!!Gmhs`OpNLEu8|Eu{l~u zlwJ5#Uo$Lf)by*?_*F&c*G@w(&v~6LnY7p0lFocx`mQGVHBgQf4k%U!;uWmu(bLCfvI#H!bS)$g7g;?`0GVs%_BZ0a?;ELiU_Rt)?U=H-4 zx|hKiI5Vc=MO3hyO6RJhLO07p^KS zhF!bcj=Fm$AJw60`eIcPjHolEq{xa7L z${!&m|3nU#<7(-oEb>p6+y!@#x@T7xyPKFc*Z(a7zjbNS|D)rA{vVxo@gF-Wk30Ua zBJjJ@@1HvITc_(TiSJelt1*sSZ_aiG_9HJ3)Bg>D-?}u$e;4+Dj*rLP`JbJX$FKjZ z1HZdj;B&=&cLCpDOW9oiYoDt=&|fzNH0b}~>B(V!|9^bk<$vEv`HcEM*77;Y?-czf zQ1n%rZe96Lj(3`8%|3tz`5zCDkMj24=%`!&@1(4=X_OY^J#+u3yg5>(1IA2k0UiJ+ z&XN)5IZk-hWWuPw!ARcuh5`o-RJLqO7$kxG_m6=-;XCzE`eZ=rW}3Z_BH1*<9I=er zsXNr1Sf0Pee6G8Ho<7E$(NUj4njfGTjU+46OM%fsjcirdWcJP=6nj0>=fm@k1NFF& zbUYQNV#Xm?CCN&jeir{!cL^4&onOB=d;KEw=*`vh^rM0YLw%*Yh?E~Avk^W4KSHc7 z^k&#aFXV-UYyvJxpk{!Wme**B(Oten+0Hl~MgdpMGs*5&Cm6LcRny*5h zJmtxFnXQ&VLZ8k={;Esu{NFysOKoYI|Bp}e^Z#&k*v0?uq?FkIa`~@Qp2-`6GB4!S zh5Oq~=JN3QbG^xMW7oH1E+1#{BNVtH(Rc0Y$&a`w`K50vZFHS`m?ge*c@a&oeq|8#%J_xc?$eJCFYqQK3sYEEUe#MPX1qAw4n+BPPpmlMK>0P=TvG(qUy0xcidm}Sm zI%iFG_$r!Ca?>Q(6zruw*fRmwM}MtCHEY7O#kCf>YPz5$3d%V*a|$ns%G_1w2kNe- zGR)#(Devop8kU#dY()A;W>2-9|J&{Kl$WOcpW!g?{~H}0clm#IQa;c8U$&>Sdoy77 zId9h9_^7%Ett_proxWhJ+xh59n^Ld;lY0na+t-4wtM8p^I~g?X{~V9<>%YU(lao&W z@1oTCe_eguSM|=#c^u}rQ-^4R<@%mbCt{rVf{QK9#yyF5(@}?*7zwiDuRxKmFaPo` zxV1Duqy2Z1&;NCF)ZPEJo1*2P(9-)&ve*9A-G2BLstDQptdgj?1~N}8b@xZLM#+Ew 
zC&+4*_|D%72aq9r#zL=#zu$<^q5_den6$PlTPy49f9|wB_hm|x{U_dK_5b*&i~rwE z$tg7MgpXO3SF2yal`x;$p?zA1eDxfT83*;6vk7;?l!Y03;AsvG?u7dpvh32XTwnep zcPH}St^amXauOA}U#R>Gu6%&}=c{i~{v)?R{v-EGlK%~M0Gif+M~BCG|NrT*i~rq8 z`Qij!F=8Q$Q+rV_0(gk9SB6_PE*MiTCDnt(BUSCUA3Vg*m0QbgW7#h;R|{o^_$R1~Iq#>I{#NJByc@q=q$vQ@s8-Y4IeLnX&aSQMAN@g?`42 z6S{hdeath-p~^mJBBqKVy5SUZG}k%Ovd@|p7FhswRF(Nft`T%sI=yC9YOCC5&Dl0< zyw(p+i1}#~U)FI+`vjOdac&^U%Zxy$w@JI{2JAd##E23-?I zRQMY6xz^=vt>zF9&ycfnyc{*+#|=s@sh+UuXq<1l(5130vIBK3wwZM*$F%yc_{>eD zY0#FHyO-V4QG4HSusY5)TCfLeJ6@wUqN#`3LWdyM^+kV~?n6~@VkP`5$QSJ}R&C13 zACthsZbedC^ToyUvik9;3UyuqL4PAPei@9|@wv&U)miWrgTtM$3=QsthV<-VIF)Qq zt;ar*TZ&=iej*GbcU=rC&&iaaEu->NX`VlP09w|{@NRq1rVX|gfgM7-Q+`QYbP;ew z1L$3Zr7k$a_@z3-P}VO6#tMjwEUW=pScY#3QddYrY2y!3Fx98XOK>hG7x;eCI=e5S zyd6x-L6qYeb|6RJ4ZK*K16U#Ciwb1( zVB4XpP0CxeYOp8KJ*V)D&I|fpKpA`s&Kw7Y9NeN6leQd*xH2S}u=u5QCv%?ys1qK5 zZwDoi@C=}Tfu}jc^H1QRU>}Uj=p)S3+u}&e9*g#Bt9+jrxn=dsC{}n_nTcBzCzp~n zTU<)Q;!~5hHCv$TmVJ8p;`O^9E-s(HyLoZ_>qRcTd}G3IWr=3eduQ6L%wt;zuBsMT zFI#@Dvs+G-U)%JK6ZL~yrKWWME9?upIz@g~sK0#v?&|u*%U{l(m*G7DDShkVsrbLI zA=->=u5qPWco8g1)k5`RB&k-l7^%R-!%Gw(#;z!tqAa}^oQFRlo*x9AuR-hB{7XW4 z6*Fg=DxE3;V9o*w>i_)u^;PB-4lu{idyYKVWm|6Le1C8LZt}0ct;GCZR-=MPR$^>3^Zi=3R_s8#Dz(oOFJ6h=lXAaZ8D1>6aAXVuCl2u@ z;Pv@c?wwZo-&8E31njC+2>|8`y;0|)`+7iN#w*DxtlL{=H46pADRyesRf@*? 
zvql2fv`{YPRh0j93q{qT^j&6acBtLzaf7bai$$qxCocgxhfH~&%B5vDGEm<3dYoKO zr&0$~flgVs&}MXc^Zc>W$vE@Nx0qCL)NAd3wd30+D0rj8M&lh>oy;J|K8T_*?Zu9dh6Yr zSEHe3Gr|AhMKckh=e>ocHq`?$Xi3ziPRL>rCmFoI4tLCcprd3m^ z?VQ>jXHS*Z@&73F$jV3cH$9Y>ruhGp!~Fekc{JTB?5I5YN0O(Et z)DKfCUZJdO5hu)n9uDT&DUzbl!w#(R?92D(!g?wYVu$Arm^pFKYn&ImQwQCtgYMKp zck19XoH|&CfxANorSj1oI_M4^Z2!=~H%~JmVf)=-gKV3R>#)HZqxH!T8LWZJjt>`X zj@f5BRM5}k-0uz*bcYJMLj~QTg6>d3ZgZqNRFE5)?odH@sGtFpO%D}ppff>t=s=b3 z(81$ixb9FwcPODdl+YbY=nf@h9(IQkwz1Uf4kbLU$i(hYLU$;kJCx8JO6U$HG+@#l zO6U$HbcYg}bZB=dAt~LVgnyqy32o#57a`o}?)Sem-~TcmAD!mn|A)t;?*6Bpl=`rN zvY6?o+deB$yi7jT>|i;bA`Zv;IKa<@;y;8{=v@)_On-*xBfeX-Iyxtgp%-0v4e1>5 zc#o{?K!kW+I24hYR5l0@l2P6D@$YFSNPVvB!~3=BC!g#3%8fVa)kiU=VQ@*R^Q$*+ zIQH}<{a1)ODBy9ZqXqaXL=$kW*IANIX3=Hl6dh3+M#g%QfRhbxn;Crwf39G;y61*br^O!k*kj(jITLh2kRGp85vn9ly82OsBVevPe=KhjO zYyD4&kN5&bj7A<}Ypn&F^#Acm{{G*?@#whI|GOyl>%aUqe%(HN%plj1mjuV>^|wy~ z%n20}ghMDI0+yc~_rA(N$tdy7)oOE?LJs*{+4y9oR8e|!nuT! zqXaEaBWsXv0tRR3COMS+pd-mv`WSMJ=R}@ES0@eJi8^A^t2?-$BRKZJnfL^VNm}os zNrw@$d%0aT3p|Oplb*;Ydlq~yu(!B^%ftDDG55PC@X>RvXu%Mz=$lF(Ei-shg?nZ{$N2IG8Z z*FrlNjk;aDn=S<}4M^Qf1)wHPw+uElIJNk^6^0Farh6d%!0ebdqFoB6)ITw;_5unpOTj^o+l5IlVnPrfot@y z2vY?30wpn^fD`d$d;pLV!V~>0b*r*MC*aAei1BLy^^yeFgh)NfqA>K3j{*+8pJGF| zSVienNv+stvE0P{T4a%{EEXnxD-V@*>sbs}^Z?toDg?YP!@w)wbQpwV?Fm`cP|y zHwL=BUZ5pv*H|bAyR}P5FWC}Y6r8sihikFA`Ce(^-*Z@N7*B-y)AgjQ2_yF8_f#K1 zT;4v37<%%138N`oT1^JOe*wPQPf^yf|E4$)M*P}lVAKBJaG3Z1j>m_?&i>m)*}nbv zgW{4e$P6SN(t(#oHPv*sXM!-Lt-E$(ESmlkIb55a3ervHP)9bTbEAVGxed#*>(~FC zbX-@eOI!V~`7O@+w$3EbwEjOnIVr?{jJx=cos|0c50*>SS@TKE80&nSO8A|dY~m?J zE62=qlFk#}TQm1Z9S%vHgYgJgX7PQL;T)!K)i)9})~Qbwzi|vstT*C)Ruz5TFJImn zq{K(ymBc6W4kj#52w*v)3)*PP6 z^to?pRUe_re*cm{_Xp^~z(KUXzn_d*ut+`>f7EI2o*Eb4MD89=hz_96wS zJoSe5&ApvFRL25Ln20aq2(fvOI}d_2Zl9{+WWwu-u+%6KzTGFjWLKK3PN z!mYs*ju$bG=*5*nJO0_W#Z3LRf`j78SX+7*S1fBdURC*}dZ8*SVx7sCZ&Lp^o7OUI=A}We zyFe+G>=p}^@_MBUmIoM=`BsckxRym(1*FuZ{3@)-b?rYLdbmydZ+vo?kN-Iw9(MUZ zc2YKpN8O4gSOmWXo3I_w$73CCZjvQQjoKJx)pTh#&#LRLYpAt^v|+UL7L2!cKsT{D 
zsv&P~(5(Y=i%4$G$lIW~I>{|Dq@+8@%I0KN=G4*AVTpB=eEC((?QP1M^Z!O6zzy^N z$?-5h|BsG_-TmJ?DQ!K|TblK^Fvmac8U0J#oNHx5v^N+wH4ip72DUKuZ)>7|__Y4O zN&J&e*`LH8vu!`jI{uettQe9u%2_6TaqeLgCM7V|!Xb4l&Sl0UQSzb*mkL zsY_#OFNnEAZ@!)zpke(# zJR0Tme~(X3y7M19DaG_B*^>oThe7hIe{*=@5uEy1`^dysB+hG>HRWHY@7t>UAC61* z|At-s*G@`({QnyHA2ox#(hRa;{>PgPl%3^&%pt#N{>M5nkJ2Yuf%wDnKQ;kt&i@FK zBhqF=CoF`o03ajtm(W{tSeQd$h%R*HoB`Y(%z^~QoJ=g>)Wg(ecqQiST1~0P*GTdM z4(6!_3$NCTtg3FI{_?8LMJjtKTH*0#=og_E9FZ!uSw*9=dos^dIs0DMzQ0mv{dT#7zM=5IYfaD zqp0(T)&Xf_WEM9buK{hiHCbn0nZvvuO|yU4Ot#Os6O?9ne{iNL;xM z#W|ur4kX$?QRtwntSUMgem2+VTK>NcFM(~?{~ZlSdH>(hNf-aIo1!HoeJN|5U&&m| z`i0!f`tRc*th}W)*P-sF)~=+>disB}O8}en|KV_)KmT`l(&hi$Nhyi{sZRL5<}T4E zzalWD|8_o=WbO+ry-Cm9Pn5khDRn)PmA&UkP9@1bNa`k|_;w|^pChl7a-UNY=IBsY?UR#R{*Wx|+*e7ASwj_XPHmi%;aAsp|o| zX=__$E&adYB~Xp_|4H8dA02o3|94YL^ndyefUVC7kMa_!^_K}1^HAxHQc5>|k+-6l zj{V*#+^@8(t^YT;1g1&;r&cYk-$ z%$Hu)(*GMKfojnIRbr@bIvc|6PO8}L*|)(jfwuxAkTnHLAVd^+I6!vKetz@rh7*cg9htK03|ie;XZ*PCNhKPRbt041TZoHuL-Ur!pPsWH6Xx zzKEtWTT^0R4>BL`gA4UDmD2;v2nBzSD}aazlj8EHi_3oi$N&Ts3GIVGNXh+5u0uQm zT`<*tieB$E0TDw70J<*V3k2S0&3lc3?Rb!}36O}qPq`C?S+gP$jd6=N)$LwSnlsJ< zQVynsaK@A$6h^kq7W>A$ z0zM4j98tU1+uH+I6v=2WhB;!ro|Puya%7KA{whEF32|_ZLIN&=8R_-jzklzLfDsS% z1b9HjD?ztxcMJ+Cl=bK)0mu(G@buK|kOjlI3>z-iwLYSe97#tpiHp`+9+ZLwsK_sbUG1qpV=J1xgQ=;RCGR=;fr2e~Z*f7Q5GbcY_dk zn|b>C)4j~^eK18HxwGGyuhU&B@YTR24wFCRKkxK+gMBHGaW9Ay$tXXG#ydGAZvxzU zP@BmbKq=>Ll-0nf=@z_E`6^IlQnMvkb_>;15b`B;!RD*`Fizktk~HB-6T z^hppV7nnlt1I(W=5E7RGNX1}@$ykgkp@1SKW5~6^MF0qO5f#dOhVKDiKz;xiLJ|RV z?;y_;+Ii}cIS2{kV7dY?ao{qz0xx0U(iNCd;)8SIA_uz2Uro|ld;%^d@`5}yT~7$^ zBNs=$RBlcT(n5j+2vHo&)5=j2#)5%s{edX4Tn*d8ffdHEt+DgnXy}~4W8^;Dx7Db; z!gC?~!CYal^zT5y(`1)wi6Tiqen)^0aRBb%O0MU9Ss%%1R?2~b^S%twdr{$ijS_sX zL~PRQjco8Ep_*pJIbY23$dx=7kic>%=GcLr;RSY}Mh!BI7zaM$3*rJS5q_Hz&As?< zHNEW1YQ%Hn&WPc9n|xpLb%5>w36R0KfQlI3WDF!$;u;8KVK!2<#Z%nf)k1}@?ddrVqV zo(ITGfMp0UjzT77`fYne#qd&bn*>*)q0nP$Bx4?<0B(Nd15WndP&?pE*uhi7mU;^nmOmdH6ZDn}M3HUtyZLcTi 
z%kzkF;^RMz6)vY{@B^9=ij4J%gL%bNiUpq!qz(U0POE!P0?*;No<(Vti}PZ)Tsa=*y^w%?!HgVfLc136Vj zffk|+xngQf%50IeRU8Tpq%+L4v8ZDMkS)kvn~K`r-zz;5nw843PvS+J;wut(D=?MT zwA39SZdh~meA{FV*Gx61hP1;627M)b^nMTwaBP{hD1ozzyY zFVvihi|c3y1_0g5WC*FcIv41H5G01UR1fc~6F93mp3?}uw|l++B#{IwmUbza>7W`t z*=R7aHK$g!Wet@?=DfGTnJ42lI9?)ebs)w|=!l3Z`z_V>FCn6k6RMPm?6br?mlztz zT}Hv;jhN&TQoq;q&+1(H@BjCXamXVELO4eQ@we(QqP^Y~MN7E>uv8nGOwC)d_h12C z05fRF`u4Z)b0RtQh;w@Y?iSb)90dY$2gp>&B}c%GkfHdDcpkYEeN&UPv{Zr$=9u() zmaviE4(-v2JzC3YGnjg0I`E-br4G(zTOOI)zPmRZos9OCt+wP0JY8bSBj|yJu&>RO zuSm8JT*wwv0;vn`pjpMaF;?S4d$=vE#)m_VRcg2Zb!`2Oe>tCpu>L05EbP`18FJQ4d&_0Ay)E1MY{~c2+1@3a%~Q1MLrr zA*wGmZ@m}V7Q2e?)6#9Sp@{K^GJ%WGOcIEY7(!!dVF3<9#FZ8BHaqt_hEm%kwm^v@ zOADevL0%YTL_{l(R`Pul)bpZ^(+tjBGNLn~>qN&gZ0~7CMxdGeHIrjuY z>6^ohl2VhNm?V2?Ndd`wkV{LcLy*#q+0Y9YFc(oEMpXq4q>x4T&<{N%`d}y1-Pvz9 z;N>ZJ@$v^X-{0K41h121AibW>a#>&qv2kQo*bSGhflJMNlC=f@uB6zFw~xQGg+ZZu}U0nZ?pA<8Ig98 zlr!Wl3E6;CgbW|bT5OPowV!hLyTpe$xP(5+xb?*{?Y$eY_bP0t9m8gG#JYhXwr;s! z#hpp}6VNXg_@6!TQX&@A#)$c3qToOFt@nUNMmk9Wg$8qSP5@Uf$c!dv$Iz1v|E?_7PY3-X%+Cg#5@$RE4xtn|+*>7F=O%WaG|t7GryK+c0Z4$1!T;v^ zrTkv3;yprcg}D@kaIqIy8MS_To)qJkVQy~`&xe74`})I#4V!2XsxefCF0 zTbgy7bgFJ>Maz|g+ZVKaNGH$KT0=^g=%|!6=AZylOoT#Mz@^X!5U|k0+*)FECw;|g z7P*tIOd&>LH1)6p{t)`){POJ83qX-WsLLu8bgWKvH2MKXauLBmwvhO&5gRvR+R*gEMxpm9m)i${Y;s z@$p~POjWgB1|e~!Gle0!+iz|gQa&iD)kN3S-bt*Z-Y=ntUFkKwQ#fUo_V3esl`{mn zw4w+IDO$?CDH!j=Yu7xIW9qtU-cf2$?iSm#^HJhCPuwq(o-k$sZ|7Dx1^Dso;^m9yaS%J`|KFmdh};)L zg;fVy^lSoV7@8vTyuZJ{kMBveiI4fYK|bX<)cQt#{9f+cEl@mlQ>1^NA$mWu$0zpieb$er{FvAeg~}itgp@3?9Bde@l5=W`fCu3H!zlgUSW&vCA0~aEn1PpOY?RO|eXQehC)@h?D{&!rw z|8+F(?tk1#$=Xo*J|8*yH!|%OH1n{ThLtfDn~^5TS=KKk?Y~%e^m;%q@zm>xAxnn) zr5z{AhWgy(__r(VZHT?;v%NYh4MGKn@M_roa|6!KDDoye~Iy%bB|G2yVV>jg!%71MBf9^hZrMfi9 zKX*baaziUl*dFJ0Y_AcT*Z+C{-_ddB|JzO3(fl7T_KSQJr{(FT7X5Xfbq;_nmbUW$ zN!R~{{GX%Yu=D@zr0mB3-%0FaEp6ogA+rCMHu7Is{}0EbF8|+7$|I5gPVzrfsgQpJ zwfviuiJx-*9~~9_|Hobaznzqw$p7c=WLGLnll<$`l%LlAFW&z+Jn7c|J1N_ef6XVk zx7xSgDThZ}n&dxZJFx#p`St&BJnrWIos`|!|Ap(L#CKUO@wo}{Z(5qgI${2v~6 
z^1qXkmH(Ef^g2E8)s&X<|DZ77CiySM|Bp{c-Tm)7DY^N73(>yaVZSe_G|K-L0stE4 z|HAqI(YV|H-%Z&AFVT#HsRx5wnS};19w(SlV_us9vYXPs?9wRzTQ~sRB>zI#^YVYx z$^TBuX7hh{+V^WIjq<;R1Hg^;|2V(@Kkn}T-BH<^{okGT{US@F{BI!uu+jcM$?yM< z#;2Y9@1$%d|6R1-*HRkge+vl!8s&dni2pr0?)-l{Dckq|eeNE0Wu4L}|62$EUf=&W zJUZ;o|L&x$;oIxN_`br@DF0hX0MfYrFP#57IvscMe>*9kWc}Y|`}kr?Yx)0_{{L}t z|8LmY|2rw4K>j<4{ens>`Tx}ReaRp0Bn^1V*dY= zZvDTTvfljPo%Z|MN^AN5l>2{0|Nr=;lmDHR?aTk??on5|(v_}srA_(&0{{U3|BgkY IS^$Uw0M)fXSpWb4 diff --git a/assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.801.tgz b/assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.801.tgz deleted file mode 100755 index 1b913828438a33e7ae62bfe03b47df89a74808e1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4315 zcmV<15G3y(iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH+#bK5qy{mfr+Cg~m9b1jOpWvAtAcCO`;zyP3ZZPx$&&H+e@q$E3Ove~;kMrUFPI4>NW7apo!;L%`FPH|9Zb+a0tXH460 z#d7h@7i&=zMW^Gj`x`}3|8I1BI{s!fjz*`^Y4l`t{7p0(kH*nA5PdljcDK@4roV|k zn^%8w|0azU{1?=QD;dLb6tJRbeGNz7hu;M$nnd%$y3Pw}EE58LET;$)GMO+~;ObiG z)WpF6)}|EXj7!V7MD0c}XI#XPkb#;1(M%>8>M&7x03cb8F{+Ql-aQwiq6Vw{Cr}cC$FOhEQr}(_y8reZ7KNOP$#M)_TGTU^ zpaCXR*o)&1z-bHMAeeHQo_XFeRP;!!iy2s5A|wKtEDPX;Wm8d+Nv5ug<@zYfv-W(o_XW(;xz@;FYbADsZOJWSEBVsx+2lpKD|!pv{1e4k{PUmQT)lYr>)FLyFqT=Qc#srtJ>t1g27hJT#xTkvS`DIn zkqYdPMnj&+X?-`?wAJo~?QnRpMvdWU?3o0Q;gupUQcxQACq;{i1=6Lc-6rszKT#in zd%HXXQlXl`0#{F6UW9^iZOG0n_^N1%FhgcbjWA~xO%;oZj1BRoK+SWMmWdk3#6k7x z2ZCOQlU9(t<~D;uY1@R$44JSQBpD`)Em3nz(2PgX(aGsOq4T#m{(TEzCxnJ@g%&U` z?8+&R8w!+R;6?iTvn>JElcb4dD2gVhR5^nB{IoA-Ae9_A+J`<4xh0}_XRPLO4$J^g zQCdFZiH`|Y0HRfKks+}q6Obumsa_0&`qP^wYR&yh*3sQN{lyGQL&RMoqJ?XszmxC~ zuE^d=8J(cI_(%b0V9ISh$=P+z^a4!5 z5@*5gTfveAS_+5nOYW5A#3tie2S)lFwwPD($eF>C z36djgVl)wJ1omk%VUoBtk(t&FWm+gMt+zGf6=$gWCpq>poQJS?V)hPUZO) zzE{VecJN*cR>kY>g7t)Ax-HQi40omx0MnA~aEAst~PpaX=UE zs-$WajGWBW$D~xH^j<-!%447D;uDN!`Oh?%T8MUvzqz+o2&ZJD6i3UNC2#(U8H&cqC zYL5G}QX5+*{OgQUg~7yvX6jNaB_l17l66y920))3Ce^c!&bgA-Rq15MYUUy>8f(_I 
zdyO4>^a|nasz9%Y|D9ZYx*YEd6T^wapHxLOLHe53RnH!sl{QnF|GmzOGo^F?dEYv- zcNxl+j1IuKYG%5E*62*Ixzq2OLkzFhy8hW$RfeYswgO+C zaoL*5LEvZkkwsRH6g(x^# z9~j!c3|RU{wcy?2k`hgV#)Qp8cGf|IQ4~doK>z+8D3xCk;j{FQ7zW>e|9$(60A*8~ z&A6cXP$hVc)AcHnjSN$qmh)jjV30AH3W^I2QXB+ML?0M_b&JzVi(Vr!?3D(!EA;kY zUx^g;0VoZ7b#1c;`!i?Mb@$Tjg-uyG6-+J;t122{ipCDul5ycut7o^Llv<;-V$~E( z2WtwQ;?5(uz#-h(K!4|%sp~_Sme!$}m9d!b8Bc3LE-OgLhFok}=Prli`M0{Nk@ z`I)$8D+4nogn=n-eyj6?Mkih(SAx(cEqDx<^#Kgk{m!2a>`!sdB^mVqWlDb!f$D8i z0Xe+V&_MjRn#~MlJua!H=Sw^wKX4no$_o-DO_^g#!A0rRhlce57MXUnPjD`DF@yb5 zdcWhOvgnXFMuX^pCL=2$>th$qWU6wpf+SN0CC!Zh+`JmgFElQ>DjO9#v-b|ZQYBn7 zpN|ZcH5pG+lyx+7BE6wKiEY!#xm3EL#*On!F6XmS9D=eL>L!ekoK&n%oxiGUOv6el zpKu8KFFE@|>A!}5adiKB2oxw{*c*kT(>)&;ic%NKppWU?w2*VQa3@=ac0x3rtUi`d z1*xKx+=oV~EGe6NE0uU-L8CxskS0wrH%&R^IwtDr9};2B+=N__HbobZN?mAfF!klB z&2Z(6vOt|P>77Gku9kk){c)J*H6}dKGzb83M}c1J}QCM zg;-+)3oIwU@X`u3y>iOyNBHMf3=>UiHhxT|c(xi76X)hz3yREK@;5Zn-dbt1j8tun*^^`Fx*U=xcF(D|_Ke*n1c-!s&?=lnxY7^SxGp~a0@&D22 z=JC18UeQMj((m#T0F}QQe|7vr8XuY;O{`=MZ5-)-7iT@|Z zqyGD!)8p~+L;U|5Z7>)Fy*GdqK50Kz836r>;U9wlz#VfppzG*TK`y>CAy>oY$X$Ss z975*_&W8_80BQBZKj3|-EW+)rL)kdq5B%vqOA;(>>y*7qaU&#c!uL4=O#PAnU1S`{ z8Y5|)^52Qq0^H==K5+l=!5=@RJJUe8Nx)qb+{ONVVc_-_Dvh?D>vWKs{uYM{XX6n~ z8?iUoEM5D74Yl@*moC1C3)cYaxw-cz+o8E!gW05LUX?V58(Yz~KyGT(CPnKB$-M+O zVf!RE9+k96^~UED{j9((eRbh~ z@%+;C@RufRk8bL!KjY<}n(c0^jhQxg_!|_~?*KNWtZwNqCtI-Up)FW3@j&H8M24c0#I?X%ei-(|DlGL7BEt9#*p-ag%R z0NZXSTWmIm`|Fwa1EMb^@-4AYR#%kk8$pnz5N@naWSHuT?hLHe-F>cOCJ! 
z$=m_gCt0S&MJjOH>a-T7htzpA82AMnRCk&1?E+Va@U6cQjh{B;+DM(1CIq;>b-XLI zYPjC2RbQ{U^cQ(eaB{il`PF3%GbRjH-4gA!(u>#bCmW9*)gJ49tMLmjT%WfoZ%VIN z5#(rDU1K3vFLBoG?QZUuqu!uHnlPKiPGupbWeWHWnE$yD<&=71?l@5n;r6y&mk@5a z{+$tJr;TCD=Ib<8@tc%FOqAfsDu#>MYh@=I4NBWUZL!8GJ7H34ZddMn;KsIfQE0y8 z0_T{dmApvo7WErrMndl`J+$SA&DjV3!8XIagHjn+r4z^nyd}#JI{vpv>l8{2T7)Tf3^>!<=sBmt5c*eeNFa^f+ztsylgY zcd;GX==xk)q|D-#)y(32Ft0iZ^WhL-XWQ-n zRn5V?3O^?j+~@zJ2yS}#gC9M#hxX7O+P{1IzW@LL|Np+V JW?2AE003}@e<1(> diff --git a/assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022301.tgz b/assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022301.tgz deleted file mode 100755 index 4881b140e09c92286b211178cb4923a8d9b648cc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5378 zcmV+d75(ZTiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH<$bK5wQ^O?V*SKh5WE~FlQP3bCE=XJ8Xu9+mNIFqTZ)YKRv zTN2hFzyUzp8r%2VuK@5(z3e#7W>)xMi)5qGXaJ2yqtTG(IPhcCl7`AxC@)ZUk7q>c zU`}H8-GiiFuh%=++q3_By>-G10{oZ%bdzj)^r&MVo zzw3Q=S83Yk|x@|dgD3@oZ5~8Q~`mIrAnjp zFKL88rc;SiqJ;!80f|baGDJufnIc3)lfava7>6*^I#I)JcS`jv9S5O^yXWZ`B}a|Q zr#wu@21~czHNA73UkVwiq31*6Q%oq=gmRSjE5?MfA&B6T>e>IQL`W@3M5HOo1K`9^ zAsEI)o~NqJa2^n%2w#@t963ilvoM}}wO|N&uHKfAd)D+KZ-zn8?+5)}&mX6hMT6d; z-x~~edwuUg+JDjbzaT6{^>6}U&G_FR?Dd-Cf3Vlzj{nCr z<$76B#EO&>Zj-`x;KK~X^v9@4Kgh;daQ?jtsDPu<31qBk z*VU42(fw1Tpyvq<31f4(B&Jp~ssJ6A|77Yl7b!$knHM#*78w=X0?fz-f)*BOOt=xp zRD(#hGGg2Ek17bw%nuVG0fQ;bgrvU#|E^NF2v& zZRO2`(r-~4@C9tDi=PTR%+NF~K{-?~KrAAxv*^<1koa3lCDu4(@SFOl3V|?F>EZ&@zC?U1DOn(YR9H*Ry28peAo%R5Z3 zLjamghj3x%^7;&7hHJ{$NH7}Chj21^EA&XBLaws{M=4{c7)sP?2%moOcEAUTQ>wI_ zyZ3KNjGjm2R1LkIjOwh;ld{_>WD{S}DaQyxl$uT~1Jw&qcz;!32fzp4H=qo)Pr1S{ zmFR1xy!xj^;sm9{$R|-GQ7K=~6I3rxxa|+@WaN2;m>a_Nt>sh+#f5)(#4qK44-JiOA99E(C7vG9qAek5aLSqX`VjyM-OM|pi~YGfqZ zL@*{UO`jws08CRN3D=0A!Wl*>%R6l-sW$QfNEG>k+fE!9TmYwt5DSSwbjjwDw*eL1 zNQA1Zi8@zZ62+8LtIK6NUAHJS^2<{FyhOuJ!F^L;*A$q6pbQ90W+bSWROCuTG=wKl zJa1oXv9?Rw-q50q$Yw9`Z*=zbVTn3C+5&$LIxeRB9x} 
z*p6@0NPkB=O#&p2$Qcr*XTQ5wLgr0rpAE|m;(vVjFfxL}^hiqBD`dnBQNbfMgh9{r zPZH6`DawvR({6%mys`xl)1ya{UQmWpd~IeC;#L$+zyX9LA!Ev@rbcTvYH7c(nZDc7FGE+I|K+;wJ}J=CmNDp5WWXAWIsjAQ^gMbI%scNT%V zj@NlpE7d@{cH7%C0^Z2EAsjobM$9E5dPS8??RxI7X*9)5C0i>UWXgS#SRrTfc0<7@ zbZP`{5{P&%EXv3^U1a4E1Zy&>G#t zv@|VR|K(b)>b`yV;q_^tuk>e?vDW^xci33}9qjjax9h*hNJims&7Ui~GooOO$RWan zi8)3Ac-PlPp$k4b!$7&_39@z6?X7phfxrYW&9F~3Dy#mLs(h|MGpckhDyu83T<7tO zhD~@h^gJi=jNF`}2H%(D5;|Qodv@!KscyE$>KeU3bqux0l%>ja;Gtb2SQ(uyldJ?^ zyjC(ARc$Q;C5Mz}vRqQe>{@d1;s;XBP`TEwKzXmP62jfksTy6f-kQPr5aEP!gid2+Y_fE0U(SHcR1bmM zYcSezAzlpRy9<=k42&D2w($@ScI{6ZpQe)uy@HNkBFhJfBlFmaBvOd4(#BG|j>EZ?s$@(> zX6GLZ#^x|f0XE{i5wrQjpKUFMFE?-83Y`T*9G2jHCVp+^g4WvGI}?~l%Jl>~e^LHl zRHuQ>m3x=&w(NJU8&=xa44%xYZ`#(jan@lRlwDA)Z)XeKSz4 z(D|)5=xl^!@yIE; z;nsw|W;lP%4UY?X;8v2s!e!)wfzs{VEoV)NbXH4(uRWpPF|F4B^6gxV3K)qbDlLS* z6b#qx6t2<#`h#9W{~PS@?(J{&zsE?8fh5Nytcs9x%A>-7^j)asY-TO8 zWGb7@RM!?VfEh}v$+Q8HEshH;OSx23(bvMipm4Irs zl?1M+qHudV%w~*wH~bdo0i!0p$BEry%EapmhI#<)iMKI}Hh7}a@xL(CnFx2e>+Ud? z(Df~_Xw^|L!emzo2^X-!zHTb_U%Pj2)4H@;{@+E*U!K;<|K0tD|L>r;d$5)NkCMKa z{J)?6Uzh*iq@-V@NZYJFbUF?7*>f%;zIECu9;_-|f9fhtbmd(xa8*U$F1}7t7r%|_ zT|8L*+XGnC=%wqXtLg&s@k%2%eL;CS)r^MoMRjsD${ax~7|m$S0qvYo0OzhDk$M1-HCu0`Q4&}tdalw`@QD=?{4pKEB_xQ)m8gMm9qR* z(b!u%&>HQ(Y6Xdj4rf1B>T>tmtR=2IY+Nyq{HMQgQtzY-pKR}0(e%j;jZrYC6in69`fWuM$}oj)TP=muF|t?-k^*%VLE^7vnJCj zL6zQe#DdSDla~5a*pptxOMtrD$_5Bc~_9H4^c6FSzzA`b@Lg@%)J7*RuA`w!* z{N5dvE>pX=6FF6NiVd22|6bZsH4}WEx9C;Qpx&avHEwzM-lbN!fO^4=9#(^H?-KL} zbt>Bt0#;(LgKvVqvFW`6a~^SMEz$*{23DeOz^^ahq8e4A)`G5unjNniT%!K1HC45u zR@6(bQeP#o*9NO~xLV{AZv`)^fNe%p#!?Q3iC_fL0P|7CM-c8+P|HQ<`{UvK}gdH!SfaB#R? 
z|2;<9_%PWjM}gd^bhmZl3gglrbjA2@XboFE{*&ll2LDD{>;Jj8--`dy+m8Q7N$mxC zGd@tUYHCUW=l$2%`5m#jCI%ZO{bOP)e?ZT5zT6N^7(1xg;u|)=^Lk z-jvZ$59qX(iUla@bUdgfts-o{_8|K=PHW`9O1`}O?;Q@B@xKrD_qOq$9wn`J1>Z#E zpJwZ7XWUt!2j0)YJC~3)d>}^0jTnJGJbC`a1DIrS|D426jVOAb@*{P`=N?pwUJwFs zaTbL?Ql4Hd`Cwrnf)ew5GCHn3*A0?cp4^1wgFUs| z&m`p45DxbDcMrV#E=E_6e<>LHW4Dw4<<9@~51Rhpz1^+<-{Yi*_5XO7gP->Ff2wEY zx&MkjWkaYsL-x0*PTtcmq~;J(oWm(Y$y$CP*I9JVoO4@8kZoF>R?B~lO1r;)<`()z z{Qp|SbG=@aai+OnFz$p19a4E||XC&hW$R$CFDN0`<3d z<#jUG*RVl6ug1=8)+o6R+vpk{E@#NWUKT?66cQ4ilPTIu zB|glkvI}6d9tAZ+#$d|C*ct06pT00sm}q)|_D+Gyd%`1c2RKe$pyj8D#0kB^$nC}c z=~)2pIGcmuHm4y85|qFw$H05{^V`?&PrcyP>940+Nc48#Sj4g5@bUN*A}W;^OsVeL zzpfTuF#cV3?ce;xY}z${@<(;SyCtGA3D48ShQL+c^FUoD-t%Bg&b{Y>juY?s|9Ly` zkw_|11t+gwD=$c-Fxs^j&I*6kODr3#~x0n6Z$N%2$ z?soh?O4@09ka?f#m;C+m^nUmAJiFKJfbc$lnMX?}5{po|p@g zYI)@9>sF3LNw{6s{4eg_slGvdd|s;;dHo;v+}vNQxNs3{wS15lJ*kr}-{qyP21}_Ot4anHYCtVz22@nv5BC zlLD7d9Ij`9y=CV(i$ia#+I|IyV9w zFI(YuOIj9$qcw<5+wpm`7~Dbg-neDdP*u=!$E)NFd1u~P>)@>+tP)RngKOlf(XcJx#C z4aTOng_x4~TWakgHYBwQOwQ}uGw-b8u4eN7NR>A7jT_#YWuXg+ok1Zy%`Sse)v(w9 z^RgctJS&K)I_DXjZw;<2wnTPQ9T27Hl$%Nvl;mD`MV5+No`c9w30R0U9%Ckov$F#? zaMnNb&RV{S2K>nGz06Hr5-Bc>KzBPF&g;=ta^}W$UXQA4-AWt_bJsjrT@z;O?7|h+ zpt0_!r-)FwxYRo<4infmDeh_-BakR^0mbcoe>rFBM+Ypim6HT3b2c6EP3BSMPBRG2 z7X#nv*fyxT&erQyd|O_z1u;MtP)njHXK6)1SbFQRN|v*ya%o9%tJM-zMrc<>sTJZ{ z0A3|RS@6BR-3q*(C${1r=7|lHp5xUbm*qEbR@__VT4$MKN<~tVMgAvM`L@nG!r;oz z!$aw+K)KR{hp7HLTOmc+?Vi%gc`g0 zC9Gnuf_$ufZ9R0GzD`=S{tIb0r2fO*0=>@uGw3(%KL>-sHvZ$Iq#Zauu@}^)oEsfg zaQOQVuV29BjD|BH%*;n~P&1mS)?K&O7-Vnp@OJE7SB!>Q85CybC^gzbg$kPYCFi#U gyDI~~HZQhmo3?42{*%)G2LJ&7|2o11{s4vm0Q?`EyZ`_I diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/Chart.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/Chart.yaml deleted file mode 100755 index 1005637..0000000 --- a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/Chart.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -appVersion: v3.13.3 -description: Install Canal Network Plugin. 
-home: https://www.projectcalico.org/ -keywords: -- canal -maintainers: -- email: charts@rancher.com - name: Rancher Labs -name: rke2-canal -sources: -- https://github.com/rancher/rke2-charts -version: v3.13.300-build2021022301 diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/NOTES.txt b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/NOTES.txt deleted file mode 100755 index 12a30ff..0000000 --- a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/NOTES.txt +++ /dev/null @@ -1,3 +0,0 @@ -Canal network plugin has been installed. - -NOTE: It may take few minutes until Canal image install CNI files and node become in ready state. diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/_helpers.tpl b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/_helpers.tpl deleted file mode 100755 index b647c75..0000000 --- a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/_helpers.tpl +++ /dev/null @@ -1,7 +0,0 @@ -{{- define "system_default_registry" -}} -{{- if .Values.global.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.systemDefaultRegistry -}} -{{- else -}} -{{- "" -}} -{{- end -}} -{{- end -}} diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/config.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/config.yaml deleted file mode 100755 index 37f28ef..0000000 --- a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/config.yaml +++ /dev/null @@ -1,67 +0,0 @@ ---- -# Source: calico/templates/calico-config.yaml -# This ConfigMap is used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: {{ .Release.Name }}-config - namespace: kube-system -data: - # Typha is disabled. - typha_service_name: {{ .Values.calico.typhaServiceName | quote }} - # The interface used by canal for host <-> host communication. 
- # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: {{ .Values.flannel.iface | quote }} - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: {{ .Values.calico.masquerade | quote }} - - # Configure the MTU to use - veth_mtu: {{ .Values.calico.vethuMTU | quote }} - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.1", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "mtu": __CNI_MTU__, - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - }, - { - "type": "bandwidth", - "capabilities": {"bandwidth": true} - } - ] - } - - # Flannel network configuration. Mounted into the flannel container. 
- net-conf.json: | - { - "Network": {{ .Values.podCidr | quote }}, - "Backend": { - "Type": {{ .Values.flannel.backend | quote }} - } - } diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/crd.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/crd.yaml deleted file mode 100755 index 0351759..0000000 --- a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/crd.yaml +++ /dev/null @@ -1,197 +0,0 @@ ---- -# Source: calico/templates/kdd-crds.yaml - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgppeers.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPPeer - plural: bgppeers - singular: bgppeer - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: blockaffinities.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BlockAffinity - plural: blockaffinities - singular: blockaffinity - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration - ---- -apiVersion: 
apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamblocks.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMBlock - plural: ipamblocks - singular: ipamblock - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamconfigs.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMConfig - plural: ipamconfigs - singular: ipamconfig - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamhandles.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMHandle - plural: ipamhandles - singular: ipamhandle - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- -apiVersion: 
apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networksets.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkSet - plural: networksets - singular: networkset diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/daemonset.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/daemonset.yaml deleted file mode 100755 index 1431df8..0000000 --- a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/daemonset.yaml +++ /dev/null @@ -1,262 +0,0 @@ ---- -# Source: calico/templates/calico-node.yaml -# This manifest installs the canal container, as well -# as the CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: {{ .Release.Name | quote }} - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: canal - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - nodeSelector: - kubernetes.io/os: linux - hostNetwork: true - tolerations: - # Make sure canal gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. 
- - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: canal - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - priorityClassName: system-node-critical - initContainers: - # This container installs the CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{ template "system_default_registry" . }}{{ .Values.calico.cniImage.repository }}:{{ .Values.calico.cniImage.tag }} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-canal.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: {{ .Release.Name }}-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: {{ .Release.Name }}-config - key: veth_mtu - # Prevents the container from sleeping forever. - - name: SLEEP - value: "false" - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - securityContext: - privileged: true - # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes - # to communicate with Felix over the Policy Sync API. - - name: flexvol-driver - image: {{ template "system_default_registry" . 
}}{{ .Values.calico.flexvolImage.repository }}:{{ .Values.calico.flexvolImage.tag }} - command: ['/usr/local/bin/flexvol.sh', '-s', '/usr/local/bin/flexvol', '-i', 'flexvoldriver'] - volumeMounts: - - name: flexvol-driver-host - mountPath: /host/driver - securityContext: - privileged: true - containers: - # Runs canal container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - command: - - "start_runit" - image: {{ template "system_default_registry" . }}{{ .Values.calico.nodeImage.repository }}:{{ .Values.calico.nodeImage.tag }} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: {{ .Values.calico.datastoreType | quote }} - # Configure route aggregation based on pod CIDR. - - name: USE_POD_CIDR - value: {{ .Values.calico.usePodCIDR | quote }} - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: {{ .Values.calico.waitForDatastore | quote }} - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: {{ .Values.calico.networkingBackend | quote }} - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: {{ .Values.calico.clusterType | quote}} - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: {{ .Values.calico.felixIptablesRefreshInterval | quote}} - - name: FELIX_IPTABLESBACKEND - value: {{ .Values.calico.felixIptablesBackend | quote}} - # No IP address needed. - - name: IP - value: "" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within `--cluster-cidr`. - # - name: CALICO_IPV4POOL_CIDR - # value: "192.168.0.0/16" - # Disable file logging so `kubectl logs` works. 
- - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: {{ .Values.calico.felixDefaultEndpointToHostAction | quote }} - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: {{ .Values.calico.felixIpv6Support | quote }} - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: {{ .Values.calico.felixLogSeverityScreen | quote }} - - name: FELIX_HEALTHENABLED - value: {{ .Values.calico.felixHealthEnabled | quote }} - # enable promentheus metrics - - name: FELIX_PROMETHEUSMETRICSENABLED - value: {{ .Values.calico.felixPrometheusMetricsEnabled | quote }} - - name: FELIX_XDPENABLED - value: {{ .Values.calico.felixXDPEnabled | quote }} - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - exec: - command: - - /bin/calico-node - - -felix-live - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - host: localhost - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - - name: policysync - mountPath: /var/run/nodeagent - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. - - name: kube-flannel - image: {{ template "system_default_registry" . }}{{ .Values.flannel.image.repository }}:{{ .Values.flannel.image.tag }} - command: - - "/opt/bin/flanneld" - {{- range .Values.flannel.args }} - - {{ . 
| quote }} - {{- end }} - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: {{ .Release.Name }}-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: {{ .Release.Name }}-config - key: masquerade - volumeMounts: - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by canal. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - # Used by flannel. - - name: flannel-cfg - configMap: - name: {{ .Release.Name }}-config - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/rbac.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/rbac.yaml deleted file mode 100755 index cd39730..0000000 --- a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/rbac.yaml +++ /dev/null @@ -1,163 +0,0 @@ ---- -# Source: calico/templates/rbac.yaml - -# Include a clusterrole for the calico-node DaemonSet, -# and bind it to the calico-node serviceaccount. 
-kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-node -rules: - # The CNI plugin needs to get pods, nodes, and namespaces. - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - - services - verbs: - # Used to discover service IPs for advertisement. - - watch - - list - # Used to discover Typhas. - - get - # Pod CIDR auto-detection on kubeadm needs access to config maps. - - apiGroups: [""] - resources: - - configmaps - verbs: - - get - - apiGroups: [""] - resources: - - nodes/status - verbs: - # Needed for clearing NodeNetworkUnavailable flag. - - patch - # Calico stores some configuration information in node annotations. - - update - # Watch for changes to Kubernetes NetworkPolicies. - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - # Used by Calico for policy information. - - apiGroups: [""] - resources: - - pods - - namespaces - - serviceaccounts - verbs: - - list - - watch - # The CNI plugin patches pods/status. - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - # Calico monitors various CRDs for config. - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - ipamblocks - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - networksets - - clusterinformations - - hostendpoints - - blockaffinities - verbs: - - get - - list - - watch - # Calico must create and update some CRDs on startup. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - - felixconfigurations - - clusterinformations - verbs: - - create - - update - # Calico stores some configuration information on the node. 
- - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - watch - # These permissions are only requried for upgrade from v2.6, and can - # be removed after upgrade or on fresh installations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - bgpconfigurations - - bgppeers - verbs: - - create - - update - ---- -# Flannel ClusterRole -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -rules: - - apiGroups: [""] - resources: - - pods - verbs: - - get - - apiGroups: [""] - resources: - - nodes - verbs: - - list - - watch - - apiGroups: [""] - resources: - - nodes/status - verbs: - - patch ---- -# Bind the flannel ClusterRole to the canal ServiceAccount. -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: canal-flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: canal-calico -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/serviceaccount.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/serviceaccount.yaml deleted file mode 100755 index 582d55b..0000000 --- a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/templates/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system diff --git a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/values.yaml b/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/values.yaml deleted file mode 100755 index 8730b96..0000000 --- 
a/charts/rke2-canal/rke2-canal/v3.13.300-build2021022301/values.yaml +++ /dev/null @@ -1,74 +0,0 @@ ---- - -# The IPv4 cidr pool to create on startup if none exists. Pod IPs will be -# chosen from this range. -podCidr: "10.42.0.0/16" - -flannel: - # kube-flannel image - image: - repository: rancher/hardened-flannel - tag: v0.13.0-rancher1-build20210223 - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - iface: "" - # kube-flannel command arguments - args: - - "--ip-masq" - - "--kube-subnet-mgr" - # Backend for kube-flannel. Backend should not be changed - # at runtime. - backend: "vxlan" - -calico: - # CNI installation image. - cniImage: - repository: rancher/hardened-calico - tag: v3.13.3-build20210223 - # Canal node image. - nodeImage: - repository: rancher/hardened-calico - tag: v3.13.3-build20210223 - # Flexvol Image. - flexvolImage: - repository: rancher/hardened-calico - tag: v3.13.3-build20210223 - # Datastore type for canal. It can be either kuberentes or etcd. - datastoreType: kubernetes - # Wait for datastore to initialize. - waitForDatastore: true - # Configure route aggregation based on pod CIDR. - usePodCIDR: true - # Disable BGP routing. - networkingBackend: none - # Cluster type to identify the deployment type. - clusterType: "k8s,canal" - # Disable file logging so `kubectl logs` works. - disableFileLogging: true - # Disable IPv6 on Kubernetes. - felixIpv6Support: false - # Period, in seconds, at which felix re-applies all iptables state - felixIptablesRefreshInterval: 60 - # iptables backend to use for felix, defaults to auto but can also be set to nft or legacy - felixIptablesBackend: auto - # Set Felix logging to "info". - felixLogSeverityScreen: info - # Enable felix healthcheck. 
- felixHealthEnabled: true - # Enable prometheus metrics - felixPrometheusMetricsEnabled: true - # Disable XDP Acceleration as we do not support it with our ubi7 base image - felixXDPEnabled: false - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: true - # Set Felix endpoint to host default action to ACCEPT. - felixDefaultEndpointToHostAction: ACCEPT - # Configure the MTU to use. - vethuMTU: 1450 - # Typha is disabled. - typhaServiceName: none - -global: - systemDefaultRegistry: "" diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/.helmignore b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/.helmignore deleted file mode 100755 index 7c04072..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/.helmignore +++ /dev/null @@ -1,22 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj -OWNERS diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/Chart.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/Chart.yaml deleted file mode 100755 index 369939b..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/Chart.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: v1 -appVersion: 1.6.9 -description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS - Services -home: https://coredns.io -icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png -keywords: -- coredns -- dns -- kubedns -maintainers: -- email: hello@acale.ph - name: Acaleph -- email: shashidhara.huawei@gmail.com - name: shashidharatd -- email: andor44@gmail.com - name: andor44 -- email: manuel@rueg.eu - name: mrueg -name: rke2-coredns -sources: -- https://github.com/coredns/coredns -version: 1.10.101-build2021022302 diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/README.md b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/README.md deleted file mode 100755 index 0d41d40..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/README.md +++ /dev/null @@ -1,138 +0,0 @@ -# CoreDNS - -[CoreDNS](https://coredns.io/) is a DNS server that chains plugins and provides DNS Services - -# TL;DR; - -```console -$ helm install --name coredns --namespace=kube-system stable/coredns -``` - -## Introduction - -This chart bootstraps a [CoreDNS](https://github.com/coredns/coredns) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. This chart will provide DNS Services and can be deployed in multiple configuration to support various scenarios listed below: - - - CoreDNS as a cluster dns service and a drop-in replacement for Kube/SkyDNS. 
This is the default mode and CoreDNS is deployed as cluster-service in kube-system namespace. This mode is chosen by setting `isClusterService` to true. - - CoreDNS as an external dns service. In this mode CoreDNS is deployed as any kubernetes app in user specified namespace. The CoreDNS service can be exposed outside the cluster by using using either the NodePort or LoadBalancer type of service. This mode is chosen by setting `isClusterService` to false. - - CoreDNS as an external dns provider for kubernetes federation. This is a sub case of 'external dns service' which uses etcd plugin for CoreDNS backend. This deployment mode as a dependency on `etcd-operator` chart, which needs to be pre-installed. - -## Prerequisites - -- Kubernetes 1.10 or later - -## Installing the Chart - -The chart can be installed as follows: - -```console -$ helm install --name coredns --namespace=kube-system stable/coredns -``` - -The command deploys CoreDNS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists various ways to override default configuration during deployment. - -> **Tip**: List all releases using `helm list` - -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -```console -$ helm delete coredns -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. 
- -## Configuration - -| Parameter | Description | Default | -|:----------------------------------------|:--------------------------------------------------------------------------------------|:------------------------------------------------------------| -| `image.repository` | The image repository to pull from | coredns/coredns | -| `image.tag` | The image tag to pull from | `v1.6.9` | -| `image.pullPolicy` | Image pull policy | IfNotPresent | -| `replicaCount` | Number of replicas | 1 | -| `resources.limits.cpu` | Container maximum CPU | `100m` | -| `resources.limits.memory` | Container maximum memory | `128Mi` | -| `resources.requests.cpu` | Container requested CPU | `100m` | -| `resources.requests.memory` | Container requested memory | `128Mi` | -| `serviceType` | Kubernetes Service type | `ClusterIP` | -| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | -| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | {} | -| `prometheus.monitor.namespace` | Selector to select which namespaces the Endpoints objects are discovered from. | `""` | -| `service.clusterIP` | IP address to assign to service | `""` | -| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` | -| `service.externalTrafficPolicy` | Enable client source IP preservation | `[]` | -| `service.annotations` | Annotations to add to service | `{prometheus.io/scrape: "true", prometheus.io/port: "9153"}`| -| `serviceAccount.create` | If true, create & use serviceAccount | false | -| `serviceAccount.name` | If not set & create is true, use template fullname | | -| `rbac.create` | If true, create & use RBAC resources | true | -| `rbac.pspEnable` | Specifies whether a PodSecurityPolicy should be created. | `false` | -| `isClusterService` | Specifies whether chart should be deployed as cluster-service or normal k8s app. 
| true | -| `priorityClassName` | Name of Priority Class to assign pods | `""` | -| `servers` | Configuration for CoreDNS and plugins | See values.yml | -| `affinity` | Affinity settings for pod assignment | {} | -| `nodeSelector` | Node labels for pod assignment | {} | -| `tolerations` | Tolerations for pod assignment | [] | -| `zoneFiles` | Configure custom Zone files | [] | -| `extraSecrets` | Optional array of secrets to mount inside the CoreDNS container | [] | -| `customLabels` | Optional labels for Deployment(s), Pod, Service, ServiceMonitor objects | {} | -| `podDisruptionBudget` | Optional PodDisruptionBudget | {} | -| `autoscaler.enabled` | Optionally enabled a cluster-proportional-autoscaler for CoreDNS | `false` | -| `autoscaler.coresPerReplica` | Number of cores in the cluster per CoreDNS replica | `256` | -| `autoscaler.nodesPerReplica` | Number of nodes in the cluster per CoreDNS replica | `16` | -| `autoscaler.image.repository` | The image repository to pull autoscaler from | k8s.gcr.io/cluster-proportional-autoscaler-amd64 | -| `autoscaler.image.tag` | The image tag to pull autoscaler from | `1.7.1` | -| `autoscaler.image.pullPolicy` | Image pull policy for the autoscaler | IfNotPresent | -| `autoscaler.priorityClassName` | Optional priority class for the autoscaler pod. `priorityClassName` used if not set. 
| `""` | -| `autoscaler.affinity` | Affinity settings for pod assignment for autoscaler | {} | -| `autoscaler.nodeSelector` | Node labels for pod assignment for autoscaler | {} | -| `autoscaler.tolerations` | Tolerations for pod assignment for autoscaler | [] | -| `autoscaler.resources.limits.cpu` | Container maximum CPU for cluster-proportional-autoscaler | `20m` | -| `autoscaler.resources.limits.memory` | Container maximum memory for cluster-proportional-autoscaler | `10Mi` | -| `autoscaler.resources.requests.cpu` | Container requested CPU for cluster-proportional-autoscaler | `20m` | -| `autoscaler.resources.requests.memory` | Container requested memory for cluster-proportional-autoscaler | `10Mi` | -| `autoscaler.configmap.annotations` | Annotations to add to autoscaler config map. For example to stop CI renaming them | {} | - -See `values.yaml` for configuration notes. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, - -```console -$ helm install --name coredns \ - --set rbac.create=false \ - stable/coredns -``` - -The above command disables automatic creation of RBAC rules. - -Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, - -```console -$ helm install --name coredns -f values.yaml stable/coredns -``` - -> **Tip**: You can use the default [values.yaml](values.yaml) - - -## Caveats - -The chart will automatically determine which protocols to listen on based on -the protocols you define in your zones. This means that you could potentially -use both "TCP" and "UDP" on a single port. -Some cloud environments like "GCE" or "Azure container service" cannot -create external loadbalancers with both "TCP" and "UDP" protocols. So -When deploying CoreDNS with `serviceType="LoadBalancer"` on such cloud -environments, make sure you do not attempt to use both protocols at the same -time. 
- -## Autoscaling - -By setting `autoscaler.enabled = true` a -[cluster-proportional-autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler) -will be deployed. This will default to a coredns replica for every 256 cores, or -16 nodes in the cluster. These can be changed with `autoscaler.coresPerReplica` -and `autoscaler.nodesPerReplica`. When cluster is using large nodes (with more -cores), `coresPerReplica` should dominate. If using small nodes, -`nodesPerReplica` should dominate. - -This also creates a ServiceAccount, ClusterRole, and ClusterRoleBinding for -the autoscaler deployment. - -`replicaCount` is ignored if this is enabled. diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/NOTES.txt b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/NOTES.txt deleted file mode 100755 index 3a1883b..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/NOTES.txt +++ /dev/null @@ -1,30 +0,0 @@ -{{- if .Values.isClusterService }} -CoreDNS is now running in the cluster as a cluster-service. -{{- else }} -CoreDNS is now running in the cluster. -It can be accessed using the below endpoint -{{- if contains "NodePort" .Values.serviceType }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "coredns.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo "$NODE_IP:$NODE_PORT" -{{- else if contains "LoadBalancer" .Values.serviceType }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status by running 'kubectl get svc -w {{ template "coredns.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "coredns.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo $SERVICE_IP -{{- else if contains "ClusterIP" .Values.serviceType }} - "{{ template "coredns.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local" - from within the cluster -{{- end }} -{{- end }} - -It can be tested with the following: - -1. Launch a Pod with DNS tools: - -kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools - -2. Query the DNS server: - -/ # host kubernetes diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/_helpers.tpl b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/_helpers.tpl deleted file mode 100755 index cfdbef7..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/_helpers.tpl +++ /dev/null @@ -1,158 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "coredns.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-*/}} -{{- define "coredns.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} - -{{/* -Generate the list of ports automatically from the server definitions -*/}} -{{- define "coredns.servicePorts" -}} - {{/* Set ports to be an empty dict */}} - {{- $ports := dict -}} - {{/* Iterate through each of the server blocks */}} - {{- range .Values.servers -}} - {{/* Capture port to avoid scoping awkwardness */}} - {{- $port := toString .port -}} - - {{/* If none of the server blocks has mentioned this port yet take note of it */}} - {{- if not (hasKey $ports $port) -}} - {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} - {{- end -}} - {{/* Retrieve the inner dict that holds the protocols for a given port */}} - {{- $innerdict := index $ports $port -}} - - {{/* - Look at each of the zones and check which protocol they serve - At the moment the following are supported by CoreDNS: - UDP: dns:// - TCP: tls://, grpc:// - */}} - {{- range .zones -}} - {{- if has (default "" .scheme) (list "dns://") -}} - {{/* Optionally enable tcp for this service as well */}} - {{- if eq .use_tcp true }} - {{- $innerdict := set $innerdict "istcp" true -}} - {{- end }} - {{- $innerdict := set $innerdict "isudp" true -}} - {{- end -}} - - {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} - {{- $innerdict := set $innerdict "istcp" true -}} - {{- end -}} - {{- end -}} - - {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} - {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} - {{- $innerdict := set $innerdict "isudp" true -}} - {{- $innerdict := set $innerdict "istcp" true -}} - {{- end -}} - - {{/* Write the dict back into the outer dict */}} - {{- $ports := set $ports $port 
$innerdict -}} - {{- end -}} - - {{/* Write out the ports according to the info collected above */}} - {{- range $port, $innerdict := $ports -}} - {{- if index $innerdict "isudp" -}} - {{- printf "- {port: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} - {{- end -}} - {{- if index $innerdict "istcp" -}} - {{- printf "- {port: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} - {{- end -}} - {{- end -}} -{{- end -}} - -{{/* -Generate the list of ports automatically from the server definitions -*/}} -{{- define "coredns.containerPorts" -}} - {{/* Set ports to be an empty dict */}} - {{- $ports := dict -}} - {{/* Iterate through each of the server blocks */}} - {{- range .Values.servers -}} - {{/* Capture port to avoid scoping awkwardness */}} - {{- $port := toString .port -}} - - {{/* If none of the server blocks has mentioned this port yet take note of it */}} - {{- if not (hasKey $ports $port) -}} - {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} - {{- end -}} - {{/* Retrieve the inner dict that holds the protocols for a given port */}} - {{- $innerdict := index $ports $port -}} - - {{/* - Look at each of the zones and check which protocol they serve - At the moment the following are supported by CoreDNS: - UDP: dns:// - TCP: tls://, grpc:// - */}} - {{- range .zones -}} - {{- if has (default "" .scheme) (list "dns://") -}} - {{/* Optionally enable tcp for this service as well */}} - {{- if eq .use_tcp true }} - {{- $innerdict := set $innerdict "istcp" true -}} - {{- end }} - {{- $innerdict := set $innerdict "isudp" true -}} - {{- end -}} - - {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} - {{- $innerdict := set $innerdict "istcp" true -}} - {{- end -}} - {{- end -}} - - {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} - {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} - {{- $innerdict := set $innerdict "isudp" true -}} - {{- $innerdict := set $innerdict 
"istcp" true -}} - {{- end -}} - - {{/* Write the dict back into the outer dict */}} - {{- $ports := set $ports $port $innerdict -}} - {{- end -}} - - {{/* Write out the ports according to the info collected above */}} - {{- range $port, $innerdict := $ports -}} - {{- if index $innerdict "isudp" -}} - {{- printf "- {containerPort: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} - {{- end -}} - {{- if index $innerdict "istcp" -}} - {{- printf "- {containerPort: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} - {{- end -}} - {{- end -}} -{{- end -}} - - -{{/* -Create the name of the service account to use -*/}} -{{- define "coredns.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "coredns.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{- define "system_default_registry" -}} -{{- if .Values.global.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.systemDefaultRegistry -}} -{{- else -}} -{{- "" -}} -{{- end -}} -{{- end -}} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole-autoscaler.yaml deleted file mode 100755 index b40bb0a..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole-autoscaler.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if and .Values.autoscaler.enabled .Values.rbac.create }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "coredns.fullname" . 
}}-autoscaler - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler -{{- if .Values.customLabels }} -{{ toYaml .Values.customLabels | indent 4 }} -{{- end }} -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["list","watch"] - - apiGroups: [""] - resources: ["replicationcontrollers/scale"] - verbs: ["get", "update"] - - apiGroups: ["extensions", "apps"] - resources: ["deployments/scale", "replicasets/scale"] - verbs: ["get", "update"] -# Remove the configmaps rule once below issue is fixed: -# kubernetes-incubator/cluster-proportional-autoscaler#16 - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "create"] -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole.yaml deleted file mode 100755 index 4203a02..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrole.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{- if .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "coredns.fullname" . 
}} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} -rules: -- apiGroups: - - "" - resources: - - endpoints - - services - - pods - - namespaces - verbs: - - list - - watch -{{- if .Values.rbac.pspEnable }} -- apiGroups: - - policy - - extensions - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - {{ template "coredns.fullname" . }} -{{- end }} -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding-autoscaler.yaml deleted file mode 100755 index d1ff736..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding-autoscaler.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if and .Values.autoscaler.enabled .Values.rbac.create }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "coredns.fullname" . }}-autoscaler - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . 
}}-autoscaler -{{- if .Values.customLabels }} -{{ toYaml .Values.customLabels | indent 4 }} -{{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "coredns.fullname" . }}-autoscaler -subjects: -- kind: ServiceAccount - name: {{ template "coredns.fullname" . }}-autoscaler - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding.yaml deleted file mode 100755 index 7ae9d4f..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "coredns.fullname" . }} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "coredns.fullname" . }} -subjects: -- kind: ServiceAccount - name: {{ template "coredns.serviceAccountName" . 
}} - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap-autoscaler.yaml deleted file mode 100755 index 0712e0d..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap-autoscaler.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if .Values.autoscaler.enabled }} ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: {{ template "coredns.fullname" . }}-autoscaler - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler - {{- if .Values.customLabels }} - {{- toYaml .Values.customLabels | nindent 4 }} - {{- end }} - {{- if .Values.autoscaler.configmap.annotations }} - annotations: - {{- toYaml .Values.autoscaler.configmap.annotations | nindent 4 }} - {{- end }} -data: - # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. - # If using small nodes, "nodesPerReplica" should dominate. 
- linear: |- - { - "coresPerReplica": {{ .Values.autoscaler.coresPerReplica | float64 }}, - "nodesPerReplica": {{ .Values.autoscaler.nodesPerReplica | float64 }}, - "preventSinglePointFailure": true - } -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap.yaml deleted file mode 100755 index b5069d3..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/configmap.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "coredns.fullname" . }} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} -data: - Corefile: |- - {{ range .Values.servers }} - {{- range $idx, $zone := .zones }}{{ if $idx }} {{ else }}{{ end }}{{ default "" $zone.scheme }}{{ default "." 
$zone.zone }}{{ else }}.{{ end -}} - {{- if .port }}:{{ .port }} {{ end -}} - { - {{- range .plugins }} - {{ .name }} {{ if .parameters }} {{if eq .name "kubernetes" }} {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDomain }} {{ end }} {{.parameters}}{{ end }}{{ if .configBlock }} { -{{ .configBlock | indent 12 }} - }{{ end }} - {{- end }} - } - {{ end }} - {{- range .Values.zoneFiles }} - {{ .filename }}: {{ toYaml .contents | indent 4 }} - {{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment-autoscaler.yaml deleted file mode 100755 index 6ddd209..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment-autoscaler.yaml +++ /dev/null @@ -1,77 +0,0 @@ -{{- if .Values.autoscaler.enabled }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "coredns.fullname" . }}-autoscaler - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler -{{- if .Values.customLabels }} -{{ toYaml .Values.customLabels | indent 4 }} -{{- end }} -spec: - selector: - matchLabels: - app.kubernetes.io/instance: {{ .Release.Name | quote }} - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . 
}}-autoscaler - template: - metadata: - labels: - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler - app.kubernetes.io/instance: {{ .Release.Name | quote }} - {{- if .Values.customLabels }} - {{ toYaml .Values.customLabels | nindent 8 }} - {{- end }} - annotations: - checksum/configmap: {{ include (print $.Template.BasePath "/configmap-autoscaler.yaml") . | sha256sum }} - {{- if .Values.isClusterService }} - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - {{- end }} - spec: - serviceAccountName: {{ template "coredns.fullname" . }}-autoscaler - {{- $priorityClassName := default .Values.priorityClassName .Values.autoscaler.priorityClassName }} - {{- if $priorityClassName }} - priorityClassName: {{ $priorityClassName | quote }} - {{- end }} - {{- if .Values.autoscaler.affinity }} - affinity: -{{ toYaml .Values.autoscaler.affinity | indent 8 }} - {{- end }} - {{- if .Values.autoscaler.tolerations }} - tolerations: -{{ toYaml .Values.autoscaler.tolerations | indent 8 }} - {{- end }} - {{- if .Values.autoscaler.nodeSelector }} - nodeSelector: -{{ toYaml .Values.autoscaler.nodeSelector | indent 8 }} - {{- end }} - containers: - - name: autoscaler - image: {{ template "system_default_registry" . }}{{ .Values.autoscaler.image.repository }}:{{ .Values.autoscaler.image.tag }} - imagePullPolicy: {{ .Values.autoscaler.image.pullPolicy }} - resources: -{{ toYaml .Values.autoscaler.resources | indent 10 }} - command: - - /cluster-proportional-autoscaler - - --namespace={{ .Release.Namespace }} - - --configmap={{ template "coredns.fullname" . }}-autoscaler - - --target=Deployment/{{ template "coredns.fullname" . 
}} - - --logtostderr=true - - --v=2 -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment.yaml deleted file mode 100755 index 0ed3c52..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/deployment.yaml +++ /dev/null @@ -1,127 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "coredns.fullname" . }} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} -{{- if .Values.customLabels }} -{{ toYaml .Values.customLabels | indent 4 }} -{{- end }} -spec: - {{- if not .Values.autoscaler.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - maxSurge: 10% - selector: - matchLabels: - app.kubernetes.io/instance: {{ .Release.Name | quote }} - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} - template: - metadata: - labels: - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} -{{- if .Values.customLabels }} -{{ toYaml .Values.customLabels | indent 8 }} -{{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} - {{- if .Values.isClusterService }} - scheduler.alpha.kubernetes.io/critical-pod: '' - {{- end }} - spec: - serviceAccountName: {{ template "coredns.serviceAccountName" . }} - {{- if .Values.priorityClassName }} - priorityClassName: {{ .Values.priorityClassName | quote }} - {{- end }} - {{- if .Values.isClusterService }} - dnsPolicy: Default - {{- end }} - {{- if .Values.affinity }} - affinity: -{{ toYaml .Values.affinity | indent 8 }} - {{- end }} - {{- if or (.Values.isClusterService) (.Values.tolerations) }} - tolerations: - {{- if .Values.isClusterService }} - - key: CriticalAddonsOnly - operator: Exists - {{- end }} - {{- if .Values.tolerations }} -{{ toYaml .Values.tolerations | indent 8 }} - {{- end }} - {{- end }} - {{- if .Values.nodeSelector }} - nodeSelector: -{{ toYaml .Values.nodeSelector | indent 8 }} - {{- end }} - containers: - - name: "coredns" - image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - args: [ "-conf", "/etc/coredns/Corefile" ] - volumeMounts: - - name: config-volume - mountPath: /etc/coredns -{{- range .Values.extraSecrets }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: true -{{- end }} - resources: -{{ toYaml .Values.resources | indent 10 }} - ports: -{{ include "coredns.containerPorts" . | indent 8 }} - livenessProbe: - httpGet: - path: /health - port: 8080 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - httpGet: - path: /ready - port: 8181 - scheme: HTTP - initialDelaySeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - volumes: - - name: config-volume - configMap: - name: {{ template "coredns.fullname" . 
}} - items: - - key: Corefile - path: Corefile - {{ range .Values.zoneFiles }} - - key: {{ .filename }} - path: {{ .filename }} - {{ end }} -{{- range .Values.extraSecrets }} - - name: {{ .name }} - secret: - secretName: {{ .name }} - defaultMode: 400 -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/poddisruptionbudget.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/poddisruptionbudget.yaml deleted file mode 100755 index 1fee2de..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.podDisruptionBudget -}} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "coredns.fullname" . }} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} -{{- if .Values.customLabels }} -{{ toYaml .Values.customLabels | indent 4 }} -{{- end }} -spec: - selector: - matchLabels: - app.kubernetes.io/instance: {{ .Release.Name | quote }} - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . 
}} -{{ toYaml .Values.podDisruptionBudget | indent 2 }} -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/podsecuritypolicy.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/podsecuritypolicy.yaml deleted file mode 100755 index 4e7a36f..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/podsecuritypolicy.yaml +++ /dev/null @@ -1,57 +0,0 @@ -{{- if .Values.rbac.pspEnable }} -{{ if .Capabilities.APIVersions.Has "policy/v1beta1" }} -apiVersion: policy/v1beta1 -{{ else }} -apiVersion: extensions/v1beta1 -{{ end -}} -kind: PodSecurityPolicy -metadata: - name: {{ template "coredns.fullname" . }} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- else }} - app.kubernetes.io/name: {{ template "coredns.name" . }} - {{- end }} -spec: - privileged: false - # Required to prevent escalations to root. - allowPrivilegeEscalation: false - # Add back CAP_NET_BIND_SERVICE so that coredns can run on port 53 - allowedCapabilities: - - CAP_NET_BIND_SERVICE - # Allow core volume types. - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'secret' - - 'downwardAPI' - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - # Require the container to run without root privileges. - rule: 'RunAsAny' - seLinux: - # This policy assumes the nodes are using AppArmor rather than SELinux. - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. 
- - min: 1 - max: 65535 - readOnlyRootFilesystem: false -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service-metrics.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service-metrics.yaml deleted file mode 100755 index 1657cd7..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service-metrics.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if .Values.prometheus.monitor.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "coredns.fullname" . }}-metrics - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} - app.kubernetes.io/component: metrics -{{- if .Values.customLabels }} -{{ toYaml .Values.customLabels | indent 4 }} -{{- end }} - annotations: -{{ toYaml .Values.service.annotations | indent 4 }} -spec: - selector: - app.kubernetes.io/instance: {{ .Release.Name | quote }} - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} - ports: - - name: metrics - port: 9153 - targetPort: 9153 -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service.yaml deleted file mode 100755 index 95c858f..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/service.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "coredns.fullname" . 
}} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} -{{- if .Values.customLabels }} -{{ toYaml .Values.customLabels | indent 4 }} -{{- end }} - annotations: -{{ toYaml .Values.service.annotations | indent 4 }} -spec: - selector: - app.kubernetes.io/instance: {{ .Release.Name | quote }} - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} - {{- if .Values.service.clusterIP }} - clusterIP: {{ .Values.service.clusterIP }} - {{ else }} - clusterIP: {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDNS }} - {{- end }} - {{- if .Values.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} - {{- end }} - {{- if .Values.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.service.loadBalancerIP }} - {{- end }} - ports: -{{ include "coredns.servicePorts" . | indent 2 -}} - type: {{ default "ClusterIP" .Values.serviceType }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount-autoscaler.yaml deleted file mode 100755 index 1b218d2..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount-autoscaler.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if and .Values.autoscaler.enabled .Values.rbac.create }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "coredns.fullname" . 
}}-autoscaler - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler -{{- if .Values.customLabels }} -{{ toYaml .Values.customLabels | indent 4 }} -{{- end }} -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount.yaml deleted file mode 100755 index 23f29a1..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/serviceaccount.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "coredns.serviceAccountName" . }} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . 
}} -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/servicemonitor.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/servicemonitor.yaml deleted file mode 100755 index ca0b691..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/templates/servicemonitor.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if .Values.prometheus.monitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "coredns.fullname" . }} - {{- if .Values.prometheus.monitor.namespace }} - namespace: {{ .Values.prometheus.monitor.namespace }} - {{- end }} - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} - {{- if .Values.prometheus.monitor.additionalLabels }} -{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} - {{- end }} -spec: - selector: - matchLabels: - app.kubernetes.io/instance: {{ .Release.Name | quote }} - {{- if .Values.isClusterService }} - k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} - {{- end }} - app.kubernetes.io/name: {{ template "coredns.name" . }} - app.kubernetes.io/component: metrics - endpoints: - - port: metrics -{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/values.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/values.yaml deleted file mode 100755 index a1703d6..0000000 --- a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022302/values.yaml +++ /dev/null @@ -1,202 +0,0 @@ -# Default values for coredns. -# This is a YAML-formatted file. 
-# Declare variables to be passed into your templates. - -image: - repository: rancher/hardened-coredns - tag: "v1.6.9-build20210223" - pullPolicy: IfNotPresent - -replicaCount: 1 - -resources: - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi - -serviceType: "ClusterIP" - -prometheus: - monitor: - enabled: false - additionalLabels: {} - namespace: "" - -service: -# clusterIP: "" -# loadBalancerIP: "" -# externalTrafficPolicy: "" - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9153" - -serviceAccount: - create: true - # The name of the ServiceAccount to use - # If not set and create is true, a name is generated using the fullname template - name: coredns - -rbac: - # If true, create & use RBAC resources - create: true - # If true, create and use PodSecurityPolicy - pspEnable: false - # The name of the ServiceAccount to use. - # If not set and create is true, a name is generated using the fullname template - # name: - -# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app. -isClusterService: true - -# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set. -priorityClassName: "system-cluster-critical" - -# Default zone is what Kubernetes recommends: -# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options -servers: -- zones: - - zone: . 
- port: 53 - plugins: - - name: errors - # Serves a /health endpoint on :8080, required for livenessProbe - - name: health - configBlock: |- - lameduck 5s - # Serves a /ready endpoint on :8181, required for readinessProbe - - name: ready - # Required to query kubernetes API for data - - name: kubernetes - parameters: cluster.local in-addr.arpa ip6.arpa - configBlock: |- - pods insecure - fallthrough in-addr.arpa ip6.arpa - ttl 30 - # Serves a /metrics endpoint on :9153, required for serviceMonitor - - name: prometheus - parameters: 0.0.0.0:9153 - - name: forward - parameters: . /etc/resolv.conf - - name: cache - parameters: 30 - - name: loop - - name: reload - - name: loadbalance - -# Complete example with all the options: -# - zones: # the `zones` block can be left out entirely, defaults to "." -# - zone: hello.world. # optional, defaults to "." -# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS) -# - zone: foo.bar. -# scheme: dns:// -# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol -# # Note that this will not work if you are also exposing tls or grpc on the same server -# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS) -# plugins: # the plugins to use for this server block -# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it! 
-# parameters: foo bar # list of parameters after the plugin -# configBlock: |- # if the plugin supports extra block style config, supply it here -# hello world -# foo bar - -# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core -# for example: -# affinity: -# nodeAffinity: -# requiredDuringSchedulingIgnoredDuringExecution: -# nodeSelectorTerms: -# - matchExpressions: -# - key: foo.bar.com/role -# operator: In -# values: -# - master -affinity: {} - -# Node labels for pod assignment -# Ref: https://kubernetes.io/docs/user-guide/node-selection/ -nodeSelector: {} - -# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core -# for example: -# tolerations: -# - key: foo.bar.com/role -# operator: Equal -# value: master -# effect: NoSchedule -tolerations: [] - -# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget -podDisruptionBudget: {} - -# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/ -zoneFiles: [] -# - filename: example.db -# domain: example.com -# contents: | -# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600 -# example.com. IN NS b.iana-servers.net. -# example.com. IN NS a.iana-servers.net. -# example.com. IN A 192.168.99.102 -# *.example.com. IN A 192.168.99.102 - -# optional array of secrets to mount inside coredns container -# possible usecase: need for secure connection with etcd backend -extraSecrets: [] -# - name: etcd-client-certs -# mountPath: /etc/coredns/tls/etcd -# - name: some-fancy-secret -# mountPath: /etc/wherever - -# Custom labels to apply to Deployment, Pod, Service, ServiceMonitor. Including autoscaler if enabled. 
-customLabels: {} - -## Configue a cluster-proportional-autoscaler for coredns -# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler -autoscaler: - # Enabled the cluster-proportional-autoscaler - enabled: false - - # Number of cores in the cluster per coredns replica - coresPerReplica: 256 - # Number of nodes in the cluster per coredns replica - nodesPerReplica: 16 - - image: - repository: k8s.gcr.io/cluster-proportional-autoscaler-amd64 - tag: "1.7.1" - pullPolicy: IfNotPresent - - # Optional priority class to be used for the autoscaler pods. priorityClassName used if not set. - priorityClassName: "" - - # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core - affinity: {} - - # Node labels for pod assignment - # Ref: https://kubernetes.io/docs/user-guide/node-selection/ - nodeSelector: {} - - # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core - tolerations: [] - - # resources for autoscaler pod - resources: - requests: - cpu: "20m" - memory: "10Mi" - limits: - cpu: "20m" - memory: "10Mi" - - # Options for autoscaler configmap - configmap: - ## Annotations for the coredns-autoscaler configmap - # i.e. strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed - annotations: {} -k8sApp : "kube-dns" - -global: - systemDefaultRegistry: "" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/.helmignore b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/.helmignore deleted file mode 100755 index 50af031..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/.helmignore +++ /dev/null @@ -1,22 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/Chart.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/Chart.yaml deleted file mode 100755 index dc27305..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/Chart.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -appVersion: 0.35.0 -description: Ingress controller for Kubernetes using NGINX as a reverse proxy and - load balancer -home: https://github.com/kubernetes/ingress-nginx -icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png -keywords: -- ingress -- nginx -kubeVersion: '>=1.16.0-0' -maintainers: -- name: ChiefAlexander -name: rke2-ingress-nginx -sources: -- https://github.com/kubernetes/ingress-nginx -version: 3.3.001 diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/OWNERS b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/OWNERS deleted file mode 100755 index 7aadb8d..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -approvers: - - ChiefAlexander - -reviewers: - - ChiefAlexander diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/README.md b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/README.md deleted file mode 100755 index 1ab152a..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/README.md +++ /dev/null @@ -1,221 +0,0 @@ -# ingress-nginx - -[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer - -To use, add the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. 
- -This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. - -## Prerequisites - -- Kubernetes v1.16+ - -## Get Repo Info - -```console -helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -helm repo add stable https://kubernetes-charts.storage.googleapis.com/ -helm repo update -``` - -## Install Chart - -```console -# Helm 3 -$ helm install [RELEASE_NAME] ingress-nginx/ingress-nginx - -# Helm 2 -$ helm install --name [RELEASE_NAME] ingress-nginx/ingress-nginx -``` - -The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. - -_See [configuration](#configuration) below._ - -_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ - -## Uninstall Chart - -```console -# Helm 3 -$ helm uninstall [RELEASE_NAME] - -# Helm 2 -# helm delete --purge [RELEASE_NAME] -``` - -This removes all the Kubernetes components associated with the chart and deletes the release. - -_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ - -## Upgrading Chart - -```console -# Helm 3 or 2 -$ helm upgrade [RELEASE_NAME] [CHART] --install -``` - -_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ - -### Upgrading With Zero Downtime in Production - -By default the ingress-nginx controller has service interruptions whenever it's pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). - -### Migrating from stable/nginx-ingress - -There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: - -1. 
For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one -1. For critical services in production that require zero-downtime, you will want to: - 1. [Install](#install-chart) a second Ingress controller - 1. Redirect your DNS traffic from the old controller to the new controller - 1. Log traffic from both controllers during this changeover - 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it - 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) - -Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. - -## Configuration - -See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: - -```console -# Helm 2 -$ helm inspect values ingress-nginx/ingress-nginx - -# Helm 3 -$ helm show values ingress-nginx/ingress-nginx -``` - -### PodDisruptionBudget - -Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, -else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. 
- -### Prometheus Metrics - -The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. - -You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. - -### ingress-nginx nginx\_status page/stats server - -Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: - -- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed -- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. - You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230) to re-enable the http server - -### ExternalDNS Service Configuration - -Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: - -```yaml -controller: - service: - annotations: - external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. 
-``` - -### AWS L7 ELB with SSL Termination - -Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/master/deploy/aws/l7/service-l7.yaml): - -```yaml -controller: - service: - targetPorts: - http: http - https: http - annotations: - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" - service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' -``` - -### AWS route53-mapper - -To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label: - -```yaml -controller: - service: - labels: - dns: "route53" - annotations: - domainName: "kubernetes-example.com" -``` - -### Additional Internal Load Balancer - -This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. - -By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. - -You'll need to set both the following values: - -`controller.service.internal.enabled` -`controller.service.internal.annotations` - -If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. - -`controller.service.internal.annotations` varies with the cloud service you're using. 
- -Example for AWS: - -```yaml -controller: - service: - internal: - enabled: true - annotations: - # Create internal ELB - service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 - # Any other annotation can be declared here. -``` - -Example for GCE: - -```yaml -controller: - service: - internal: - enabled: true - annotations: - # Create internal LB - cloud.google.com/load-balancer-type: "Internal" - # Any other annotation can be declared here. -``` - -Example for Azure: - -```yaml -controller: - service: - annotations: - # Create internal LB - service.beta.kubernetes.io/azure-load-balancer-internal: "true" - # Any other annotation can be declared here. -``` - -An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. - -### Ingress Admission Webhooks - -With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. -**This feature is enabled by default since 0.31.0.** - -With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) - -### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" - -If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: - -```console -Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable -``` - -Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. 
- -As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customconfig-values.yaml deleted file mode 100755 index e12b534..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customconfig-values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: false - service: - type: ClusterIP - - config: - use-proxy-protocol: "true" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customnodeport-values.yaml deleted file mode 100755 index cfc545f..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-customnodeport-values.yaml +++ /dev/null @@ -1,18 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: false - - service: - type: NodePort - nodePorts: - tcp: - 9000: 30090 - udp: - 9001: 30091 - -tcp: - 9000: "default/test:8080" - -udp: - 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-headers-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-headers-values.yaml deleted file mode 100755 index ff82cd9..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-headers-values.yaml +++ /dev/null @@ -1,10 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: false - addHeaders: - X-Frame-Options: deny - proxySetHeaders: - X-Forwarded-Proto: https - service: - type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-internal-lb-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-internal-lb-values.yaml deleted file mode 100755 index 443e39d..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-internal-lb-values.yaml +++ /dev/null @@ -1,10 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: false - service: - type: ClusterIP - internal: - enabled: true - annotations: - service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-nodeport-values.yaml deleted file mode 100755 index 6d6605f..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-nodeport-values.yaml +++ /dev/null @@ -1,6 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: false - service: - type: NodePort diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-configMapNamespace-values.yaml deleted file mode 100755 index afb5487..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-configMapNamespace-values.yaml +++ /dev/null @@ -1,16 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: false - service: - type: ClusterIP - tcp: - configMapNamespace: default - udp: - configMapNamespace: default - -tcp: - 9000: "default/test:8080" - -udp: - 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-values.yaml deleted file mode 100755 index 7b4d7cb..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-udp-values.yaml +++ /dev/null @@ -1,12 +0,0 @@ -controller: - kind: DaemonSet - 
admissionWebhooks: - enabled: false - service: - type: ClusterIP - -tcp: - 9000: "default/test:8080" - -udp: - 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-values.yaml deleted file mode 100755 index a359a6a..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/daemonset-tcp-values.yaml +++ /dev/null @@ -1,10 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: false - service: - type: ClusterIP - -tcp: - 9000: "default/test:8080" - 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-default-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-default-values.yaml deleted file mode 100755 index e63a7f5..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-default-values.yaml +++ /dev/null @@ -1,6 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: false - service: - type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-metrics-values.yaml deleted file mode 100755 index 1e5190a..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-metrics-values.yaml +++ /dev/null @@ -1,8 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: false - metrics: - enabled: true - service: - type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-psp-values.yaml deleted file mode 100755 index 017b60a..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-psp-values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: false - 
service: - type: ClusterIP - -podSecurityPolicy: - enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-and-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-and-psp-values.yaml deleted file mode 100755 index 88aafc6..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-and-psp-values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: true - service: - type: ClusterIP - -podSecurityPolicy: - enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-values.yaml deleted file mode 100755 index 6e3b371..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deamonset-webhook-values.yaml +++ /dev/null @@ -1,6 +0,0 @@ -controller: - kind: DaemonSet - admissionWebhooks: - enabled: true - service: - type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-autoscaling-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-autoscaling-values.yaml deleted file mode 100755 index 5314cec..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-autoscaling-values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -controller: - autoscaling: - enabled: true - admissionWebhooks: - enabled: false - service: - type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customconfig-values.yaml deleted file mode 100755 index f232531..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customconfig-values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -controller: - config: - use-proxy-protocol: "true" - admissionWebhooks: - enabled: false - service: - 
type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customnodeport-values.yaml deleted file mode 100755 index 9eda282..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-customnodeport-values.yaml +++ /dev/null @@ -1,16 +0,0 @@ -controller: - admissionWebhooks: - enabled: false - service: - type: NodePort - nodePorts: - tcp: - 9000: 30090 - udp: - 9001: 30091 - -tcp: - 9000: "default/test:8080" - -udp: - 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-default-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-default-values.yaml deleted file mode 100755 index 93a393c..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-default-values.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Left blank to test default values -controller: - service: - type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-headers-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-headers-values.yaml deleted file mode 100755 index 665fd48..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-headers-values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -controller: - admissionWebhooks: - enabled: false - addHeaders: - X-Frame-Options: deny - proxySetHeaders: - X-Forwarded-Proto: https - service: - type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-internal-lb-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-internal-lb-values.yaml deleted file mode 100755 index 892f6de..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-internal-lb-values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -controller: - admissionWebhooks: - enabled: false - 
service: - type: ClusterIP - internal: - enabled: true - annotations: - service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-metrics-values.yaml deleted file mode 100755 index 887ed0f..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-metrics-values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -controller: - admissionWebhooks: - enabled: false - metrics: - enabled: true - service: - type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-nodeport-values.yaml deleted file mode 100755 index 84f1f75..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-nodeport-values.yaml +++ /dev/null @@ -1,5 +0,0 @@ -controller: - admissionWebhooks: - enabled: false - service: - type: NodePort diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-psp-values.yaml deleted file mode 100755 index e339c69..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-psp-values.yaml +++ /dev/null @@ -1,6 +0,0 @@ -controller: - service: - type: ClusterIP - -podSecurityPolicy: - enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-configMapNamespace-values.yaml deleted file mode 100755 index 141e06b..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-configMapNamespace-values.yaml +++ /dev/null @@ -1,15 +0,0 @@ -controller: - admissionWebhooks: - enabled: false - service: - type: ClusterIP - tcp: - 
configMapNamespace: default - udp: - configMapNamespace: default - -tcp: - 9000: "default/test:8080" - -udp: - 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-values.yaml deleted file mode 100755 index bc29abe..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-udp-values.yaml +++ /dev/null @@ -1,11 +0,0 @@ -controller: - admissionWebhooks: - enabled: false - service: - type: ClusterIP - -tcp: - 9000: "default/test:8080" - -udp: - 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-values.yaml deleted file mode 100755 index b7f54c0..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-tcp-values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -controller: - service: - type: ClusterIP - -tcp: - 9000: "default/test:8080" - 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-and-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-and-psp-values.yaml deleted file mode 100755 index a829c36..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-and-psp-values.yaml +++ /dev/null @@ -1,8 +0,0 @@ -controller: - admissionWebhooks: - enabled: true - service: - type: ClusterIP - -podSecurityPolicy: - enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-values.yaml deleted file mode 100755 index 4f18a70..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/ci/deployment-webhook-values.yaml +++ /dev/null @@ -1,5 +0,0 @@ -controller: - admissionWebhooks: 
- enabled: true - service: - type: ClusterIP diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/NOTES.txt b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/NOTES.txt deleted file mode 100755 index 60fb2c1..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/NOTES.txt +++ /dev/null @@ -1,71 +0,0 @@ -The ingress-nginx controller has been installed. - -{{- if contains "NodePort" .Values.controller.service.type }} -Get the application URL by running these commands: - -{{- if (not (empty .Values.controller.service.nodePorts.http)) }} - export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} -{{- else }} - export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) -{{- end }} -{{- if (not (empty .Values.controller.service.nodePorts.https)) }} - export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} -{{- else }} - export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ include "ingress-nginx.controller.fullname" . }}) -{{- end }} - export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") - - echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." - echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." -{{- else if contains "LoadBalancer" .Values.controller.service.type }} -It may take a few minutes for the LoadBalancer IP to be available. -You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ include "ingress-nginx.controller.fullname" . 
}}' -{{- else if contains "ClusterIP" .Values.controller.service.type }} -Get the application URL by running these commands: - export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "ingress-nginx.name" . }},component={{ .Values.controller.name }},release={{ .Release.Name }}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 - echo "Visit http://127.0.0.1:8080 to access your application." -{{- end }} - -An example Ingress that makes use of the controller: - - apiVersion: networking.k8s.io/v1beta1 - kind: Ingress - metadata: - annotations: - kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} - name: example - namespace: foo - spec: - rules: - - host: www.example.com - http: - paths: - - backend: - serviceName: exampleService - servicePort: 80 - path: / - # This section is only required if TLS is to be enabled for the Ingress - tls: - - hosts: - - www.example.com - secretName: example-tls - -If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: - - apiVersion: v1 - kind: Secret - metadata: - name: example-tls - namespace: foo - data: - tls.crt: - tls.key: - type: kubernetes.io/tls - -{{- if .Values.controller.headers }} -################################################################################# -###### WARNING: `controller.headers` has been deprecated! ##### -###### It has been renamed to `controller.proxySetHeaders`. 
##### -################################################################################# -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/_helpers.tpl b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/_helpers.tpl deleted file mode 100755 index 61aadf0..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/_helpers.tpl +++ /dev/null @@ -1,132 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "ingress-nginx.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "ingress-nginx.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "ingress-nginx.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a default fully qualified controller name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "ingress-nginx.controller.fullname" -}} -{{- printf "%s-%s" (include "ingress-nginx.fullname" .) "controller" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Construct the path for the publish-service. - -By convention this will simply use the / to match the name of the -service generated. 
- -Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` - -*/}} -{{- define "ingress-nginx.controller.publishServicePath" -}} -{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}} -{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} -{{- print $servicePath | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified default backend name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "ingress-nginx.defaultBackend.fullname" -}} -{{- printf "%s-%s" (include "ingress-nginx.fullname" .) "defaultbackend" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "ingress-nginx.labels" -}} -helm.sh/chart: {{ include "ingress-nginx.chart" . }} -{{ include "ingress-nginx.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Selector labels -*/}} -{{- define "ingress-nginx.selectorLabels" -}} -app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} - -{{/* -Create the name of the controller service account to use -*/}} -{{- define "ingress-nginx.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "ingress-nginx.fullname" .) 
.Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled -*/}} -{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}} -{{- if .Values.defaultBackend.serviceAccount.create -}} - {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }} -{{- else -}} - {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiGroup for PodSecurityPolicy. -*/}} -{{- define "podSecurityPolicy.apiGroup" -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "policy" -}} -{{- else -}} -{{- print "extensions" -}} -{{- end -}} -{{- end -}} - -{{/* -Check the ingress controller version tag is at most three versions behind the last release -*/}} -{{- define "isControllerTagValid" -}} -{{- if not (semverCompare ">=0.27.0-0" (trimPrefix "nginx-" .Values.controller.image.tag)) -}} -{{- fail "Controller container image tag should be 0.27.0 or higher" -}} -{{- end -}} -{{- end -}} - -{{- define "system_default_registry" -}} -{{- if .Values.global.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.systemDefaultRegistry -}} -{{- else -}} -{{- "" -}} -{{- end -}} -{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrole.yaml deleted file mode 100755 index 7eb5738..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrole.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} -apiVersion: 
rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ include "ingress-nginx.fullname" . }}-admission - annotations: - "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade - "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: admission-webhook -rules: - - apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - update -{{- if .Values.podSecurityPolicy.enabled }} - - apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ include "ingress-nginx.fullname" . }}-admission -{{- end }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrolebinding.yaml deleted file mode 100755 index 9793125..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ include "ingress-nginx.fullname" . }}-admission - annotations: - "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade - "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: admission-webhook -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ include "ingress-nginx.fullname" . }}-admission -subjects: - - kind: ServiceAccount - name: {{ include "ingress-nginx.fullname" . 
}}-admission - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-createSecret.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-createSecret.yaml deleted file mode 100755 index 04a3e10..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-createSecret.yaml +++ /dev/null @@ -1,60 +0,0 @@ -{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ include "ingress-nginx.fullname" . }}-admission-create - annotations: - "helm.sh/hook": pre-install,pre-upgrade - "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: admission-webhook -spec: -{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} - # Alpha feature since k8s 1.12 - ttlSecondsAfterFinished: 0 -{{- end }} - template: - metadata: - name: {{ include "ingress-nginx.fullname" . }}-admission-create - {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} - annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "ingress-nginx.labels" . | nindent 8 }} - app.kubernetes.io/component: admission-webhook - spec: - {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} - priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} - {{- end }} - containers: - - name: create - {{- with .Values.controller.admissionWebhooks.patch.image }} - image: "{{ template "system_default_registry" . 
}}{{.repository}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" - {{- end }} - imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} - args: - - create - - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc - - --namespace=$(POD_NAMESPACE) - - --secret-name={{ include "ingress-nginx.fullname" . }}-admission - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - restartPolicy: OnFailure - serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission - {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} - nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} - {{- end }} - {{- if .Values.controller.admissionWebhooks.patch.tolerations }} - tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} - {{- end }} - securityContext: - runAsNonRoot: true - runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-patchWebhook.yaml deleted file mode 100755 index 43d9bf5..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +++ /dev/null @@ -1,62 +0,0 @@ -{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ include "ingress-nginx.fullname" . }}-admission-patch - annotations: - "helm.sh/hook": post-install,post-upgrade - "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded - labels: - {{- include "ingress-nginx.labels" . 
| nindent 4 }} - app.kubernetes.io/component: admission-webhook -spec: -{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} - # Alpha feature since k8s 1.12 - ttlSecondsAfterFinished: 0 -{{- end }} - template: - metadata: - name: {{ include "ingress-nginx.fullname" . }}-admission-patch - {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} - annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "ingress-nginx.labels" . | nindent 8 }} - app.kubernetes.io/component: admission-webhook - spec: - {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} - priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} - {{- end }} - containers: - - name: patch - {{- with .Values.controller.admissionWebhooks.patch.image }} - image: "{{ template "system_default_registry" . }}{{.repository}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" - {{- end }} - imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} - args: - - patch - - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission - - --namespace=$(POD_NAMESPACE) - - --patch-mutating=false - - --secret-name={{ include "ingress-nginx.fullname" . }}-admission - - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - restartPolicy: OnFailure - serviceAccountName: {{ include "ingress-nginx.fullname" . 
}}-admission - {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} - nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} - {{- end }} - {{- if .Values.controller.admissionWebhooks.patch.tolerations }} - tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} - {{- end }} - securityContext: - runAsNonRoot: true - runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/psp.yaml deleted file mode 100755 index e8c8da9..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/psp.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled -}} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "ingress-nginx.fullname" . }}-admission - annotations: - "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade - "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded - labels: - {{- include "ingress-nginx.labels" . 
| nindent 4 }} - app.kubernetes.io/component: admission-webhook -spec: - allowPrivilegeEscalation: false - fsGroup: - ranges: - - max: 65535 - min: 1 - rule: MustRunAs - requiredDropCapabilities: - - ALL - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - ranges: - - max: 65535 - min: 1 - rule: MustRunAs - volumes: - - configMap - - emptyDir - - projected - - secret - - downwardAPI -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/role.yaml deleted file mode 100755 index fe1c2ee..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ include "ingress-nginx.fullname" . }}-admission - annotations: - "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade - "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded - labels: - {{- include "ingress-nginx.labels" . 
| nindent 4 }} - app.kubernetes.io/component: admission-webhook -rules: - - apiGroups: - - "" - resources: - - secrets - verbs: - - get - - create -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/rolebinding.yaml deleted file mode 100755 index 391e5e9..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/rolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ include "ingress-nginx.fullname" . }}-admission - annotations: - "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade - "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: admission-webhook -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ include "ingress-nginx.fullname" . }}-admission -subjects: - - kind: ServiceAccount - name: {{ include "ingress-nginx.fullname" . 
}}-admission - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/serviceaccount.yaml deleted file mode 100755 index 5dfdd34..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/job-patch/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "ingress-nginx.fullname" . }}-admission - annotations: - "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade - "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: admission-webhook -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/validating-webhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/validating-webhook.yaml deleted file mode 100755 index 5d338e2..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/admission-webhooks/validating-webhook.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if .Values.controller.admissionWebhooks.enabled -}} -# before changing this value, check the required kubernetes version -# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: admission-webhook - name: {{ include "ingress-nginx.fullname" . 
}}-admission -webhooks: - - name: validate.nginx.ingress.kubernetes.io - rules: - - apiGroups: - - networking.k8s.io - apiVersions: - - v1beta1 - - v1 - operations: - - CREATE - - UPDATE - resources: - - ingresses - failurePolicy: Fail - sideEffects: None - admissionReviewVersions: - - v1beta1 - clientConfig: - service: - namespace: {{ .Release.Namespace }} - name: {{ include "ingress-nginx.controller.fullname" . }}-admission - path: /networking/v1beta1/ingresses -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrole.yaml deleted file mode 100755 index 2035f54..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrole.yaml +++ /dev/null @@ -1,76 +0,0 @@ -{{- if and .Values.rbac.create (not .Values.rbac.scope) -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - name: {{ include "ingress-nginx.fullname" . 
}} -rules: - - apiGroups: - - "" - resources: - - configmaps - - endpoints - - nodes - - pods - - secrets - verbs: - - list - - watch -{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} - - apiGroups: - - "" - resources: - - namespaces - resourceNames: - - "{{ .Values.controller.scope.namespace }}" - verbs: - - get -{{- end }} - - apiGroups: - - "" - resources: - - nodes - verbs: - - get - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - update - - watch - - apiGroups: - - extensions - - "networking.k8s.io" # k8s 1.14+ - resources: - - ingresses - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - apiGroups: - - extensions - - "networking.k8s.io" # k8s 1.14+ - resources: - - ingresses/status - verbs: - - update - - apiGroups: - - "networking.k8s.io" # k8s 1.14+ - resources: - - ingressclasses - verbs: - - get - - list - - watch -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrolebinding.yaml deleted file mode 100755 index a341f52..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.rbac.create (not .Values.rbac.scope) -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - name: {{ include "ingress-nginx.fullname" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ include "ingress-nginx.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "ingress-nginx.serviceAccountName" . 
}} - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-addheaders.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-addheaders.yaml deleted file mode 100755 index c064589..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-addheaders.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- if .Values.controller.addHeaders -}} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers -data: {{ toYaml .Values.controller.addHeaders | nindent 2 }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-proxyheaders.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-proxyheaders.yaml deleted file mode 100755 index 5a1b252..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-proxyheaders.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - name: {{ include "ingress-nginx.fullname" . 
}}-custom-proxy-headers -data: -{{- if .Values.controller.proxySetHeaders }} -{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} -{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} -{{ toYaml .Values.controller.headers | indent 2 }} -{{- end }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-tcp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-tcp.yaml deleted file mode 100755 index bc97251..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-tcp.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.tcp -}} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller -{{- if .Values.controller.tcp.annotations }} - annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }} -{{- end }} - name: {{ include "ingress-nginx.fullname" . }}-tcp -data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-udp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-udp.yaml deleted file mode 100755 index a9dc388..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap-udp.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.udp -}} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller -{{- if .Values.controller.udp.annotations }} - annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }} -{{- end }} - name: {{ include "ingress-nginx.fullname" . }}-udp -data: {{ tpl (toYaml .Values.udp) . 
| nindent 2 }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap.yaml deleted file mode 100755 index 5b0d371..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-configmap.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller -{{- if .Values.controller.configAnnotations }} - annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }} -{{- end }} - name: {{ include "ingress-nginx.controller.fullname" . }} -data: -{{- if .Values.controller.addHeaders }} - add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers -{{- end }} -{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} - proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers -{{- end }} -{{- if .Values.controller.config }} - {{ toYaml .Values.controller.config | nindent 2 }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-daemonset.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-daemonset.yaml deleted file mode 100755 index 4c6a1e2..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-daemonset.yaml +++ /dev/null @@ -1,252 +0,0 @@ -{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}} -{{- include "isControllerTagValid" . -}} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - {{- with .Values.controller.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - name: {{ include "ingress-nginx.controller.fullname" . 
}} - {{- if .Values.controller.annotations }} - annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} - {{- end }} -spec: - selector: - matchLabels: - {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} - app.kubernetes.io/component: controller - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} - {{- if .Values.controller.updateStrategy }} - updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }} - {{- end }} - minReadySeconds: {{ .Values.controller.minReadySeconds }} - template: - metadata: - {{- if .Values.controller.podAnnotations }} - annotations: {{ toYaml .Values.controller.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} - app.kubernetes.io/component: controller - {{- if .Values.controller.podLabels }} - {{- toYaml .Values.controller.podLabels | nindent 8 }} - {{- end }} - spec: - {{- if .Values.controller.dnsConfig }} - dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} - {{- end }} - dnsPolicy: {{ .Values.controller.dnsPolicy }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} - {{- end }} - {{- if .Values.controller.priorityClassName }} - priorityClassName: {{ .Values.controller.priorityClassName }} - {{- end }} - {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} - securityContext: - {{- end }} - {{- if .Values.controller.podSecurityContext }} - {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} - {{- end }} - {{- if .Values.controller.sysctls }} - sysctls: - {{- range $sysctl, $value := .Values.controller.sysctls }} - - name: {{ $sysctl | quote }} - value: {{ $value | quote }} - {{- end }} - {{- end }} - containers: - - name: controller - {{- with .Values.controller.image }} - image: "{{ template "system_default_registry" . 
}}{{.repository}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" - {{- end }} - imagePullPolicy: {{ .Values.controller.image.pullPolicy }} - {{- if .Values.controller.lifecycle }} - lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} - {{- end }} - args: - - /nginx-ingress-controller - {{- if .Values.defaultBackend.enabled }} - - --default-backend-service={{ .Release.Namespace }}/{{ include "ingress-nginx.defaultBackend.fullname" . }} - {{- end }} - {{- if .Values.controller.publishService.enabled }} - - --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} - {{- end }} - - --election-id={{ .Values.controller.electionID }} - - --ingress-class={{ .Values.controller.ingressClass }} - - --configmap={{ .Release.Namespace }}/{{ include "ingress-nginx.controller.fullname" . }} - {{- if .Values.tcp }} - - --tcp-services-configmap={{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-tcp - {{- end }} - {{- if .Values.udp }} - - --udp-services-configmap={{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . 
}}-udp - {{- end }} - {{- if .Values.controller.scope.enabled }} - - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} - {{- end }} - {{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} - - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} - {{- end }} - {{- if .Values.controller.admissionWebhooks.enabled }} - - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} - - --validating-webhook-certificate=/usr/local/certificates/cert - - --validating-webhook-key=/usr/local/certificates/key - {{- end }} - {{- if .Values.controller.maxmindLicenseKey }} - - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} - {{- end }} - {{- if not (eq .Values.controller.healthCheckPath "/healthz") }} - - --health-check-path={{ .Values.controller.healthCheckPath }} - {{- end }} - {{- range $key, $value := .Values.controller.extraArgs }} - {{- /* Accept keys without values or with false as value */}} - {{- if eq ($value | quote | len) 2 }} - - --{{ $key }} - {{- else }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- end }} - securityContext: - capabilities: - drop: - - ALL - add: - - NET_BIND_SERVICE - runAsUser: {{ .Values.controller.image.runAsUser }} - allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - {{- if .Values.controller.enableMimalloc }} - - name: LD_PRELOAD - value: /usr/local/lib/libmimalloc.so - {{- end }} - {{- if .Values.controller.extraEnvs }} - {{- toYaml .Values.controller.extraEnvs | nindent 12 }} - {{- end }} - livenessProbe: - httpGet: - path: /healthz - port: {{ .Values.controller.livenessProbe.port }} - scheme: HTTP - initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ 
.Values.controller.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} - readinessProbe: - httpGet: - path: /healthz - port: {{ .Values.controller.readinessProbe.port }} - scheme: HTTP - initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} - ports: - {{- range $key, $value := .Values.controller.containerPort }} - - name: {{ $key }} - containerPort: {{ $value }} - protocol: TCP - {{- if $.Values.controller.hostPort.enabled }} - hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} - {{- end }} - {{- end }} - {{- if .Values.controller.metrics.enabled }} - - name: metrics - containerPort: {{ .Values.controller.metrics.port }} - protocol: TCP - {{- end }} - {{- if .Values.controller.admissionWebhooks.enabled }} - - name: webhook - containerPort: {{ .Values.controller.admissionWebhooks.port }} - protocol: TCP - {{- end }} - {{- range $key, $value := .Values.tcp }} - - name: {{ $key }}-tcp - containerPort: {{ $key }} - protocol: TCP - {{- if $.Values.controller.hostPort.enabled }} - hostPort: {{ $key }} - {{- end }} - {{- end }} - {{- range $key, $value := .Values.udp }} - - name: {{ $key }}-udp - containerPort: {{ $key }} - protocol: UDP - {{- if $.Values.controller.hostPort.enabled }} - hostPort: {{ $key }} - {{- end }} - {{- end }} - {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }} - volumeMounts: - {{- if 
.Values.controller.customTemplate.configMapName }} - - mountPath: /etc/nginx/template - name: nginx-template-volume - readOnly: true - {{- end }} - {{- if .Values.controller.admissionWebhooks.enabled }} - - name: webhook-cert - mountPath: /usr/local/certificates/ - readOnly: true - {{- end }} - {{- if .Values.controller.extraVolumeMounts }} - {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} - {{- end }} - {{- end }} - {{- if .Values.controller.resources }} - resources: {{ toYaml .Values.controller.resources | nindent 12 }} - {{- end }} - {{- if .Values.controller.extraContainers }} - {{ toYaml .Values.controller.extraContainers | nindent 8 }} - {{- end }} - {{- if .Values.controller.extraInitContainers }} - initContainers: {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} - {{- end }} - {{- if .Values.controller.hostNetwork }} - hostNetwork: {{ .Values.controller.hostNetwork }} - {{- end }} - {{- if .Values.controller.nodeSelector }} - nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} - {{- end }} - {{- if .Values.controller.tolerations }} - tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} - {{- end }} - {{- if .Values.controller.affinity }} - affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} - {{- end }} - {{- if .Values.controller.topologySpreadConstraints }} - topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} - {{- end }} - serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . 
}} - terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} - {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} - volumes: - {{- if .Values.controller.customTemplate.configMapName }} - - name: nginx-template-volume - configMap: - name: {{ .Values.controller.customTemplate.configMapName }} - items: - - key: {{ .Values.controller.customTemplate.configMapKey }} - path: nginx.tmpl - {{- end }} - {{- if .Values.controller.admissionWebhooks.enabled }} - - name: webhook-cert - secret: - secretName: {{ include "ingress-nginx.fullname" . }}-admission - {{- end }} - {{- if .Values.controller.extraVolumes }} - {{ toYaml .Values.controller.extraVolumes | nindent 8 }} - {{- end }} - {{- end }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-deployment.yaml deleted file mode 100755 index f0b7afd..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-deployment.yaml +++ /dev/null @@ -1,256 +0,0 @@ -{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}} -{{- include "isControllerTagValid" . -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - {{- with .Values.controller.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - name: {{ include "ingress-nginx.controller.fullname" . }} - {{- if .Values.controller.annotations }} - annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} - {{- end }} -spec: - selector: - matchLabels: - {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} - app.kubernetes.io/component: controller - {{- if not .Values.controller.autoscaling.enabled }} - replicas: {{ .Values.controller.replicaCount }} - {{- end }} - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} - {{- if .Values.controller.updateStrategy }} - strategy: - {{ toYaml .Values.controller.updateStrategy | nindent 4 }} - {{- end }} - minReadySeconds: {{ .Values.controller.minReadySeconds }} - template: - metadata: - {{- if .Values.controller.podAnnotations }} - annotations: {{ toYaml .Values.controller.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} - app.kubernetes.io/component: controller - {{- if .Values.controller.podLabels }} - {{- toYaml .Values.controller.podLabels | nindent 8 }} - {{- end }} - spec: - {{- if .Values.controller.dnsConfig }} - dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} - {{- end }} - dnsPolicy: {{ .Values.controller.dnsPolicy }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} - {{- end }} - {{- if .Values.controller.priorityClassName }} - priorityClassName: {{ .Values.controller.priorityClassName }} - {{- end }} - {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} - securityContext: - {{- end }} - {{- if .Values.controller.podSecurityContext }} - {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} - {{- end }} - {{- if .Values.controller.sysctls }} - sysctls: - {{- range $sysctl, $value := .Values.controller.sysctls }} - - name: {{ $sysctl | quote }} - value: {{ $value | quote }} - {{- end }} - {{- end }} - containers: - - name: controller - {{- with .Values.controller.image }} - image: "{{ template "system_default_registry" . 
}}{{.repository}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" - {{- end }} - imagePullPolicy: {{ .Values.controller.image.pullPolicy }} - {{- if .Values.controller.lifecycle }} - lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} - {{- end }} - args: - - /nginx-ingress-controller - {{- if .Values.defaultBackend.enabled }} - - --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }} - {{- end }} - {{- if .Values.controller.publishService.enabled }} - - --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} - {{- end }} - - --election-id={{ .Values.controller.electionID }} - - --ingress-class={{ .Values.controller.ingressClass }} - - --configmap=$(POD_NAMESPACE)/{{ include "ingress-nginx.controller.fullname" . }} - {{- if .Values.tcp }} - - --tcp-services-configmap=$(POD_NAMESPACE)/{{ include "ingress-nginx.fullname" . }}-tcp - {{- end }} - {{- if .Values.udp }} - - --udp-services-configmap=$(POD_NAMESPACE)/{{ include "ingress-nginx.fullname" . 
}}-udp - {{- end }} - {{- if .Values.controller.scope.enabled }} - - --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }} - {{- end }} - {{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} - - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} - {{- end }} - {{- if .Values.controller.admissionWebhooks.enabled }} - - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} - - --validating-webhook-certificate=/usr/local/certificates/cert - - --validating-webhook-key=/usr/local/certificates/key - {{- end }} - {{- if .Values.controller.maxmindLicenseKey }} - - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} - {{- end }} - {{- if not (eq .Values.controller.healthCheckPath "/healthz") }} - - --health-check-path={{ .Values.controller.healthCheckPath }} - {{- end }} - {{- range $key, $value := .Values.controller.extraArgs }} - {{- /* Accept keys without values or with false as value */}} - {{- if eq ($value | quote | len) 2 }} - - --{{ $key }} - {{- else }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- end }} - securityContext: - capabilities: - drop: - - ALL - add: - - NET_BIND_SERVICE - runAsUser: {{ .Values.controller.image.runAsUser }} - allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - {{- if .Values.controller.enableMimalloc }} - - name: LD_PRELOAD - value: /usr/local/lib/libmimalloc.so - {{- end }} - {{- if .Values.controller.extraEnvs }} - {{- toYaml .Values.controller.extraEnvs | nindent 12 }} - {{- end }} - livenessProbe: - httpGet: - path: {{ .Values.controller.healthCheckPath }} - port: {{ .Values.controller.livenessProbe.port }} - scheme: HTTP - initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ 
.Values.controller.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} - readinessProbe: - httpGet: - path: {{ .Values.controller.healthCheckPath }} - port: {{ .Values.controller.readinessProbe.port }} - scheme: HTTP - initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} - ports: - {{- range $key, $value := .Values.controller.containerPort }} - - name: {{ $key }} - containerPort: {{ $value }} - protocol: TCP - {{- if $.Values.controller.hostPort.enabled }} - hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} - {{- end }} - {{- end }} - {{- if .Values.controller.metrics.enabled }} - - name: metrics - containerPort: {{ .Values.controller.metrics.port }} - protocol: TCP - {{- end }} - {{- if .Values.controller.admissionWebhooks.enabled }} - - name: webhook - containerPort: {{ .Values.controller.admissionWebhooks.port }} - protocol: TCP - {{- end }} - {{- range $key, $value := .Values.tcp }} - - name: {{ $key }}-tcp - containerPort: {{ $key }} - protocol: TCP - {{- if $.Values.controller.hostPort.enabled }} - hostPort: {{ $key }} - {{- end }} - {{- end }} - {{- range $key, $value := .Values.udp }} - - name: {{ $key }}-udp - containerPort: {{ $key }} - protocol: UDP - {{- if $.Values.controller.hostPort.enabled }} - hostPort: {{ $key }} - {{- end }} - {{- end }} - {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }} - volumeMounts: - 
{{- if .Values.controller.customTemplate.configMapName }} - - mountPath: /etc/nginx/template - name: nginx-template-volume - readOnly: true - {{- end }} - {{- if .Values.controller.admissionWebhooks.enabled }} - - name: webhook-cert - mountPath: /usr/local/certificates/ - readOnly: true - {{- end }} - {{- if .Values.controller.extraVolumeMounts }} - {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} - {{- end }} - {{- end }} - {{- if .Values.controller.resources }} - resources: {{ toYaml .Values.controller.resources | nindent 12 }} - {{- end }} - {{- if .Values.controller.extraContainers }} - {{ toYaml .Values.controller.extraContainers | nindent 8 }} - {{- end }} - {{- if .Values.controller.extraInitContainers }} - initContainers: {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} - {{- end }} - {{- if .Values.controller.hostNetwork }} - hostNetwork: {{ .Values.controller.hostNetwork }} - {{- end }} - {{- if .Values.controller.nodeSelector }} - nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} - {{- end }} - {{- if .Values.controller.tolerations }} - tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} - {{- end }} - {{- if .Values.controller.affinity }} - affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} - {{- end }} - {{- if .Values.controller.topologySpreadConstraints }} - topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} - {{- end }} - serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . 
}} - terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} - {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} - volumes: - {{- if .Values.controller.customTemplate.configMapName }} - - name: nginx-template-volume - configMap: - name: {{ .Values.controller.customTemplate.configMapName }} - items: - - key: {{ .Values.controller.customTemplate.configMapKey }} - path: nginx.tmpl - {{- end }} - {{- if .Values.controller.admissionWebhooks.enabled }} - - name: webhook-cert - secret: - secretName: {{ include "ingress-nginx.fullname" . }}-admission - {{- end }} - {{- if .Values.controller.extraVolumes }} - {{ toYaml .Values.controller.extraVolumes | nindent 8 }} - {{- end }} - {{- end }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-hpa.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-hpa.yaml deleted file mode 100755 index 4923cf8..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-hpa.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} -apiVersion: autoscaling/v2beta2 -kind: HorizontalPodAutoscaler -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - name: {{ include "ingress-nginx.controller.fullname" . }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "ingress-nginx.controller.fullname" . 
}} - minReplicas: {{ .Values.controller.autoscaling.minReplicas }} - maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} - metrics: - {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ . }} - {{- end }} - {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - type: Utilization - averageUtilization: {{ . }} - {{- end }} - {{- with .Values.controller.autoscalingTemplate }} -{{- toYaml . | nindent 2 }} - {{- end }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-poddisruptionbudget.yaml deleted file mode 100755 index 9dc8789..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-poddisruptionbudget.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (gt (.Values.controller.replicaCount | int) 1) -}} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - name: {{ include "ingress-nginx.controller.fullname" . }} -spec: - selector: - matchLabels: - {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} - app.kubernetes.io/component: controller - minAvailable: {{ .Values.controller.minAvailable }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-prometheusrules.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-prometheusrules.yaml deleted file mode 100755 index c0b7e89..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-prometheusrules.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.prometheusRule.enabled -}} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ include "ingress-nginx.controller.fullname" . }} -{{- if .Values.controller.metrics.prometheusRule.namespace }} - namespace: {{ .Values.controller.metrics.prometheusRule.namespace }} -{{- end }} - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} - {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }} - {{- end }} -spec: -{{- if .Values.controller.metrics.prometheusRule.rules }} - groups: - - name: {{ template "ingress-nginx.name" . }} - rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }} -{{- end }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-psp.yaml deleted file mode 100755 index bcf588c..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-psp.yaml +++ /dev/null @@ -1,86 +0,0 @@ -{{- if .Values.podSecurityPolicy.enabled -}} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "ingress-nginx.fullname" . }} - labels: - {{- include "ingress-nginx.labels" . 
| nindent 4 }} - app.kubernetes.io/component: controller -spec: - allowedCapabilities: - - NET_BIND_SERVICE -{{- if .Values.controller.sysctls }} - allowedUnsafeSysctls: - {{- range $sysctl, $value := .Values.controller.sysctls }} - - {{ $sysctl }} - {{- end }} -{{- end }} - privileged: false - allowPrivilegeEscalation: true - # Allow core volume types. - volumes: - - 'configMap' - #- 'emptyDir' - #- 'projected' - - 'secret' - #- 'downwardAPI' -{{- if .Values.controller.hostNetwork }} - hostNetwork: {{ .Values.controller.hostNetwork }} -{{- end }} -{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }} - hostPorts: -{{- if .Values.controller.hostNetwork }} -{{- range $key, $value := .Values.controller.containerPort }} - # {{ $key }} - - min: {{ $value }} - max: {{ $value }} -{{- end }} -{{- else if .Values.controller.hostPort.enabled }} -{{- range $key, $value := .Values.controller.hostPort.ports }} - # {{ $key }} - - min: {{ $value }} - max: {{ $value }} -{{- end }} -{{- end }} -{{- if .Values.controller.metrics.enabled }} - # metrics - - min: {{ .Values.controller.metrics.port }} - max: {{ .Values.controller.metrics.port }} -{{- end }} -{{- if .Values.controller.admissionWebhooks.enabled }} - # admission webhooks - - min: {{ .Values.controller.admissionWebhooks.port }} - max: {{ .Values.controller.admissionWebhooks.port }} -{{- end }} -{{- range $key, $value := .Values.tcp }} - # {{ $key }}-tcp - - min: {{ $key }} - max: {{ $key }} -{{- end }} -{{- range $key, $value := .Values.udp }} - # {{ $key }}-udp - - min: {{ $key }} - max: {{ $key }} -{{- end }} -{{- end }} - hostIPC: false - hostPID: false - runAsUser: - # Require the container to run without root privileges. - rule: 'MustRunAsNonRoot' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. 
- - min: 1 - max: 65535 - readOnlyRootFilesystem: false - seLinux: - rule: 'RunAsAny' -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-role.yaml deleted file mode 100755 index f2e3927..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-role.yaml +++ /dev/null @@ -1,96 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - name: {{ include "ingress-nginx.fullname" . }} -rules: - - apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - endpoints - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - update - - watch - - apiGroups: - - extensions - - "networking.k8s.io" # k8s 1.14+ - resources: - - ingresses - verbs: - - get - - list - - watch - - apiGroups: - - extensions - - "networking.k8s.io" # k8s 1.14+ - resources: - - ingresses/status - verbs: - - update - - apiGroups: - - "networking.k8s.io" # k8s 1.14+ - resources: - - ingressclasses - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - configmaps - resourceNames: - - {{ .Values.controller.electionID }}-{{ .Values.controller.ingressClass }} - verbs: - - get - - update - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - apiGroups: - - "" - resources: - - endpoints - verbs: - - create - - get - - update - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -{{- if .Values.podSecurityPolicy.enabled }} - - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ include "ingress-nginx.fullname" . 
}}] -{{- end }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-rolebinding.yaml deleted file mode 100755 index 5031350..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-rolebinding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - name: {{ include "ingress-nginx.fullname" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ include "ingress-nginx.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "ingress-nginx.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-internal.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-internal.yaml deleted file mode 100755 index 0bdae23..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-internal.yaml +++ /dev/null @@ -1,44 +0,0 @@ -{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}} -apiVersion: v1 -kind: Service -metadata: - annotations: - {{- range $key, $value := .Values.controller.service.internal.annotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - {{- if .Values.controller.service.labels }} - {{- toYaml .Values.controller.service.labels | nindent 4 }} - {{- end }} - name: {{ include "ingress-nginx.controller.fullname" . 
}}-internal -spec: - type: "{{ .Values.controller.service.type }}" -{{- if .Values.controller.service.internal.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }} -{{- end }} - ports: - {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} - {{- if .Values.controller.service.enableHttp }} - - name: http - port: {{ .Values.controller.service.ports.http }} - protocol: TCP - targetPort: {{ .Values.controller.service.targetPorts.http }} - {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} - nodePort: {{ .Values.controller.service.nodePorts.http }} - {{- end }} - {{- end }} - {{- if .Values.controller.service.enableHttps }} - - name: https - port: {{ .Values.controller.service.ports.https }} - protocol: TCP - targetPort: {{ .Values.controller.service.targetPorts.https }} - {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} - nodePort: {{ .Values.controller.service.nodePorts.https }} - {{- end }} - {{- end }} - selector: - {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} - app.kubernetes.io/component: controller -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-metrics.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-metrics.yaml deleted file mode 100755 index b01f460..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-metrics.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{- if .Values.controller.metrics.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.controller.metrics.service.annotations }} - annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }} -{{- end }} - labels: - {{- include "ingress-nginx.labels" . 
| nindent 4 }} - app.kubernetes.io/component: controller - {{- if .Values.controller.metrics.service.labels }} - {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }} - {{- end }} - name: {{ include "ingress-nginx.controller.fullname" . }}-metrics -spec: - type: {{ .Values.controller.metrics.service.type }} -{{- if .Values.controller.metrics.service.clusterIP }} - clusterIP: {{ .Values.controller.metrics.service.clusterIP }} -{{- end }} -{{- if .Values.controller.metrics.service.externalIPs }} - externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }} -{{- end }} -{{- if .Values.controller.metrics.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }} -{{- end }} -{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }} -{{- end }} -{{- if .Values.controller.metrics.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }} -{{- end }} - ports: - - name: metrics - port: {{ .Values.controller.metrics.service.servicePort }} - targetPort: metrics - {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }} - {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }} - nodePort: {{ .Values.controller.metrics.service.nodePort }} - {{- end }} - selector: - {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} - app.kubernetes.io/component: controller -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-webhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-webhook.yaml deleted file mode 100755 index 7a4dd51..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service-webhook.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if .Values.controller.admissionWebhooks.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.controller.admissionWebhooks.service.annotations }} - annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }} -{{- end }} - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - name: {{ include "ingress-nginx.controller.fullname" . }}-admission -spec: - type: {{ .Values.controller.admissionWebhooks.service.type }} -{{- if .Values.controller.admissionWebhooks.service.clusterIP }} - clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }} -{{- end }} -{{- if .Values.controller.admissionWebhooks.service.externalIPs }} - externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }} -{{- end }} -{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }} -{{- end }} -{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }} -{{- end }} - ports: - - name: https-webhook - port: 443 - targetPort: webhook - selector: - {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} - app.kubernetes.io/component: controller -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service.yaml deleted file mode 100755 index dce18c5..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-service.yaml +++ /dev/null @@ -1,83 +0,0 @@ -{{- if .Values.controller.service.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.controller.service.annotations }} - annotations: {{ toYaml .Values.controller.service.annotations | nindent 4 }} -{{- end }} - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - {{- if .Values.controller.service.labels }} - {{- toYaml .Values.controller.service.labels | nindent 4 }} - {{- end }} - name: {{ include "ingress-nginx.controller.fullname" . }} -spec: - type: {{ .Values.controller.service.type }} -{{- if .Values.controller.service.clusterIP }} - clusterIP: {{ .Values.controller.service.clusterIP }} -{{- end }} -{{- if .Values.controller.service.externalIPs }} - externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }} -{{- end }} -{{- if .Values.controller.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }} -{{- end }} -{{- if .Values.controller.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }} -{{- end }} -{{- if .Values.controller.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} -{{- end }} -{{- if .Values.controller.service.sessionAffinity }} - sessionAffinity: {{ .Values.controller.service.sessionAffinity }} -{{- end }} -{{- if .Values.controller.service.healthCheckNodePort }} - healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} -{{- end }} 
- ports: - {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} - {{- if .Values.controller.service.enableHttp }} - - name: http - port: {{ .Values.controller.service.ports.http }} - protocol: TCP - targetPort: {{ .Values.controller.service.targetPorts.http }} - {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} - nodePort: {{ .Values.controller.service.nodePorts.http }} - {{- end }} - {{- end }} - {{- if .Values.controller.service.enableHttps }} - - name: https - port: {{ .Values.controller.service.ports.https }} - protocol: TCP - targetPort: {{ .Values.controller.service.targetPorts.https }} - {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} - nodePort: {{ .Values.controller.service.nodePorts.https }} - {{- end }} - {{- end }} - {{- range $key, $value := .Values.tcp }} - - name: {{ $key }}-tcp - port: {{ $key }} - protocol: TCP - targetPort: {{ $key }}-tcp - {{- if $.Values.controller.service.nodePorts.tcp }} - {{- if index $.Values.controller.service.nodePorts.tcp $key }} - nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} - {{- end }} - {{- end }} - {{- end }} - {{- range $key, $value := .Values.udp }} - - name: {{ $key }}-udp - port: {{ $key }} - protocol: UDP - targetPort: {{ $key }}-udp - {{- if $.Values.controller.service.nodePorts.udp }} - {{- if index $.Values.controller.service.nodePorts.udp $key }} - nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} - {{- end }} - {{- end }} - {{- end }} - selector: - {{- include "ingress-nginx.selectorLabels" . 
| nindent 4 }} - app.kubernetes.io/component: controller -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-serviceaccount.yaml deleted file mode 100755 index 4358507..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-serviceaccount.yaml +++ /dev/null @@ -1,9 +0,0 @@ -{{- if or .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: controller - name: {{ template "ingress-nginx.serviceAccountName" . }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-servicemonitor.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-servicemonitor.yaml deleted file mode 100755 index 68b1c92..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/controller-servicemonitor.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "ingress-nginx.controller.fullname" . }} -{{- if .Values.controller.metrics.serviceMonitor.namespace }} - namespace: {{ .Values.controller.metrics.serviceMonitor.namespace }} -{{- end }} - labels: - {{- include "ingress-nginx.labels" . 
| nindent 4 }} - app.kubernetes.io/component: controller - {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} - {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }} - {{- end }} -spec: - endpoints: - - port: metrics - interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} - {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} - honorLabels: true - {{- end }} - {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }} - metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }} - {{- end }} -{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} - namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }} -{{ else }} - namespaceSelector: - matchNames: - - {{ .Release.Namespace }} -{{- end }} -{{- if .Values.controller.metrics.serviceMonitor.targetLabels }} - targetLabels: - {{- range .Values.controller.metrics.serviceMonitor.targetLabels }} - - {{ . }} - {{- end }} -{{- end }} - selector: - matchLabels: - {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} - app.kubernetes.io/component: controller -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-deployment.yaml deleted file mode 100755 index 58bf7a6..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-deployment.yaml +++ /dev/null @@ -1,97 +0,0 @@ -{{- if .Values.defaultBackend.enabled -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: default-backend - name: {{ include "ingress-nginx.defaultBackend.fullname" . }} -spec: - selector: - matchLabels: - {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} - app.kubernetes.io/component: default-backend - replicas: {{ .Values.defaultBackend.replicaCount }} - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} - template: - metadata: - {{- if .Values.defaultBackend.podAnnotations }} - annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} - app.kubernetes.io/component: default-backend - {{- if .Values.defaultBackend.podLabels }} - {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }} - {{- end }} - spec: - {{- if .Values.imagePullSecrets }} - imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} - {{- end }} - {{- if .Values.defaultBackend.priorityClassName }} - priorityClassName: {{ .Values.defaultBackend.priorityClassName }} - {{- end }} - {{- if .Values.defaultBackend.podSecurityContext }} - securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }} - {{- end }} - containers: - - name: {{ template "ingress-nginx.name" . }}-default-backend - {{- with .Values.defaultBackend.image }} - image: "{{ template "system_default_registry" . 
}}{{.repository}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" - {{- end }} - imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} - {{- if .Values.defaultBackend.extraArgs }} - args: - {{- range $key, $value := .Values.defaultBackend.extraArgs }} - {{- /* Accept keys without values or with false as value */}} - {{- if eq ($value | quote | len) 2 }} - - --{{ $key }} - {{- else }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- end }} - {{- end }} - securityContext: - runAsUser: {{ .Values.defaultBackend.image.runAsUser }} - {{- if .Values.defaultBackend.extraEnvs }} - env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }} - {{- end }} - livenessProbe: - httpGet: - path: /healthz - port: {{ .Values.defaultBackend.port }} - scheme: HTTP - initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} - readinessProbe: - httpGet: - path: /healthz - port: {{ .Values.defaultBackend.port }} - scheme: HTTP - initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} - ports: - - name: http - containerPort: {{ .Values.defaultBackend.port }} - protocol: TCP - {{- if .Values.defaultBackend.resources }} - resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }} - {{- end }} - {{- if .Values.defaultBackend.nodeSelector }} - nodeSelector: {{ toYaml 
.Values.defaultBackend.nodeSelector | nindent 8 }} - {{- end }} - serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} - {{- if .Values.defaultBackend.tolerations }} - tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }} - {{- end }} - {{- if .Values.defaultBackend.affinity }} - affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }} - {{- end }} - terminationGracePeriodSeconds: 60 -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-poddisruptionbudget.yaml deleted file mode 100755 index b6c9c44..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-poddisruptionbudget.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if gt (.Values.defaultBackend.replicaCount | int) 1 -}} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: default-backend - name: {{ include "ingress-nginx.defaultBackend.fullname" . }} -spec: - selector: - matchLabels: - {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} - app.kubernetes.io/component: default-backend - minAvailable: {{ .Values.defaultBackend.minAvailable }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-psp.yaml deleted file mode 100755 index 055f434..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-psp.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "ingress-nginx.fullname" . 
}}-backend - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: default-backend -spec: - allowPrivilegeEscalation: false - fsGroup: - ranges: - - max: 65535 - min: 1 - rule: MustRunAs - requiredDropCapabilities: - - ALL - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - ranges: - - max: 65535 - min: 1 - rule: MustRunAs - volumes: - - configMap - - emptyDir - - projected - - secret - - downwardAPI -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-role.yaml deleted file mode 100755 index 23498de..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-role.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: default-backend - name: {{ include "ingress-nginx.fullname" . }}-backend -rules: - - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ include "ingress-nginx.fullname" . 
}}-backend] -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-rolebinding.yaml deleted file mode 100755 index 45558aa..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-rolebinding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: default-backend - name: {{ include "ingress-nginx.fullname" . }}-backend -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ include "ingress-nginx.fullname" . }}-backend -subjects: - - kind: ServiceAccount - name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-service.yaml deleted file mode 100755 index e74714d..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-service.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if .Values.defaultBackend.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.defaultBackend.service.annotations }} - annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }} -{{- end }} - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: default-backend - name: {{ include "ingress-nginx.defaultBackend.fullname" . 
}} -spec: - type: {{ .Values.defaultBackend.service.type }} -{{- if .Values.defaultBackend.service.clusterIP }} - clusterIP: {{ .Values.defaultBackend.service.clusterIP }} -{{- end }} -{{- if .Values.defaultBackend.service.externalIPs }} - externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }} -{{- end }} -{{- if .Values.defaultBackend.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }} -{{- end }} -{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }} -{{- end }} - ports: - - name: http - port: {{ .Values.defaultBackend.service.servicePort }} - protocol: TCP - targetPort: http - selector: - {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} - app.kubernetes.io/component: default-backend -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-serviceaccount.yaml deleted file mode 100755 index 96419cf..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/templates/default-backend-serviceaccount.yaml +++ /dev/null @@ -1,9 +0,0 @@ -{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "ingress-nginx.labels" . | nindent 4 }} - app.kubernetes.io/component: default-backend - name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} -{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/values.yaml deleted file mode 100755 index ef766cc..0000000 --- a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.001/values.yaml +++ /dev/null @@ -1,666 +0,0 @@ -## nginx configuration -## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md -## -controller: - image: - repository: rancher/nginx-ingress-controller - tag: "nginx-0.35.0-rancher2" - digest: sha256:fc4979d8b8443a831c9789b5155cded454cb7de737a8b727bc2ba0106d2eae8b - pullPolicy: IfNotPresent - # www-data -> uid 101 - runAsUser: 101 - allowPrivilegeEscalation: true - - # Configures the ports the nginx-controller listens on - containerPort: - http: 80 - https: 443 - - # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ - config: {} - - ## Annotations to be added to the controller config configuration configmap - ## - configAnnotations: {} - - # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers - proxySetHeaders: {} - - # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers - addHeaders: {} - - # Optionally customize the pod dnsConfig. - dnsConfig: {} - - # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. - # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller - # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
- dnsPolicy: ClusterFirstWithHostNet - - # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network - # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply - reportNodeInternalIp: false - - # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), - # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 - # is merged - hostNetwork: true - - ## Use host ports 80 and 443 - ## Disabled by default - ## - hostPort: - enabled: false - ports: - http: 80 - https: 443 - - ## Election ID to use for status update - ## - electionID: ingress-controller-leader - - ## Name of the ingress class to route through this controller - ## - ingressClass: nginx - - # labels to add to the pod container metadata - podLabels: {} - # key: value - - ## Security Context policies for controller pods - ## - podSecurityContext: {} - - ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for - ## notes on enabling and using sysctls - ### - sysctls: {} - # sysctls: - # "net.core.somaxconn": "8192" - - ## Allows customization of the source of the IP address or FQDN to report - ## in the ingress status field. By default, it reads the information provided - ## by the service. If disable, the status field reports the IP address of the - ## node or nodes where an ingress controller pod is running. 
- publishService: - enabled: true - ## Allows overriding of the publish service to bind to - ## Must be / - ## - pathOverride: "" - - ## Limit the scope of the controller - ## - scope: - enabled: false - namespace: "" # defaults to .Release.Namespace - - ## Allows customization of the configmap / nginx-configmap namespace - ## - configMapNamespace: "" # defaults to .Release.Namespace - - ## Allows customization of the tcp-services-configmap - ## - tcp: - configMapNamespace: "" # defaults to .Release.Namespace - ## Annotations to be added to the tcp config configmap - annotations: {} - - ## Allows customization of the udp-services-configmap - ## - udp: - configMapNamespace: "" # defaults to .Release.Namespace - ## Annotations to be added to the udp config configmap - annotations: {} - - ## Additional command line arguments to pass to nginx-ingress-controller - ## E.g. to specify the default SSL certificate you can use - ## extraArgs: - ## default-ssl-certificate: "/" - extraArgs: {} - - ## Additional environment variables to set - extraEnvs: [] - # extraEnvs: - # - name: FOO - # valueFrom: - # secretKeyRef: - # key: FOO - # name: secret-resource - - ## DaemonSet or Deployment - ## - kind: Deployment - - ## Annotations to be added to the controller Deployment or DaemonSet - ## - annotations: {} - # keel.sh/pollSchedule: "@every 60m" - - ## Labels to be added to the controller Deployment or DaemonSet - ## - labels: {} - # keel.sh/policy: patch - # keel.sh/trigger: poll - - - # The update strategy to apply to the Deployment or DaemonSet - ## - updateStrategy: {} - # rollingUpdate: - # maxUnavailable: 1 - # type: RollingUpdate - - # minReadySeconds to avoid killing pods before we are ready - ## - minReadySeconds: 0 - - - ## Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: 
"NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Affinity and anti-affinity - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - # # An example of preferred pod anti-affinity, weight is in the range 1-100 - # podAntiAffinity: - # preferredDuringSchedulingIgnoredDuringExecution: - # - weight: 100 - # podAffinityTerm: - # labelSelector: - # matchExpressions: - # - key: app.kubernetes.io/name - # operator: In - # values: - # - ingress-nginx - # - key: app.kubernetes.io/instance - # operator: In - # values: - # - ingress-nginx - # - key: app.kubernetes.io/component - # operator: In - # values: - # - controller - # topologyKey: kubernetes.io/hostname - - # # An example of required pod anti-affinity - # podAntiAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # - labelSelector: - # matchExpressions: - # - key: app.kubernetes.io/name - # operator: In - # values: - # - ingress-nginx - # - key: app.kubernetes.io/instance - # operator: In - # values: - # - ingress-nginx - # - key: app.kubernetes.io/component - # operator: In - # values: - # - controller - # topologyKey: "kubernetes.io/hostname" - - ## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
- ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: failure-domain.beta.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - # labelSelector: - # matchLabels: - # app.kubernetes.io/instance: ingress-nginx-internal - - ## terminationGracePeriodSeconds - ## wait up to five minutes for the drain of connections - ## - terminationGracePeriodSeconds: 300 - - ## Node labels for controller pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Liveness and readiness probe values - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## - livenessProbe: - failureThreshold: 5 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - port: 10254 - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - port: 10254 - - # Path of the health check endpoint. All requests received on the port defined by - # the healthz-port parameter are forwarded internally to this path. - healthCheckPath: "/healthz" - - ## Annotations to be added to controller pods - ## - podAnnotations: {} - - replicaCount: 1 - - minAvailable: 1 - - # Define requests resources to avoid probe issues due to CPU utilization in busy nodes - # ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 - # Ideally, there should be no limits. 
- # https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ - resources: - # limits: - # cpu: 100m - # memory: 90Mi - requests: - cpu: 100m - memory: 90Mi - - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 11 - targetCPUUtilizationPercentage: 50 - targetMemoryUtilizationPercentage: 50 - - autoscalingTemplate: [] - # Custom or additional autoscaling metrics - # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics - # - type: Pods - # pods: - # metric: - # name: nginx_ingress_controller_nginx_process_requests_total - # target: - # type: AverageValue - # averageValue: 10000m - - ## Enable mimalloc as a drop-in replacement for malloc. - ## ref: https://github.com/microsoft/mimalloc - ## - enableMimalloc: true - - ## Override NGINX template - customTemplate: - configMapName: "" - configMapKey: "" - - service: - enabled: false - - annotations: {} - labels: {} - # clusterIP: "" - - ## List of IP addresses at which the controller services are available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - # loadBalancerIP: "" - loadBalancerSourceRanges: [] - - enableHttp: true - enableHttps: true - - ## Set external traffic policy to: "Local" to preserve source IP on - ## providers supporting it - ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer - # externalTrafficPolicy: "" - - # Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". - # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - # sessionAffinity: "" - - # specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, - # the service controller allocates a port from your cluster’s NodePort range. 
- # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - # healthCheckNodePort: 0 - - ports: - http: 80 - https: 443 - - targetPorts: - http: http - https: https - - type: LoadBalancer - - # type: NodePort - # nodePorts: - # http: 32080 - # https: 32443 - # tcp: - # 8080: 32808 - nodePorts: - http: "" - https: "" - tcp: {} - udp: {} - - ## Enables an additional internal load balancer (besides the external one). - ## Annotations are mandatory for the load balancer to come up. Varies with the cloud service. - internal: - enabled: false - annotations: {} - - ## Set external traffic policy to: "Local" to preserve source IP on - ## providers supporting it - ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer - # externalTrafficPolicy: "" - - extraContainers: [] - ## Additional containers to be added to the controller pod. - ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. - # - name: my-sidecar - # image: nginx:latest - # - name: lemonldap-ng-controller - # image: lemonldapng/lemonldap-ng-controller:0.2.0 - # args: - # - /lemonldap-ng-controller - # - --alsologtostderr - # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration - # env: - # - name: POD_NAME - # valueFrom: - # fieldRef: - # fieldPath: metadata.name - # - name: POD_NAMESPACE - # valueFrom: - # fieldRef: - # fieldPath: metadata.namespace - # volumeMounts: - # - name: copy-portal-skins - # mountPath: /srv/var/lib/lemonldap-ng/portal/skins - - extraVolumeMounts: [] - ## Additional volumeMounts to the controller main container. - # - name: copy-portal-skins - # mountPath: /var/lib/lemonldap-ng/portal/skins - - extraVolumes: [] - ## Additional volumes to the controller pod. - # - name: copy-portal-skins - # emptyDir: {} - - extraInitContainers: [] - ## Containers, which are run before the app containers are started. 
- # - name: init-myservice - # image: busybox - # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] - - admissionWebhooks: - enabled: true - failurePolicy: Fail - port: 8443 - - service: - annotations: {} - # clusterIP: "" - externalIPs: [] - # loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 443 - type: ClusterIP - - patch: - enabled: true - image: - repository: docker.io/jettech/kube-webhook-certgen - tag: v1.3.0 - pullPolicy: IfNotPresent - ## Provide a priority class name to the webhook patching job - ## - priorityClassName: "" - podAnnotations: {} - nodeSelector: {} - tolerations: [] - runAsUser: 2000 - - metrics: - port: 10254 - # if this port is changed, change healthz-port: in extraArgs: accordingly - enabled: false - - service: - annotations: {} - # prometheus.io/scrape: "true" - # prometheus.io/port: "10254" - - # clusterIP: "" - - ## List of IP addresses at which the stats-exporter service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - # loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 9913 - type: ClusterIP - # externalTrafficPolicy: "" - # nodePort: "" - - serviceMonitor: - enabled: false - additionalLabels: {} - namespace: "" - namespaceSelector: {} - # Default: scrape .Release.Namespace only - # To scrape all, use the following: - # namespaceSelector: - # any: true - scrapeInterval: 30s - # honorLabels: true - targetLabels: [] - metricRelabelings: [] - - prometheusRule: - enabled: false - additionalLabels: {} - # namespace: "" - rules: [] - # # These are just examples rules, please adapt them to your needs - # - alert: NGINXConfigFailed - # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 - # for: 1s - # labels: - # severity: critical - # annotations: - # description: bad ingress config - nginx config test failed - # summary: uninstall the latest ingress changes to allow config 
reloads to resume - # - alert: NGINXCertificateExpiry - # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 - # for: 1s - # labels: - # severity: critical - # annotations: - # description: ssl certificate(s) will expire in less then a week - # summary: renew expiring certificates to avoid downtime - # - alert: NGINXTooMany500s - # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 - # for: 1m - # labels: - # severity: warning - # annotations: - # description: Too many 5XXs - # summary: More than 5% of all requests returned 5XX, this requires your attention - # - alert: NGINXTooMany400s - # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 - # for: 1m - # labels: - # severity: warning - # annotations: - # description: Too many 4XXs - # summary: More than 5% of all requests returned 4XX, this requires your attention - - ## Improve connection draining when ingress controller pod is deleted using a lifecycle hook: - ## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds - ## to 300, allowing the draining of connections up to five minutes. - ## If the active connections end before that, the pod will terminate gracefully at that time. - ## To effectively take advantage of this feature, the Configmap feature - ## worker-shutdown-timeout new value is 240s instead of 10s. 
- ## - lifecycle: - preStop: - exec: - command: - - /wait-shutdown - - priorityClassName: "" - -## Rollback limit -## -revisionHistoryLimit: 10 - -# Maxmind license key to download GeoLite2 Databases -# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases -maxmindLicenseKey: "" - -## Default 404 backend -## -defaultBackend: - ## - enabled: false - - image: - repository: rancher/nginx-ingress-controller-defaultbackend - tag: "1.5-rancher1" - pullPolicy: IfNotPresent - # nobody user -> uid 65534 - runAsUser: 65534 - - extraArgs: {} - - serviceAccount: - create: true - name: - ## Additional environment variables to set for defaultBackend pods - extraEnvs: [] - - port: 8080 - - ## Readiness and liveness probes for default backend - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ - ## - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - readinessProbe: - failureThreshold: 6 - initialDelaySeconds: 0 - periodSeconds: 5 - successThreshold: 1 - timeoutSeconds: 5 - - ## Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - affinity: {} - - ## Security Context policies for controller pods - ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for - ## notes on enabling and using sysctls - ## - podSecurityContext: {} - - # labels to add to the pod container metadata - podLabels: {} - # key: value - - ## Node labels for default backend pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Annotations to be added to default backend pods - ## - podAnnotations: {} - - replicaCount: 1 - - 
minAvailable: 1 - - resources: {} - # limits: - # cpu: 10m - # memory: 20Mi - # requests: - # cpu: 10m - # memory: 20Mi - - service: - annotations: {} - - # clusterIP: "" - - ## List of IP addresses at which the default backend service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - # loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 - type: ClusterIP - - priorityClassName: "" - -## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 -rbac: - create: true - scope: false - -# If true, create & use Pod Security Policy resources -# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ -podSecurityPolicy: - enabled: false - -serviceAccount: - create: true - name: - -## Optional array of imagePullSecrets containing private registry credentials -## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ -imagePullSecrets: [] -# - name: secretName - -# TCP service key:value pairs -# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp -## -tcp: {} -# 8080: "default/example-tcp-svc:9000" - -# UDP service key:value pairs -# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp -## -udp: {} -# 53: "kube-system/kube-dns:53" - -global: - systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/Chart.yaml deleted file mode 100755 index 26ac286..0000000 --- a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -appVersion: v1.19.8 -description: Install Kube Proxy. 
-keywords: -- kube-proxy -maintainers: -- email: charts@rancher.com - name: Rancher Labs -name: rke2-kube-proxy -sources: -- https://github.com/rancher/rke2-charts -version: v1.19.801 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/NOTES.txt deleted file mode 100755 index 2da0e24..0000000 --- a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/NOTES.txt +++ /dev/null @@ -1,2 +0,0 @@ -Kube-proxy has been installed. - diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/_helpers.tpl deleted file mode 100755 index cb64d1f..0000000 --- a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/_helpers.tpl +++ /dev/null @@ -1,21 +0,0 @@ -{{- define "system_default_registry" -}} -{{- if .Values.global.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.systemDefaultRegistry -}} -{{- else -}} -{{- "" -}} -{{- end -}} -{{- end -}} -{{- define "rke2_data_dir" -}} -{{- if .Values.global.rke2DataDir -}} -{{- printf "%s" .Values.global.rke2DataDir -}} -{{- else -}} -{{- "/var/lib/rancher/rke2" -}} -{{- end -}} -{{- end -}} -{{- define "kubeproxy_kubeconfig" -}} -{{- if .Values.global.rke2DataDir -}} -{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} -{{- else -}} -{{- printf "%s" .Values.clientConnection.kubeconfig -}} -{{- end -}} -{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/config.yaml deleted file mode 100755 index 536a12a..0000000 --- a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/config.yaml +++ /dev/null @@ -1,69 +0,0 @@ ---- -apiVersion: v1 -data: - config.conf: |- - apiVersion: kubeproxy.config.k8s.io/v1alpha1 - bindAddress: {{ .Values.bindAddress | quote }} - clientConnection: - acceptContentTypes: {{ 
.Values.clientConnection.acceptContentTypes | quote }} - burst: {{ .Values.clientConnection.burst }} - contentType: {{ .Values.clientConnection.contentType | quote }} - kubeconfig: {{ include "kubeproxy_kubeconfig" . | quote }} - qps: {{ .Values.clientConnection.qps }} - clusterCIDR: {{ .Values.clusterCIDR | quote }} - configSyncPeriod: {{ .Values.configSyncPeriod }} - conntrack: - maxPerCore: {{ .Values.conntrack.maxPerCore }} - min: {{ .Values.conntrack.min }} - tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} - tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} - detectLocalMode: {{ .Values.detectLocalMode | quote }} - enableProfiling: {{ .Values.enableProfiling | quote }} - healthzBindAddress: {{ .Values.healthzBindAddress | quote }} - hostnameOverride: {{ .Values.hostnameOverride | quote }} - iptables: - masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} - masqueradeBit: {{ .Values.iptables.masqueradeBit }} - {{ if .Values.iptables.minSyncPeriod }} - minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} - {{ end }} - syncPeriod: {{ .Values.iptables.syncPeriod }} - ipvs: - excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} - {{ if .Values.ipvs.minSyncPeriod }} - minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} - {{ end }} - scheduler: {{ .Values.ipvs.scheduler | quote }} - strictARP: {{ .Values.ipvs.strictARP | quote }} - syncPeriod: {{ .Values.ipvs.syncPeriod }} - {{ if .Values.ipvs.tcpFinTimeout }} - tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} - {{ end }} - {{ if .Values.ipvs.tcpTimeout }} - tcpTimeout: {{ .Values.ipvs.tcpTimeout }} - {{ end }} - {{ if .Values.ipvs.udpTimeout }} - udpTimeout: {{ .Values.ipvs.udpTimeout }} - {{ end }} - kind: KubeProxyConfiguration - metricsBindAddress: {{ .Values.metricsBindAddress | quote }} - mode: {{ .Values.proxy.mode | quote }} - nodePortAddresses: null - oomScoreAdj: {{ .Values.oomScoreAdj }} - portRange: {{ .Values.proxy.portRange | quote }} - 
showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} - udpIdleTimeout: {{ .Values.udpTimeout | quote }} - featureGates: - {{- range $key, $value := .Values.featureGates }} - {{ $key }}: {{ $value }} - {{- end }} - winkernel: - enableDSR: false - networkName: "" - sourceVip: "" -kind: ConfigMap -metadata: - labels: - app: kube-proxy - name: kube-proxy - namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/daemonset.yaml deleted file mode 100755 index 1267df8..0000000 --- a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/daemonset.yaml +++ /dev/null @@ -1,78 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-proxy - name: kube-proxy - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: kube-proxy - template: - metadata: - labels: - k8s-app: kube-proxy - spec: - containers: - - command: - - /usr/local/bin/kube-proxy - - --config=/var/lib/kube-proxy/config.conf - - --hostname-override=$(NODE_NAME) - env: - - name: NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} - imagePullPolicy: IfNotPresent - name: kube-proxy - securityContext: - privileged: true - volumeMounts: - - mountPath: /var/lib/kube-proxy - name: kube-proxy - - mountPath: {{ template "rke2_data_dir" . 
}}/agent - name: rke2config - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - - mountPath: /lib/modules - name: lib-modules - readOnly: true - dnsPolicy: ClusterFirst - hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - priorityClassName: system-node-critical - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: kube-proxy - serviceAccountName: kube-proxy - terminationGracePeriodSeconds: 30 - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - operator: Exists - volumes: - - hostPath: - path: {{ template "rke2_data_dir" . }}/agent - type: "" - name: rke2config - - configMap: - name: kube-proxy - name: kube-proxy - - hostPath: - path: /run/xtables.lock - type: FileOrCreate - name: xtables-lock - - hostPath: - path: /lib/modules - type: "" - name: lib-modules - updateStrategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/rbac.yaml deleted file mode 100755 index d98f84c..0000000 --- a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/rbac.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rke2:node-proxier -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:node-proxier -subjects: -- kind: ServiceAccount - name: kube-proxy - namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/serviceaccount.yaml deleted file mode 100755 index 59408a2..0000000 --- a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/templates/serviceaccount.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kind: ServiceAccount -apiVersion: v1 -metadata: - name: kube-proxy - namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/values.yaml deleted file mode 100755 index e362472..0000000 --- a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.801/values.yaml +++ /dev/null @@ -1,142 +0,0 @@ ---- - -# image for kubeproxy -image: - repository: rancher/hardened-kube-proxy - tag: v1.19.8 - -# The IP address for the proxy server to serve on -# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) -bindAddress: 0.0.0.0 - -# If true cleanup iptables and ipvs rules and exit. -cleanup: - -# The CIDR range of pods in the cluster. -# When configured, traffic sent to a Service cluster IP from outside this range -# will be masqueraded and traffic sent from pods to an external -# LoadBalancer IP will be directed to the respective cluster IP instead -clusterCIDR: 10.42.0.0/16 - -# The path to the configuration file. -config: - -# How often configuration from the apiserver is refreshed. Must be greater than 0. -configSyncPeriod: 15m0s - -conntrack: - # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). - maxPerCore: 32768 - # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). - min: 131072 - # NAT timeout for TCP connections in the CLOSE_WAIT state - tcpTimeoutCloseWait: 1h0m0s - # Idle timeout for established TCP connections (0 to leave as-is) - tcpTimeoutEstablished: 24h0m0s - -# Mode to use to detect local traffic -detectLocalMode: - -# A set of key=value pairs that describe feature gates for alpha/experimental features: -featureGates: - -# The IP address with port for the health check server to serve on -# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. 
-healthzBindAddress: 0.0.0.0:10256 - -# help for kube-proxy -help: - -# If non-empty, will use this string as identification instead of the actual hostname. -hostnameOverride: - -iptables: - # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) - masqueradeAll: - # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. - masqueradeBit: 14 - # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). - minSyncPeriod: - # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. - syncPeriod: 30s - -ipvs: - # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. - excludeCidrs: - # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). - minSyncPeriod: - # The ipvs scheduler type when proxy mode is ipvs - scheduler: - # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 - strictArp: - # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. - syncPeriod: 30s - # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). - tcpTimeout: - # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). - tcpfinTimeout: - # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). - udpTimeout: - - -clientConnection: - # Burst to use while talking with kubernetes apiserver - apiBurst: 10 - # Content type of requests sent to apiserver. - apiContentType: - # QPS to use while talking with kubernetes apiserver - qps: 5 - # Path to kubeconfig file with authorization information (the master location is set by the master flag). 
- kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig - -# Maximum number of seconds between log flushes -logFlushFrequency: 5s - - - -# The address of the Kubernetes API server (overrides any value in kubeconfig) -master: - -# The IP address with port for the metrics server to serve on -# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. -metricsBindAddress: 127.0.0.1:10249 - -# A string slice of values which specify the addresses to use for NodePorts. -# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. -nodeportAddresses: - -# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] -oomScoreAdj: -999 - -# If true enables profiling via web interface on /debug/pprof handler. -profiling: - -proxy: - # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. - # If blank, use the best-available proxy (currently iptables). - # If the iptables proxy is selected, regardless of how, but the system's - # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. - mode: -# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) -# that may be consumed in order to proxy service traffic. -# If (unspecified, 0, or 0-0) then ports will be randomly chosen. - portRange: - -# The previous version for which you want to show hidden metrics. -# Only the previous minor version is meaningful, other values will not be allowed. -# The format is ., e.g.: '1.16'. The purpose of this format is make -# sure you have the opportunity to notice if the next release hides additional metrics, -# rather than being surprised when they are permanently removed in the release after that. -showHiddenMetricsForVersion: - -# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). -# Must be greater than 0. 
Only applicable for proxy-mode=userspace -udpTimeout: 250ms - -# Print version information and quit -version: - -# If set, write the default configuration values to this file and exit. -writeConfigTo: -global: - systemDefaultRegistry: "" diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/.helmignore b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/.helmignore deleted file mode 100755 index 37ea1d7..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/.helmignore +++ /dev/null @@ -1,22 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -OWNERS -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/Chart.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/Chart.yaml deleted file mode 100755 index f1cebfe..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/Chart.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -appVersion: 0.3.6 -description: Metrics Server is a cluster-wide aggregator of resource usage data. 
-home: https://github.com/kubernetes-incubator/metrics-server -keywords: -- metrics-server -maintainers: -- email: o.with@sportradar.com - name: olemarkus -- email: k.aasan@sportradar.com - name: kennethaasan -name: rke2-metrics-server -sources: -- https://github.com/kubernetes-incubator/metrics-server -version: 2.11.100-build2021022301 diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/README.md b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/README.md deleted file mode 100755 index 678f084..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# metrics-server - -[Metrics Server](https://github.com/kubernetes-incubator/metrics-server) is a cluster-wide aggregator of resource usage data. Resource metrics are used by components like `kubectl top` and the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale) to scale workloads. To autoscale based upon a custom metric, see the [Prometheus Adapter chart](https://github.com/helm/charts/blob/master/stable/prometheus-adapter). - -## Configuration - -Parameter | Description | Default ---- | --- | --- -`rbac.create` | Enable Role-based authentication | `true` -`rbac.pspEnabled` | Enable pod security policy support | `false` -`serviceAccount.create` | If `true`, create a new service account | `true` -`serviceAccount.name` | Service account to be used. 
If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | `` -`apiService.create` | Create the v1beta1.metrics.k8s.io API service | `true` -`hostNetwork.enabled` | Enable hostNetwork mode | `false` -`image.repository` | Image repository | `k8s.gcr.io/metrics-server-amd64` -`image.tag` | Image tag | `v0.3.2` -`image.pullPolicy` | Image pull policy | `IfNotPresent` -`imagePullSecrets` | Image pull secrets | `[]` -`args` | Command line arguments | `[]` -`resources` | CPU/Memory resource requests/limits. | `{}` -`tolerations` | List of node taints to tolerate (requires Kubernetes >=1.6) | `[]` -`nodeSelector` | Node labels for pod assignment | `{}` -`affinity` | Node affinity | `{}` -`replicas` | Number of replicas | `1` -`extraVolumeMounts` | Ability to provide volume mounts to the pod | `[]` -`extraVolumes` | Ability to provide volumes to the pod | `[]` -`livenessProbe` | Container liveness probe | See values.yaml -`podLabels` | Labels to be added to pods | `{}` -`podAnnotations` | Annotations to be added to pods | `{}` -`priorityClassName` | Pod priority class | `""` -`readinessProbe` | Container readiness probe | See values.yaml -`service.annotations` | Annotations to add to the service | `{}` -`service.labels` | Labels to be added to the metrics-server service | `{}` -`service.port` | Service port to expose | `443` -`service.type` | Type of service to create | `ClusterIP` -`podDisruptionBudget.enabled` | Create a PodDisruptionBudget | `false` -`podDisruptionBudget.minAvailable` | Minimum available instances; ignored if there is no PodDisruptionBudget | -`podDisruptionBudget.maxUnavailable` | Maximum unavailable instances; ignored if there is no PodDisruptionBudget | -`extraContainers` | Add additional containers | `[]` diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/ci/ci-values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/ci/ci-values.yaml deleted file 
mode 100755 index a9d81b4..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/ci/ci-values.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# CI is running on GKE, which already ships metrics-server. This cause -# conflicts on the apiService resource. - -apiService: - create: false diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/NOTES.txt b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/NOTES.txt deleted file mode 100755 index 1034c12..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/NOTES.txt +++ /dev/null @@ -1,11 +0,0 @@ -The metric server has been deployed. -{{ if .Values.apiService.create }} -In a few minutes you should be able to list metrics using the following -command: - - kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" -{{ else }} -NOTE: You have disabled the API service creation for this release. The metrics -API will not work with this release unless you configure the metrics API -service outside of this Helm chart. -{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/_helpers.tpl b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/_helpers.tpl deleted file mode 100755 index b59ca03..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/_helpers.tpl +++ /dev/null @@ -1,59 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "metrics-server.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "metrics-server.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "metrics-server.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a service name that defaults to app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "metrics-server.service.fullname" -}} -{{- .Values.service.nameOverride | default .Chart.Name }} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "metrics-server.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "metrics-server.fullname" .) 
.Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{- define "system_default_registry" -}} -{{- if .Values.global.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.systemDefaultRegistry -}} -{{- else -}} -{{- "" -}} -{{- end -}} -{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/aggregated-metrics-reader-cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/aggregated-metrics-reader-cluster-role.yaml deleted file mode 100755 index e91a3d8..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/aggregated-metrics-reader-cluster-role.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- if .Values.rbac.create -}} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: system:{{ template "metrics-server.name" . }}-aggregated-reader - labels: - app: {{ template "metrics-server.name" . }} - chart: {{ template "metrics-server.chart" . 
}} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - rbac.authorization.k8s.io/aggregate-to-view: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-admin: "true" -rules: - - apiGroups: ["metrics.k8s.io"] - resources: ["pods","nodes"] - verbs: ["get", "list", "watch"] -{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/auth-delegator-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/auth-delegator-crb.yaml deleted file mode 100755 index e82fca0..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/auth-delegator-crb.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "metrics-server.fullname" . }}:system:auth-delegator - labels: - app: {{ template "metrics-server.name" . }} - chart: {{ template "metrics-server.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: - - kind: ServiceAccount - name: {{ template "metrics-server.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/cluster-role.yaml deleted file mode 100755 index 8763acd..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/cluster-role.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:{{ template "metrics-server.fullname" . }} - labels: - app: {{ template "metrics-server.name" . 
}} - chart: {{ template "metrics-server.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -rules: - - apiGroups: - - "" - resources: - - pods - - nodes - - nodes/stats - - namespaces - verbs: - - get - - list - - watch - {{- if .Values.rbac.pspEnabled }} - - apiGroups: - - extensions - - policy - resources: - - podsecuritypolicies - resourceNames: - - privileged-{{ template "metrics-server.fullname" . }} - verbs: - - use - {{- end -}} -{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metric-server-service.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metric-server-service.yaml deleted file mode 100755 index 0d64cd1..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metric-server-service.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "metrics-server.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "metrics-server.name" . }} - chart: {{ template "metrics-server.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - {{- with .Values.service.labels -}} - {{ toYaml . | nindent 4 }} - {{- end }} - annotations: - {{- toYaml .Values.service.annotations | trim | nindent 4 }} -spec: - ports: - - port: {{ .Values.service.port }} - protocol: TCP - targetPort: https - selector: - app: {{ template "metrics-server.name" . 
}} - release: {{ .Release.Name }} - type: {{ .Values.service.type }} - diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-api-service.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-api-service.yaml deleted file mode 100755 index 552ffea..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-api-service.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if .Values.apiService.create -}} -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.metrics.k8s.io - labels: - app: {{ template "metrics-server.name" . }} - chart: {{ template "metrics-server.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - service: - name: {{ template "metrics-server.fullname" . }} - namespace: {{ .Release.Namespace }} - group: metrics.k8s.io - version: v1beta1 - insecureSkipTLSVerify: true - groupPriorityMinimum: 100 - versionPriority: 100 -{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-crb.yaml deleted file mode 100755 index eb04c6f..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-crb.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:{{ template "metrics-server.fullname" . }} - labels: - app: {{ template "metrics-server.name" . }} - chart: {{ template "metrics-server.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:{{ template "metrics-server.fullname" . 
}} -subjects: - - kind: ServiceAccount - name: {{ template "metrics-server.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-deployment.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-deployment.yaml deleted file mode 100755 index 2e54f27..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-deployment.yaml +++ /dev/null @@ -1,88 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "metrics-server.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "metrics-server.name" . }} - chart: {{ template "metrics-server.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -spec: - selector: - matchLabels: - app: {{ template "metrics-server.name" . }} - release: {{ .Release.Name }} - replicas: {{ .Values.replicas }} - template: - metadata: - labels: - app: {{ template "metrics-server.name" . }} - release: {{ .Release.Name }} - {{- if .Values.podLabels }} -{{ toYaml .Values.podLabels | indent 8 }} - {{- end }} - {{- with .Values.podAnnotations }} - annotations: - {{- range $key, $value := . }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - spec: - {{- if .Values.priorityClassName }} - priorityClassName: "{{ .Values.priorityClassName }}" - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{- range .Values.imagePullSecrets }} - - name: {{ . }} - {{- end }} - {{- end }} - serviceAccountName: {{ template "metrics-server.serviceAccountName" . }} -{{- if .Values.hostNetwork.enabled }} - hostNetwork: true -{{- end }} - containers: - {{- if .Values.extraContainers }} - {{- ( tpl (toYaml .Values.extraContainers) . ) | nindent 8 }} - {{- end }} - - name: metrics-server - image: {{ template "system_default_registry" . 
}}{{ .Values.image.repository }}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: - - /metrics-server - - --cert-dir=/tmp - - --logtostderr - - --secure-port=8443 - {{- range .Values.args }} - - {{ . }} - {{- end }} - ports: - - containerPort: 8443 - name: https - livenessProbe: - {{- toYaml .Values.livenessProbe | trim | nindent 12 }} - readinessProbe: - {{- toYaml .Values.readinessProbe | trim | nindent 12 }} - resources: - {{- toYaml .Values.resources | trim | nindent 12 }} - securityContext: - {{- toYaml .Values.securityContext | trim | nindent 12 }} - volumeMounts: - - name: tmp - mountPath: /tmp - {{- with .Values.extraVolumeMounts }} - {{- toYaml . | nindent 10 }} - {{- end }} - nodeSelector: - {{- toYaml .Values.nodeSelector | trim | nindent 8 }} - affinity: - {{- toYaml .Values.affinity | trim | nindent 8 }} - tolerations: - {{- toYaml .Values.tolerations | trim | nindent 8 }} - volumes: - - name: tmp - emptyDir: {} - {{- with .Values.extraVolumes }} - {{- toYaml . | nindent 6}} - {{- end }} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-serviceaccount.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-serviceaccount.yaml deleted file mode 100755 index 4d748ed..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/metrics-server-serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "metrics-server.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ template "metrics-server.name" . }} - chart: {{ template "metrics-server.chart" . 
}} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/pdb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/pdb.yaml deleted file mode 100755 index 3831097..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/pdb.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- if .Values.podDisruptionBudget.enabled -}} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - app: {{ template "metrics-server.name" . }} - chart: {{ template "metrics-server.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - name: {{ template "metrics-server.fullname" . }} - namespace: {{ .Release.Namespace }} - -spec: - {{- if .Values.podDisruptionBudget.minAvailable }} - minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} - {{- end }} - {{- if .Values.podDisruptionBudget.maxUnavailable }} - maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} - {{- end }} - selector: - matchLabels: - app: {{ template "metrics-server.name" . }} -{{- end -}} \ No newline at end of file diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/psp.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/psp.yaml deleted file mode 100755 index b5cb7da..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/psp.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if .Values.rbac.pspEnabled }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: privileged-{{ template "metrics-server.fullname" . 
}} -spec: - allowedCapabilities: - - '*' - fsGroup: - rule: RunAsAny - privileged: true - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' - hostPID: true - hostIPC: true - hostNetwork: true - hostPorts: - - min: 1 - max: 65536 -{{- end }} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/role-binding.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/role-binding.yaml deleted file mode 100755 index 3169f24..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/role-binding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ template "metrics-server.fullname" . }}-auth-reader - namespace: kube-system - labels: - app: {{ template "metrics-server.name" . }} - chart: {{ template "metrics-server.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: - - kind: ServiceAccount - name: {{ template "metrics-server.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/tests/test-version.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/tests/test-version.yaml deleted file mode 100755 index 3648e6d..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/templates/tests/test-version.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: {{ template "metrics-server.fullname" . }}-test - labels: - app: {{ template "metrics-server.name" . }} - chart: {{ template "metrics-server.chart" . 
}} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} - annotations: - "helm.sh/hook": test-success -spec: - containers: - - name: wget - image: busybox - command: ['/bin/sh'] - args: - - -c - - 'wget -qO- https://{{ include "metrics-server.fullname" . }}:{{ .Values.service.port }}/version | grep -F {{ .Values.image.tag }}' - restartPolicy: Never - diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/values.yaml deleted file mode 100755 index 52e0fdd..0000000 --- a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022301/values.yaml +++ /dev/null @@ -1,113 +0,0 @@ -rbac: - # Specifies whether RBAC resources should be created - create: true - pspEnabled: false - -serviceAccount: - # Specifies whether a ServiceAccount should be created - create: true - # The name of the ServiceAccount to use. - # If not set and create is true, a name is generated using the fullname template - name: - -apiService: - # Specifies if the v1beta1.metrics.k8s.io API service should be created. - # - # You typically want this enabled! If you disable API service creation you have to - # manage it outside of this chart for e.g horizontal pod autoscaling to - # work with this release. - create: true - -hostNetwork: - # Specifies if metrics-server should be started in hostNetwork mode. - # - # You would require this enabled if you use alternate overlay networking for pods and - # API server unable to communicate with metrics-server. 
As an example, this is required - # if you use Weave network on EKS - enabled: false - -image: - repository: rancher/hardened-k8s-metrics-server - tag: v0.3.6-build20210223 - pullPolicy: IfNotPresent - -imagePullSecrets: [] -# - registrySecretName - -args: -# enable this if you have self-signed certificates, see: https://github.com/kubernetes-incubator/metrics-server -# - --kubelet-insecure-tls - - --kubelet-preferred-address-types=InternalIP - -resources: {} - -nodeSelector: {} - -tolerations: [] - -affinity: {} - -replicas: 1 - -extraContainers: [] - -podLabels: {} - -podAnnotations: {} -# The following annotations guarantee scheduling for critical add-on pods. -# See more at: https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ -# scheduler.alpha.kubernetes.io/critical-pod: '' - -## Set a pod priorityClassName -priorityClassName: system-node-critical - -extraVolumeMounts: [] -# - name: secrets -# mountPath: /etc/kubernetes/secrets -# readOnly: true - -extraVolumes: [] -# - name: secrets -# secret: -# secretName: kube-apiserver - -livenessProbe: - httpGet: - path: /healthz - port: https - scheme: HTTPS - initialDelaySeconds: 20 - -readinessProbe: - httpGet: - path: /healthz - port: https - scheme: HTTPS - initialDelaySeconds: 20 - -securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: ["all"] - readOnlyRootFilesystem: true - runAsGroup: 10001 - runAsNonRoot: true - runAsUser: 10001 - -service: - annotations: {} - labels: {} - # Add these labels to have metrics-server show up in `kubectl cluster-info` - # kubernetes.io/cluster-service: "true" - # kubernetes.io/name: "Metrics-server" - port: 443 - type: ClusterIP - -podDisruptionBudget: - # https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - enabled: false - minAvailable: - maxUnavailable: - -global: - systemDefaultRegistry: "" diff --git a/index.yaml b/index.yaml old mode 100755 new mode 100644 index 9170c07..dee5a53 --- a/index.yaml +++ 
b/index.yaml @@ -1,23 +1,6 @@ apiVersion: v1 entries: rke2-canal: - - apiVersion: v1 - appVersion: v3.13.3 - created: "2021-02-25T18:49:47.253718-08:00" - description: Install Canal Network Plugin. - digest: 8be6ab96961a079b3ba56ba79bb49bcc14792b7b1235688384b51bc4f4c818b5 - home: https://www.projectcalico.org/ - keywords: - - canal - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-canal - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-canal/rke2-canal-v3.13.300-build2021022301.tgz - version: v3.13.300-build2021022301 - apiVersion: v1 appVersion: v3.13.3 created: "2021-02-25T17:59:12.931728-08:00" @@ -98,33 +81,6 @@ entries: urls: - assets/rke2-coredns/rke2-coredns-1.10.101.tgz version: 1.10.101 - - apiVersion: v1 - appVersion: 1.6.9 - created: "2021-02-25T18:49:47.256208-08:00" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes - DNS Services - digest: a6bcceac244eb1f4161ab29474dfed5464da49b7fd7af4df37e1ab2ebdd67ddd - home: https://coredns.io - icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png - keywords: - - coredns - - dns - - kubedns - maintainers: - - email: hello@acale.ph - name: Acaleph - - email: shashidhara.huawei@gmail.com - name: shashidharatd - - email: andor44@gmail.com - name: andor44 - - email: manuel@rueg.eu - name: mrueg - name: rke2-coredns - sources: - - https://github.com/coredns/coredns - urls: - - assets/rke2-coredns/rke2-coredns-1.10.101-build2021022302.tgz - version: 1.10.101-build2021022302 - apiVersion: v1 appVersion: 1.6.9 created: "2021-02-25T17:59:12.933187-08:00" @@ -153,26 +109,6 @@ entries: - assets/rke2-coredns/rke2-coredns-1.10.101-build2021022301.tgz version: 1.10.101-build2021022301 rke2-ingress-nginx: - - apiVersion: v1 - appVersion: 0.35.0 - created: "2021-02-25T18:49:47.265982-08:00" - description: Ingress controller for Kubernetes using NGINX as a reverse proxy - and load balancer - digest: 
2c4d09aa9c99c62ec2141e1067ee8d37485cebe3531e711a3d736520939cfba6 - home: https://github.com/kubernetes/ingress-nginx - icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png - keywords: - - ingress - - nginx - kubeVersion: '>=1.16.0-0' - maintainers: - - name: ChiefAlexander - name: rke2-ingress-nginx - sources: - - https://github.com/kubernetes/ingress-nginx - urls: - - assets/rke2-ingress-nginx/rke2-ingress-nginx-3.3.001.tgz - version: 3.3.001 - apiVersion: v1 appVersion: 0.35.0 created: "2021-02-25T17:59:12.938912-08:00" @@ -232,22 +168,6 @@ entries: urls: - assets/rke2-kube-proxy/rke2-kube-proxy-v1.20.2.tgz version: v1.20.2 - - apiVersion: v1 - appVersion: v1.19.8 - created: "2021-02-25T18:49:47.27395-08:00" - description: Install Kube Proxy. - digest: 41c6625db0dec5b21d98fb601b26abb3a23cd5ce598553ee87461c7c566f70ad - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.801.tgz - version: v1.19.801 - apiVersion: v1 appVersion: v1.19.8 created: "2021-02-25T17:59:12.951821-08:00" @@ -444,25 +364,6 @@ entries: urls: - assets/rke2-metrics-server/rke2-metrics-server-2.11.100.tgz version: 2.11.100 - - apiVersion: v1 - appVersion: 0.3.6 - created: "2021-02-25T18:49:47.27561-08:00" - description: Metrics Server is a cluster-wide aggregator of resource usage data. 
- digest: 15ddecb18f303eb0ba7c9278246a332320a3a211f3db7690295a1a684a1dd65e - home: https://github.com/kubernetes-incubator/metrics-server - keywords: - - metrics-server - maintainers: - - email: o.with@sportradar.com - name: olemarkus - - email: k.aasan@sportradar.com - name: kennethaasan - name: rke2-metrics-server - sources: - - https://github.com/kubernetes-incubator/metrics-server - urls: - - assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022301.tgz - version: 2.11.100-build2021022301 - apiVersion: v1 appVersion: 0.3.6 created: "2021-02-25T17:59:12.952919-08:00" From 5ba341da06906ba1737267fc824d456e862a80dd Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Thu, 25 Feb 2021 18:51:14 -0800 Subject: [PATCH 09/10] Revert "Temporarily point to fork" This reverts commit 4ecd0f7203c230bdd9c67908c1c6ef5a1fa75a82. Context: The purpose behind introducing the previous two change was to run a `make sync`, which tries to pull in new charts *only* from the source branch. This step would fail if the packageVersion was not modified. This worked, as seen below: ```bash arvindiyengar: ~/Rancher/rke2-charts/src/github.com/rancher/rke2-charts $ make sync ./bin/charts-build-scripts sync INFO[0000] Synchronizing with charts that will be generated from migrate-source ... (omitted for brevity) ... INFO[0002] Found the following latest release candidate versions: { "rke2-canal/v3.13.300-build2021022301": "rke2-canal/v3.13.300-build2021022301-rc00", "rke2-coredns/1.10.101-build2021022302": "rke2-coredns/1.10.101-build2021022302-rc00", "rke2-ingress-nginx/3.3.001": "rke2-ingress-nginx/3.3.001-rc00", "rke2-kube-proxy/v1.19.801": "rke2-kube-proxy/v1.19.801-rc00", "rke2-metrics-server/2.11.100-build2021022301": "rke2-metrics-server/2.11.100-build2021022301-rc00" } ... (omitted for brevity) ... INFO[0003] Sync was successful! INFO[0003] Successfully synchronized with migrate-source! INFO[0003] Creating or updating the Helm index with the newly added assets... 
INFO[0003] Your working directory is ready for a commit. ``` --- configuration.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configuration.yaml b/configuration.yaml index 2e3833e..56d6667 100644 --- a/configuration.yaml +++ b/configuration.yaml @@ -1,8 +1,8 @@ template: live sync: -- url: https://github.com/aiyengar2/rke2-charts.git - branch: migrate-source +- url: https://github.com/rancher/rke2-charts.git + branch: main-source dropReleaseCandidates: true helmRepo: cname: rke2-charts.rancher.io \ No newline at end of file From 035cb2b0be7dd7317ac890a72bbe213a6b680d9a Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Mon, 1 Mar 2021 11:21:09 -0800 Subject: [PATCH 10/10] remove executable permissions --- .gitignore | 0 Makefile | 0 README.md | 0 _config.yml | 0 assets/README.md | 0 charts/README.md | 0 6 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 .gitignore mode change 100755 => 100644 Makefile mode change 100755 => 100644 README.md mode change 100755 => 100644 _config.yml mode change 100755 => 100644 assets/README.md mode change 100755 => 100644 charts/README.md diff --git a/.gitignore b/.gitignore old mode 100755 new mode 100644 diff --git a/Makefile b/Makefile old mode 100755 new mode 100644 diff --git a/README.md b/README.md old mode 100755 new mode 100644 diff --git a/_config.yml b/_config.yml old mode 100755 new mode 100644 diff --git a/assets/README.md b/assets/README.md old mode 100755 new mode 100644 diff --git a/charts/README.md b/charts/README.md old mode 100755 new mode 100644