diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5f44260 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +bin +*.DS_Store \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..2eb544b --- /dev/null +++ b/Makefile @@ -0,0 +1,10 @@ +pull-scripts: + ./scripts/pull-scripts + +TARGETS := prepare patch charts clean sync validate rebase docs + +$(TARGETS): + @ls ./bin/charts-build-scripts 1>/dev/null 2>/dev/null || ./scripts/pull-scripts + ./bin/charts-build-scripts $@ + +.PHONY: $(TARGETS) \ No newline at end of file diff --git a/README.md b/README.md index ac1fda3..8fc2db3 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,62 @@ -# Asset branch +## Live Branch -This branch is auto-generated from main-source branch, please open PRs to main-source. +This branch contains generated assets that have been officially released on rke2-charts.rancher.io. -[asset](./assets) Folder contains all the helm chart artifacts. +The following directory structure is expected: +```text +assets/ + / + -.tgz + ... +charts/ + + + + # Unarchived Helm chart +``` -[charts](./charts) Folder contains all the helm chart content of the latest version for browsing purpose. \ No newline at end of file +### Configuration + +This repository branch contains a `configuration.yaml` file that is used to specify how it interacts with other repository branches. + +#### Sync + +This branch syncs with the generated assets from the following branches: +- main-source at https://github.com/rancher/rke2-charts.git (only latest assets) + +To release a new version of a chart, please open the relevant PRs to one of these branches. + +Merging should trigger a sync workflow on pushing to these branches. + +### Cutting a Release + +In the Live branch, cutting a release requires you to run the `make sync` command. 
+ +This command will automatically get the latest charts / resources merged into the the branches you sync with (as indicated in this branch's `configuration.yaml`) and will fail if any of those branches try to modify already released assets. + +If the `make sync` command fails, you might have to manually make changes to the contents of the Staging Branch to resolve any issues. + +Once you successfully run the `make sync` command, the logs outputted will itemize the releaseCandidateVersions picked out from the Staging branch and make exactly two changes: + +1. It will update the `Chart.yaml`'s version for each chart to drop the `-rcXX` from it + +2. It will update the `Chart.yaml`'s annotations for each chart to drop the `-rcXX` from it only for some special annotations (note: currently, the only special annotation we track is `catalog.cattle.io/auto-install`). + +Once you successfully run the `make release` command, ensure the following is true: +- The `assets/` and `charts/` directories each only have a single file contained within them: `README.md` +- The `released/assets/` directory has a .tgz file for each releaseCandidateVersion of a Chart that was created during this release. +- The `index.yaml` and `released/assets/index.yaml` both are identical and the `index.yaml`'s diff shows only two types of changes: a timestamp update or a modification of an existing URL from `assets/*` to `released/assets/*`. + +No other changes are expected. + +### Makefile + +#### Basic Commands + +`make pull-scripts`: Pulls in the version of the `charts-build-scripts` indicated in scripts. + +`make sync`: Syncs the assets in your current repository with the merged contents of all of the repository branches indicated in your configuration.yaml + +`make validate`: Validates your current repository branch against all the repository branches indicated in your configuration.yaml + +`make docs`: Pulls in the latest docs, scripts, etc. 
from the charts-build-scripts repository \ No newline at end of file diff --git a/_config.yml b/_config.yml new file mode 100644 index 0000000..1888c5a --- /dev/null +++ b/_config.yml @@ -0,0 +1 @@ +exclude: [charts] diff --git a/assets/README.md b/assets/README.md new file mode 100644 index 0000000..9fb85d2 --- /dev/null +++ b/assets/README.md @@ -0,0 +1,3 @@ +## Assets + +This folder contains Helm chart archives that are served from rke2-charts.rancher.io. \ No newline at end of file diff --git a/assets/index.yaml b/assets/index.yaml deleted file mode 100644 index 215b65c..0000000 --- a/assets/index.yaml +++ /dev/null @@ -1,381 +0,0 @@ -apiVersion: v1 -entries: - rke2-canal: - - apiVersion: v1 - appVersion: v3.13.3 - created: "2021-02-24T21:41:48.737080031Z" - description: Install Canal Network Plugin. - digest: 4b6ac74aec73a70d12186701660c1f221fdbcb582571029a6c8fbc2738065742 - home: https://www.projectcalico.org/ - keywords: - - canal - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-canal - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-canal/rke2-canal-v3.13.300-build20210223.tgz - version: v3.13.300-build20210223 - - apiVersion: v1 - appVersion: v3.13.3 - created: "2021-02-19T16:11:27.472930693Z" - description: Install Canal Network Plugin. 
- digest: 2396b0aca28a6d4a373a251b02e4efa12bbfedf29e37e45904b860176d0c80f8 - home: https://www.projectcalico.org/ - keywords: - - canal - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-canal - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-canal/rke2-canal-v3.13.3.tgz - version: v3.13.3 - rke2-coredns: - - apiVersion: v1 - appVersion: 1.7.1 - created: "2021-01-08T18:12:00.296423364Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services - digest: 335099356a98589e09f1bb940913b0ed6abb8d2c4db91720f87d1cf7697a5cf7 - home: https://coredns.io - icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png - keywords: - - coredns - - dns - - kubedns - name: rke2-coredns - sources: - - https://github.com/coredns/coredns - urls: - - assets/rke2-coredns/rke2-coredns-1.13.800.tgz - version: 1.13.800 - - apiVersion: v1 - appVersion: 1.6.9 - created: "2021-01-22T21:35:45.403680219Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services - digest: be60a62ec184cf6ca7b0ed917e6962e8a2578fa1eeef6a835e82d2b7709933d5 - home: https://coredns.io - icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png - keywords: - - coredns - - dns - - kubedns - maintainers: - - email: hello@acale.ph - name: Acaleph - - email: shashidhara.huawei@gmail.com - name: shashidharatd - - email: andor44@gmail.com - name: andor44 - - email: manuel@rueg.eu - name: mrueg - name: rke2-coredns - sources: - - https://github.com/coredns/coredns - urls: - - assets/rke2-coredns/rke2-coredns-1.10.101.tgz - version: 1.10.101 - - apiVersion: v1 - appVersion: 1.6.9 - created: "2021-02-24T21:41:48.738290233Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services - digest: 869cb592cac545f579b6de6b35de82de4904566fd91826bc16546fddc48fe1c4 - home: https://coredns.io - icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png - keywords: 
- - coredns - - dns - - kubedns - maintainers: - - email: hello@acale.ph - name: Acaleph - - email: shashidhara.huawei@gmail.com - name: shashidharatd - - email: andor44@gmail.com - name: andor44 - - email: manuel@rueg.eu - name: mrueg - name: rke2-coredns - sources: - - https://github.com/coredns/coredns - urls: - - assets/rke2-coredns/rke2-coredns-1.10.101-build2021022301.tgz - version: 1.10.101-build2021022301 - rke2-ingress-nginx: - - apiVersion: v1 - appVersion: 0.35.0 - created: "2021-02-24T21:42:02.60663315Z" - description: Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer - digest: 2480ed0be9032f8f839913e12f0528128a15483ced57c851baed605156532782 - home: https://github.com/kubernetes/ingress-nginx - icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png - keywords: - - ingress - - nginx - kubeVersion: '>=1.16.0-0' - maintainers: - - name: ChiefAlexander - name: rke2-ingress-nginx - sources: - - https://github.com/kubernetes/ingress-nginx - urls: - - assets/rke2-ingress-nginx/rke2-ingress-nginx-3.3.000.tgz - version: 3.3.000 - - apiVersion: v1 - appVersion: 0.30.0 - created: "2021-02-19T16:11:27.47593126Z" - description: An nginx Ingress controller that uses ConfigMap to store the nginx configuration. 
- digest: 768ce303918a97a2d0f9a333f4eb0f2ebb3b7f54b849e83c6bdd52f8b513af9b - home: https://github.com/kubernetes/ingress-nginx - icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png - keywords: - - ingress - - nginx - kubeVersion: '>=1.10.0-0' - maintainers: - - name: ChiefAlexander - - email: Trevor.G.Wood@gmail.com - name: taharah - name: rke2-ingress-nginx - sources: - - https://github.com/kubernetes/ingress-nginx - urls: - - assets/rke2-ingress-nginx/rke2-ingress-nginx-1.36.300.tgz - version: 1.36.300 - rke2-kube-proxy: - - apiVersion: v1 - appVersion: v1.20.2 - created: "2021-01-25T23:01:11.589999085Z" - description: Install Kube Proxy. - digest: 68f08c49c302bfe23e9c6f8074a21a6a3e0c90fdb16f5e6fb32a5a3ee3f7c717 - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.20.2.tgz - version: v1.20.2 - - apiVersion: v1 - appVersion: v1.19.8 - created: "2021-02-24T21:41:48.739048333Z" - description: Install Kube Proxy. - digest: f2bace51d33062e3ac713ebbedd48dd4df56c821dfa52da9fdf71891d601bcde - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.8.tgz - version: v1.19.8 - - apiVersion: v1 - appVersion: v1.19.7 - created: "2021-01-22T21:35:45.405178128Z" - description: Install Kube Proxy. 
- digest: def9baa9bc5c12267d3575a03a2e5f2eccc907a6058202ed09a6cd39967790ca - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.7.tgz - version: v1.19.7 - - apiVersion: v1 - appVersion: v1.19.5 - created: "2020-12-17T19:20:49.383692056Z" - description: Install Kube Proxy. - digest: f74f820857b79601f3b8e498e701297d71f3b37bbf94dc3ae96dfcca50fb80df - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.19.5.tgz - version: v1.19.5 - - apiVersion: v1 - appVersion: v1.18.16 - created: "2021-02-19T17:03:49.957724823Z" - description: Install Kube Proxy. - digest: a57acde11e30a9a15330ffec38686b605325b145f21935e79843b28652d46a21 - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.16.tgz - version: v1.18.16 - - apiVersion: v1 - appVersion: v1.18.15 - created: "2021-01-14T18:05:30.822746229Z" - description: Install Kube Proxy. - digest: 3a6429d05a3d22e3959ceac27db15f922f1033553e8e6b5da2eb7cd18ed9309f - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.15.tgz - version: v1.18.15 - - apiVersion: v1 - appVersion: v1.18.13 - created: "2020-12-10T22:07:42.184767459Z" - description: Install Kube Proxy. 
- digest: 15d192f5016b8573d2c6f17ab55fa6f14fa1352fcdef2c391a6a477b199867ec - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.13.tgz - version: v1.18.13 - - apiVersion: v1 - appVersion: v1.18.12 - created: "2020-12-07T21:17:34.244857883Z" - description: Install Kube Proxy. - digest: e1da2b245da23aaa526cb94c04ed48cd3e730b848c0d33e420dcfd5b15374f5e - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.12.tgz - version: v1.18.12 - - apiVersion: v1 - appVersion: v1.18.10 - created: "2020-10-15T22:21:23.252729387Z" - description: Install Kube Proxy. - digest: 1ae84231365f19d82a4ea7c6b069ce90308147ba77bef072290ef7464ff1694e - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.10.tgz - version: v1.18.10 - - apiVersion: v1 - appVersion: v1.18.9 - created: "2020-10-14T23:04:28.48143194Z" - description: Install Kube Proxy. - digest: e1e5b6f98c535fa5d90469bd3f731d331bdaa3f9154157d7625b367a7023f399 - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.9.tgz - version: v1.18.9 - - apiVersion: v1 - appVersion: v1.18.8 - created: "2020-09-29T00:14:59.633896455Z" - description: Install Kube Proxy. 
- digest: 7765237ddc39c416178242e7a6798d679a50f466ac18d3a412207606cd0d66ed - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.8.tgz - version: v1.18.8 - - apiVersion: v1 - appVersion: v1.18.4 - created: "2020-09-29T00:14:59.632610835Z" - description: Install Kube Proxy. - digest: b859363c5ecab8c46b53efa34d866b9c27840737ad1afec0eb9729b8968304fb - keywords: - - kube-proxy - maintainers: - - email: charts@rancher.com - name: Rancher Labs - name: rke2-kube-proxy - sources: - - https://github.com/rancher/rke2-charts - urls: - - assets/rke2-kube-proxy/rke2-kube-proxy-v1.18.4.tgz - version: v1.18.4 - rke2-metrics-server: - - apiVersion: v1 - appVersion: 0.3.6 - created: "2021-02-19T16:11:27.477610954Z" - description: Metrics Server is a cluster-wide aggregator of resource usage data. - digest: 295435f65cc6c0c5ed8fd6b028cac5614b761789c5e09c0483170c3fd46f6e59 - home: https://github.com/kubernetes-incubator/metrics-server - keywords: - - metrics-server - maintainers: - - email: o.with@sportradar.com - name: olemarkus - - email: k.aasan@sportradar.com - name: kennethaasan - name: rke2-metrics-server - sources: - - https://github.com/kubernetes-incubator/metrics-server - urls: - - assets/rke2-metrics-server/rke2-metrics-server-2.11.100.tgz - version: 2.11.100 - - apiVersion: v1 - appVersion: 0.3.6 - created: "2021-02-24T21:41:48.739850734Z" - description: Metrics Server is a cluster-wide aggregator of resource usage data. 
- digest: a7cbec2f4764c99db298fb4e1f5297246253a3228daf2747281c953059160fc9 - home: https://github.com/kubernetes-incubator/metrics-server - keywords: - - metrics-server - maintainers: - - email: o.with@sportradar.com - name: olemarkus - - email: k.aasan@sportradar.com - name: kennethaasan - name: rke2-metrics-server - sources: - - https://github.com/kubernetes-incubator/metrics-server - urls: - - assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022300.tgz - version: 2.11.100-build2021022300 -generated: "2021-02-24T21:42:02.60300284Z" diff --git a/charts/README.md b/charts/README.md new file mode 100644 index 0000000..c6a14ae --- /dev/null +++ b/charts/README.md @@ -0,0 +1,3 @@ +## Charts + +This folder contains the unarchived Helm charts that are currently being served at rke2-charts.rancher.io. \ No newline at end of file diff --git a/charts/rke2-canal/Chart.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/Chart.yaml similarity index 65% rename from charts/rke2-canal/Chart.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/Chart.yaml index 69e3d3f..8520cb8 100644 --- a/charts/rke2-canal/Chart.yaml +++ b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/Chart.yaml @@ -1,13 +1,13 @@ apiVersion: v1 -name: rke2-canal -description: Install Canal Network Plugin. -version: v3.13.300-build20210223 appVersion: v3.13.3 +description: Install Canal Network Plugin. 
home: https://www.projectcalico.org/ keywords: - - canal -sources: - - https://github.com/rancher/rke2-charts +- canal maintainers: - - name: Rancher Labs - email: charts@rancher.com +- email: charts@rancher.com + name: Rancher Labs +name: rke2-canal +sources: +- https://github.com/rancher/rke2-charts +version: v3.13.300-build20210223 diff --git a/charts/rke2-canal/templates/NOTES.txt b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/NOTES.txt similarity index 100% rename from charts/rke2-canal/templates/NOTES.txt rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/NOTES.txt diff --git a/charts/rke2-canal/templates/_helpers.tpl b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/_helpers.tpl old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-canal/templates/_helpers.tpl rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/_helpers.tpl diff --git a/charts/rke2-canal/templates/config.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/config.yaml similarity index 100% rename from charts/rke2-canal/templates/config.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/config.yaml diff --git a/charts/rke2-canal/templates/crd.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/crd.yaml similarity index 100% rename from charts/rke2-canal/templates/crd.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/crd.yaml diff --git a/charts/rke2-canal/templates/daemonset.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/daemonset.yaml similarity index 100% rename from charts/rke2-canal/templates/daemonset.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/daemonset.yaml diff --git a/charts/rke2-canal/templates/rbac.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/rbac.yaml similarity index 100% rename from 
charts/rke2-canal/templates/rbac.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/rbac.yaml diff --git a/charts/rke2-canal/templates/serviceaccount.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/serviceaccount.yaml similarity index 100% rename from charts/rke2-canal/templates/serviceaccount.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/templates/serviceaccount.yaml diff --git a/charts/rke2-canal/values.yaml b/charts/rke2-canal/rke2-canal-v3.13.300-build20210223/values.yaml similarity index 100% rename from charts/rke2-canal/values.yaml rename to charts/rke2-canal/rke2-canal-v3.13.300-build20210223/values.yaml diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/Chart.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/Chart.yaml new file mode 100644 index 0000000..52bdce9 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +appVersion: v3.13.3 +description: Install Canal Network Plugin. +home: https://www.projectcalico.org/ +keywords: +- canal +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-canal +sources: +- https://github.com/rancher/rke2-charts +version: v3.13.3 diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/NOTES.txt b/charts/rke2-canal/rke2-canal/v3.13.3/templates/NOTES.txt new file mode 100644 index 0000000..12a30ff --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/NOTES.txt @@ -0,0 +1,3 @@ +Canal network plugin has been installed. + +NOTE: It may take few minutes until Canal image install CNI files and node become in ready state. 
diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/_helpers.tpl b/charts/rke2-canal/rke2-canal/v3.13.3/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/config.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/templates/config.yaml new file mode 100644 index 0000000..37f28ef --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/config.yaml @@ -0,0 +1,67 @@ +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Canal installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ .Release.Name }}-config + namespace: kube-system +data: + # Typha is disabled. + typha_service_name: {{ .Values.calico.typhaServiceName | quote }} + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + canal_iface: {{ .Values.flannel.iface | quote }} + + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: {{ .Values.calico.masquerade | quote }} + + # Configure the MTU to use + veth_mtu: {{ .Values.calico.vethuMTU | quote }} + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. 
+ cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + + # Flannel network configuration. Mounted into the flannel container. + net-conf.json: | + { + "Network": {{ .Values.podCidr | quote }}, + "Backend": { + "Type": {{ .Values.flannel.backend | quote }} + } + } diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/crd.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/templates/crd.yaml new file mode 100644 index 0000000..0351759 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/crd.yaml @@ -0,0 +1,197 @@ +--- +# Source: calico/templates/kdd-crds.yaml + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BlockAffinity + plural: blockaffinities + singular: blockaffinity + +--- +apiVersion: apiextensions.k8s.io/v1beta1 
+kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMConfig + plural: ipamconfigs + 
singular: ipamconfig + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMHandle + plural: ipamhandles + singular: ipamhandle + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkSet + plural: networksets + singular: networkset diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/daemonset.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/templates/daemonset.yaml new file mode 100644 index 0000000..1431df8 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/daemonset.yaml @@ -0,0 +1,262 @@ +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the canal container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. 
+kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Release.Name | quote }} + namespace: kube-system + labels: + k8s-app: canal +spec: + selector: + matchLabels: + k8s-app: canal + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: canal + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure canal gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: canal + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: {{ template "system_default_registry" . }}{{ .Values.calico.cniImage.repository }}:{{ .Values.calico.cniImage.tag }} + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-canal.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: cni_network_config + # Set the hostname based on the k8s node name. 
+ - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: {{ template "system_default_registry" . }}{{ .Values.calico.flexvolImage.repository }}:{{ .Values.calico.flexvolImage.tag }} + command: ['/usr/local/bin/flexvol.sh', '-s', '/usr/local/bin/flexvol', '-i', 'flexvoldriver'] + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true + containers: + # Runs canal container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + command: + - "start_runit" + image: {{ template "system_default_registry" . }}{{ .Values.calico.nodeImage.repository }}:{{ .Values.calico.nodeImage.tag }} + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: {{ .Values.calico.datastoreType | quote }} + # Configure route aggregation based on pod CIDR. + - name: USE_POD_CIDR + value: {{ .Values.calico.usePodCIDR | quote }} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: {{ .Values.calico.waitForDatastore | quote }} + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Don't enable BGP. 
+ - name: CALICO_NETWORKING_BACKEND + value: {{ .Values.calico.networkingBackend | quote }} + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: {{ .Values.calico.clusterType | quote}} + # Period, in seconds, at which felix re-applies all iptables state + - name: FELIX_IPTABLESREFRESHINTERVAL + value: {{ .Values.calico.felixIptablesRefreshInterval | quote}} + - name: FELIX_IPTABLESBACKEND + value: {{ .Values.calico.felixIptablesBackend | quote}} + # No IP address needed. + - name: IP + value: "" + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: {{ .Values.calico.felixDefaultEndpointToHostAction | quote }} + # Disable IPv6 on Kubernetes. 
+ - name: FELIX_IPV6SUPPORT + value: {{ .Values.calico.felixIpv6Support | quote }} + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: {{ .Values.calico.felixLogSeverityScreen | quote }} + - name: FELIX_HEALTHENABLED + value: {{ .Values.calico.felixHealthEnabled | quote }} + # enable promentheus metrics + - name: FELIX_PROMETHEUSMETRICSENABLED + value: {{ .Values.calico.felixPrometheusMetricsEnabled | quote }} + - name: FELIX_XDPENABLED + value: {{ .Values.calico.felixXDPEnabled | quote }} + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + host: localhost + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # This container runs flannel using the kube-subnet-mgr backend + # for allocating subnets. + - name: kube-flannel + image: {{ template "system_default_registry" . }}{{ .Values.flannel.image.repository }}:{{ .Values.flannel.image.tag }} + command: + - "/opt/bin/flanneld" + {{- range .Values.flannel.args }} + - {{ . 
| quote }} + {{- end }} + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: FLANNELD_IFACE + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: canal_iface + - name: FLANNELD_IP_MASQ + valueFrom: + configMapKeyRef: + name: {{ .Release.Name }}-config + key: masquerade + volumeMounts: + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + # Used by canal. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used by flannel. + - name: flannel-cfg + configMap: + name: {{ .Release.Name }}-config + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/rbac.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/templates/rbac.yaml new file mode 100644 index 0000000..cd39730 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/rbac.yaml @@ -0,0 +1,163 @@ +--- +# Source: calico/templates/rbac.yaml + +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. 
+ - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only requried for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + +--- +# Flannel ClusterRole +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: + - apiGroups: [""] + resources: + - pods + verbs: + - get + - apiGroups: [""] + resources: + - nodes + verbs: + - list + - watch + - apiGroups: [""] + resources: + - nodes/status + verbs: + - patch +--- +# Bind the flannel ClusterRole to the canal ServiceAccount. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: canal-flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: canal-calico +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/templates/serviceaccount.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/templates/serviceaccount.yaml new file mode 100644 index 0000000..582d55b --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: canal + namespace: kube-system diff --git a/charts/rke2-canal/rke2-canal/v3.13.3/values.yaml b/charts/rke2-canal/rke2-canal/v3.13.3/values.yaml new file mode 100644 index 0000000..1bb70a0 --- /dev/null +++ b/charts/rke2-canal/rke2-canal/v3.13.3/values.yaml @@ -0,0 +1,74 @@ +--- + +# The IPv4 cidr pool to create on startup if none exists. Pod IPs will be +# chosen from this range. 
+podCidr: "10.42.0.0/16" + +flannel: + # kube-flannel image + image: + repository: rancher/hardened-flannel + tag: v0.13.0-rancher1 + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + iface: "" + # kube-flannel command arguments + args: + - "--ip-masq" + - "--kube-subnet-mgr" + # Backend for kube-flannel. Backend should not be changed + # at runtime. + backend: "vxlan" + +calico: + # CNI installation image. + cniImage: + repository: rancher/hardened-calico + tag: v3.13.3 + # Canal node image. + nodeImage: + repository: rancher/hardened-calico + tag: v3.13.3 + # Flexvol Image. + flexvolImage: + repository: rancher/hardened-calico + tag: v3.13.3 + # Datastore type for canal. It can be either kuberentes or etcd. + datastoreType: kubernetes + # Wait for datastore to initialize. + waitForDatastore: true + # Configure route aggregation based on pod CIDR. + usePodCIDR: true + # Disable BGP routing. + networkingBackend: none + # Cluster type to identify the deployment type. + clusterType: "k8s,canal" + # Disable file logging so `kubectl logs` works. + disableFileLogging: true + # Disable IPv6 on Kubernetes. + felixIpv6Support: false + # Period, in seconds, at which felix re-applies all iptables state + felixIptablesRefreshInterval: 60 + # iptables backend to use for felix, defaults to auto but can also be set to nft or legacy + felixIptablesBackend: auto + # Set Felix logging to "info". + felixLogSeverityScreen: info + # Enable felix healthcheck. + felixHealthEnabled: true + # Enable prometheus metrics + felixPrometheusMetricsEnabled: true + # Disable XDP Acceleration as we do not support it with our ubi7 base image + felixXDPEnabled: false + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: true + # Set Felix endpoint to host default action to ACCEPT. + felixDefaultEndpointToHostAction: ACCEPT + # Configure the MTU to use. 
+ vethuMTU: 1450 + # Typha is disabled. + typhaServiceName: none + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/.helmignore b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/.helmignore new file mode 100644 index 0000000..7c04072 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS diff --git a/charts/rke2-coredns/Chart.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/Chart.yaml old mode 100755 new mode 100644 similarity index 94% rename from charts/rke2-coredns/Chart.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/Chart.yaml index 4606ef8..c4ddcf4 --- a/charts/rke2-coredns/Chart.yaml +++ b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/Chart.yaml @@ -1,7 +1,6 @@ apiVersion: v1 appVersion: 1.6.9 -description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS - Services +description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services home: https://coredns.io icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png keywords: diff --git a/charts/rke2-coredns/README.md b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/README.md old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/README.md rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/README.md diff --git a/charts/rke2-coredns/templates/NOTES.txt b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/NOTES.txt old mode 100755 new mode 100644 similarity 
index 100% rename from charts/rke2-coredns/templates/NOTES.txt rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/NOTES.txt diff --git a/charts/rke2-coredns/templates/_helpers.tpl b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/_helpers.tpl old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/_helpers.tpl rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/_helpers.tpl diff --git a/charts/rke2-coredns/templates/clusterrole-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrole-autoscaler.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/clusterrole-autoscaler.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrole-autoscaler.yaml diff --git a/charts/rke2-coredns/templates/clusterrole.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrole.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/clusterrole.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrole.yaml diff --git a/charts/rke2-coredns/templates/clusterrolebinding-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrolebinding-autoscaler.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/clusterrolebinding-autoscaler.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrolebinding-autoscaler.yaml diff --git a/charts/rke2-coredns/templates/clusterrolebinding.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/clusterrolebinding.yaml rename to 
charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/clusterrolebinding.yaml diff --git a/charts/rke2-coredns/templates/configmap-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/configmap-autoscaler.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/configmap-autoscaler.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/configmap-autoscaler.yaml diff --git a/charts/rke2-coredns/templates/configmap.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/configmap.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/configmap.yaml diff --git a/charts/rke2-coredns/templates/deployment-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/deployment-autoscaler.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/deployment-autoscaler.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/deployment-autoscaler.yaml diff --git a/charts/rke2-coredns/templates/deployment.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/deployment.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/deployment.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/deployment.yaml diff --git a/charts/rke2-coredns/templates/poddisruptionbudget.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/poddisruptionbudget.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/poddisruptionbudget.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/poddisruptionbudget.yaml diff --git 
a/charts/rke2-coredns/templates/podsecuritypolicy.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/podsecuritypolicy.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/podsecuritypolicy.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/podsecuritypolicy.yaml diff --git a/charts/rke2-coredns/templates/service-metrics.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/service-metrics.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/service-metrics.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/service-metrics.yaml diff --git a/charts/rke2-coredns/templates/service.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/service.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/service.yaml diff --git a/charts/rke2-coredns/templates/serviceaccount-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/serviceaccount-autoscaler.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/serviceaccount-autoscaler.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/serviceaccount-autoscaler.yaml diff --git a/charts/rke2-coredns/templates/serviceaccount.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/serviceaccount.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/serviceaccount.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/serviceaccount.yaml diff --git a/charts/rke2-coredns/templates/servicemonitor.yaml 
b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/servicemonitor.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/servicemonitor.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/templates/servicemonitor.yaml diff --git a/charts/rke2-coredns/values.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/values.yaml rename to charts/rke2-coredns/rke2-coredns/1.10.101-build2021022301/values.yaml diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/.helmignore b/charts/rke2-coredns/rke2-coredns/1.10.101/.helmignore new file mode 100644 index 0000000..7c04072 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/Chart.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/Chart.yaml new file mode 100644 index 0000000..fea533e --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +appVersion: 1.6.9 +description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS + Services +home: https://coredns.io +icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png +keywords: +- coredns +- dns +- kubedns +maintainers: +- email: hello@acale.ph + name: Acaleph +- email: shashidhara.huawei@gmail.com + name: shashidharatd +- email: andor44@gmail.com + name: andor44 +- email: manuel@rueg.eu + name: mrueg +name: rke2-coredns +sources: +- https://github.com/coredns/coredns +version: 1.10.101 diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/README.md b/charts/rke2-coredns/rke2-coredns/1.10.101/README.md new file mode 100644 index 0000000..0d41d40 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/README.md @@ -0,0 +1,138 @@ +# CoreDNS + +[CoreDNS](https://coredns.io/) is a DNS server that chains plugins and provides DNS Services + +# TL;DR; + +```console +$ helm install --name coredns --namespace=kube-system stable/coredns +``` + +## Introduction + +This chart bootstraps a [CoreDNS](https://github.com/coredns/coredns) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. This chart will provide DNS Services and can be deployed in multiple configuration to support various scenarios listed below: + + - CoreDNS as a cluster dns service and a drop-in replacement for Kube/SkyDNS. This is the default mode and CoreDNS is deployed as cluster-service in kube-system namespace. 
This mode is chosen by setting `isClusterService` to true. + - CoreDNS as an external dns service. In this mode CoreDNS is deployed as any kubernetes app in user specified namespace. The CoreDNS service can be exposed outside the cluster by using either the NodePort or LoadBalancer type of service. This mode is chosen by setting `isClusterService` to false. + - CoreDNS as an external dns provider for kubernetes federation. This is a sub case of 'external dns service' which uses etcd plugin for CoreDNS backend. This deployment mode has a dependency on `etcd-operator` chart, which needs to be pre-installed. + +## Prerequisites + +- Kubernetes 1.10 or later + +## Installing the Chart + +The chart can be installed as follows: + +```console +$ helm install --name coredns --namespace=kube-system stable/coredns +``` + +The command deploys CoreDNS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists various ways to override default configuration during deployment. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete coredns +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Configuration + +| Parameter | Description | Default | +|:----------------------------------------|:--------------------------------------------------------------------------------------|:------------------------------------------------------------| +| `image.repository` | The image repository to pull from | coredns/coredns | +| `image.tag` | The image tag to pull from | `v1.6.9` | +| `image.pullPolicy` | Image pull policy | IfNotPresent | +| `replicaCount` | Number of replicas | 1 | +| `resources.limits.cpu` | Container maximum CPU | `100m` | +| `resources.limits.memory` | Container maximum memory | `128Mi` | +| `resources.requests.cpu` | Container requested CPU | `100m` | +| `resources.requests.memory` | Container requested memory | `128Mi` | +| `serviceType` | Kubernetes Service type | `ClusterIP` | +| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | {} | +| `prometheus.monitor.namespace` | Selector to select which namespaces the Endpoints objects are discovered from. | `""` | +| `service.clusterIP` | IP address to assign to service | `""` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `[]` | +| `service.annotations` | Annotations to add to service | `{prometheus.io/scrape: "true", prometheus.io/port: "9153"}`| +| `serviceAccount.create` | If true, create & use serviceAccount | false | +| `serviceAccount.name` | If not set & create is true, use template fullname | | +| `rbac.create` | If true, create & use RBAC resources | true | +| `rbac.pspEnable` | Specifies whether a PodSecurityPolicy should be created. | `false` | +| `isClusterService` | Specifies whether chart should be deployed as cluster-service or normal k8s app. 
| true | +| `priorityClassName` | Name of Priority Class to assign pods | `""` | +| `servers` | Configuration for CoreDNS and plugins | See values.yml | +| `affinity` | Affinity settings for pod assignment | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `tolerations` | Tolerations for pod assignment | [] | +| `zoneFiles` | Configure custom Zone files | [] | +| `extraSecrets` | Optional array of secrets to mount inside the CoreDNS container | [] | +| `customLabels` | Optional labels for Deployment(s), Pod, Service, ServiceMonitor objects | {} | +| `podDisruptionBudget` | Optional PodDisruptionBudget | {} | +| `autoscaler.enabled` | Optionally enabled a cluster-proportional-autoscaler for CoreDNS | `false` | +| `autoscaler.coresPerReplica` | Number of cores in the cluster per CoreDNS replica | `256` | +| `autoscaler.nodesPerReplica` | Number of nodes in the cluster per CoreDNS replica | `16` | +| `autoscaler.image.repository` | The image repository to pull autoscaler from | k8s.gcr.io/cluster-proportional-autoscaler-amd64 | +| `autoscaler.image.tag` | The image tag to pull autoscaler from | `1.7.1` | +| `autoscaler.image.pullPolicy` | Image pull policy for the autoscaler | IfNotPresent | +| `autoscaler.priorityClassName` | Optional priority class for the autoscaler pod. `priorityClassName` used if not set. 
| `""` | +| `autoscaler.affinity` | Affinity settings for pod assignment for autoscaler | {} | +| `autoscaler.nodeSelector` | Node labels for pod assignment for autoscaler | {} | +| `autoscaler.tolerations` | Tolerations for pod assignment for autoscaler | [] | +| `autoscaler.resources.limits.cpu` | Container maximum CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.limits.memory` | Container maximum memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.resources.requests.cpu` | Container requested CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.requests.memory` | Container requested memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.configmap.annotations` | Annotations to add to autoscaler config map. For example to stop CI renaming them | {} | + +See `values.yaml` for configuration notes. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install --name coredns \ + --set rbac.create=false \ + stable/coredns +``` + +The above command disables automatic creation of RBAC rules. + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install --name coredns -f values.yaml stable/coredns +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + + +## Caveats + +The chart will automatically determine which protocols to listen on based on +the protocols you define in your zones. This means that you could potentially +use both "TCP" and "UDP" on a single port. +Some cloud environments like "GCE" or "Azure container service" cannot +create external loadbalancers with both "TCP" and "UDP" protocols. So +When deploying CoreDNS with `serviceType="LoadBalancer"` on such cloud +environments, make sure you do not attempt to use both protocols at the same +time. 
+ +## Autoscaling + +By setting `autoscaler.enabled = true` a +[cluster-proportional-autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler) +will be deployed. This will default to a coredns replica for every 256 cores, or +16 nodes in the cluster. These can be changed with `autoscaler.coresPerReplica` +and `autoscaler.nodesPerReplica`. When cluster is using large nodes (with more +cores), `coresPerReplica` should dominate. If using small nodes, +`nodesPerReplica` should dominate. + +This also creates a ServiceAccount, ClusterRole, and ClusterRoleBinding for +the autoscaler deployment. + +`replicaCount` is ignored if this is enabled. diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/NOTES.txt b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/NOTES.txt new file mode 100644 index 0000000..3a1883b --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/NOTES.txt @@ -0,0 +1,30 @@ +{{- if .Values.isClusterService }} +CoreDNS is now running in the cluster as a cluster-service. +{{- else }} +CoreDNS is now running in the cluster. +It can be accessed using the below endpoint +{{- if contains "NodePort" .Values.serviceType }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "coredns.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "$NODE_IP:$NODE_PORT" +{{- else if contains "LoadBalancer" .Values.serviceType }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status by running 'kubectl get svc -w {{ template "coredns.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "coredns.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo $SERVICE_IP +{{- else if contains "ClusterIP" .Values.serviceType }} + "{{ template "coredns.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local" + from within the cluster +{{- end }} +{{- end }} + +It can be tested with the following: + +1. Launch a Pod with DNS tools: + +kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools + +2. Query the DNS server: + +/ # host kubernetes diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/_helpers.tpl b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/_helpers.tpl new file mode 100644 index 0000000..cfdbef7 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/_helpers.tpl @@ -0,0 +1,158 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "coredns.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "coredns.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.servicePorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq .use_tcp true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := set $ports $port 
$innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- if index $innerdict "isudp" -}} + {{- printf "- {port: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- printf "- {port: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.containerPorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq .use_tcp true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- $innerdict := set $innerdict 
"istcp" true -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := set $ports $port $innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- if index $innerdict "isudp" -}} + {{- printf "- {containerPort: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- printf "- {containerPort: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} + {{- end -}} + {{- end -}} +{{- end -}} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "coredns.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "coredns.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole-autoscaler.yaml new file mode 100644 index 0000000..b40bb0a --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole-autoscaler.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list","watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] +# Remove the configmaps rule once below issue is fixed: +# kubernetes-incubator/cluster-proportional-autoscaler#16 + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole.yaml new file mode 100644 index 0000000..4203a02 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrole.yaml @@ -0,0 +1,38 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +{{- if .Values.rbac.pspEnable }} +- apiGroups: + - policy + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "coredns.fullname" . }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding-autoscaler.yaml new file mode 100644 index 0000000..d1ff736 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding-autoscaler.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }}-autoscaler +subjects: +- kind: ServiceAccount + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..7ae9d4f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "coredns.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap-autoscaler.yaml new file mode 100644 index 0000000..0712e0d --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap-autoscaler.yaml @@ -0,0 +1,34 @@ +{{- if .Values.autoscaler.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + {{- if .Values.customLabels }} + {{- toYaml .Values.customLabels | nindent 4 }} + {{- end }} + {{- if .Values.autoscaler.configmap.annotations }} + annotations: + {{- toYaml .Values.autoscaler.configmap.annotations | nindent 4 }} + {{- end }} +data: + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + linear: |- + { + "coresPerReplica": {{ .Values.autoscaler.coresPerReplica | float64 }}, + "nodesPerReplica": {{ .Values.autoscaler.nodesPerReplica | float64 }}, + "preventSinglePointFailure": true + } +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap.yaml new file mode 100644 index 0000000..b5069d3 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/configmap.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +data: + Corefile: |- + {{ range .Values.servers }} + {{- range $idx, $zone := .zones }}{{ if $idx }} {{ else }}{{ end }}{{ default "" $zone.scheme }}{{ default "." $zone.zone }}{{ else }}.{{ end -}} + {{- if .port }}:{{ .port }} {{ end -}} + { + {{- range .plugins }} + {{ .name }} {{ if .parameters }} {{if eq .name "kubernetes" }} {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDomain }} {{ end }} {{.parameters}}{{ end }}{{ if .configBlock }} { +{{ .configBlock | indent 12 }} + }{{ end }} + {{- end }} + } + {{ end }} + {{- range .Values.zoneFiles }} + {{ .filename }}: {{ toYaml .contents | indent 4 }} + {{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment-autoscaler.yaml new file mode 100644 index 0000000..6ddd209 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment-autoscaler.yaml @@ -0,0 +1,77 @@ +{{- if .Values.autoscaler.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}}-autoscaler + template: + metadata: + labels: + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.customLabels }} + {{ toYaml .Values.customLabels | nindent 8 }} + {{- end }} + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/configmap-autoscaler.yaml") . | sha256sum }} + {{- if .Values.isClusterService }} + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' + {{- end }} + spec: + serviceAccountName: {{ template "coredns.fullname" . }}-autoscaler + {{- $priorityClassName := default .Values.priorityClassName .Values.autoscaler.priorityClassName }} + {{- if $priorityClassName }} + priorityClassName: {{ $priorityClassName | quote }} + {{- end }} + {{- if .Values.autoscaler.affinity }} + affinity: +{{ toYaml .Values.autoscaler.affinity | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.tolerations }} + tolerations: +{{ toYaml .Values.autoscaler.tolerations | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.nodeSelector }} + nodeSelector: +{{ toYaml .Values.autoscaler.nodeSelector | indent 8 }} + {{- end }} + containers: + - name: autoscaler + image: {{ template "system_default_registry" . }}{{ .Values.autoscaler.image.repository }}:{{ .Values.autoscaler.image.tag }} + imagePullPolicy: {{ .Values.autoscaler.image.pullPolicy }} + resources: +{{ toYaml .Values.autoscaler.resources | indent 10 }} + command: + - /cluster-proportional-autoscaler + - --namespace={{ .Release.Namespace }} + - --configmap={{ template "coredns.fullname" . }}-autoscaler + - --target=Deployment/{{ template "coredns.fullname" . 
}} + - --logtostderr=true + - --v=2 +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment.yaml new file mode 100644 index 0000000..0ed3c52 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/deployment.yaml @@ -0,0 +1,127 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + {{- if not .Values.autoscaler.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 10% + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + template: + metadata: + labels: + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} + {{- if .Values.isClusterService }} + scheduler.alpha.kubernetes.io/critical-pod: '' + {{- end }} + spec: + serviceAccountName: {{ template "coredns.serviceAccountName" . }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.isClusterService }} + dnsPolicy: Default + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if or (.Values.isClusterService) (.Values.tolerations) }} + tolerations: + {{- if .Values.isClusterService }} + - key: CriticalAddonsOnly + operator: Exists + {{- end }} + {{- if .Values.tolerations }} +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + containers: + - name: "coredns" + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns +{{- range .Values.extraSecrets }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: true +{{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + ports: +{{ include "coredns.containerPorts" . | indent 8 }} + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + volumes: + - name: config-volume + configMap: + name: {{ template "coredns.fullname" . 
}} + items: + - key: Corefile + path: Corefile + {{ range .Values.zoneFiles }} + - key: {{ .filename }} + path: {{ .filename }} + {{ end }} +{{- range .Values.extraSecrets }} + - name: {{ .name }} + secret: + secretName: {{ .name }} + defaultMode: 400 +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/poddisruptionbudget.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..1fee2de --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/poddisruptionbudget.yaml @@ -0,0 +1,28 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/podsecuritypolicy.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..4e7a36f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/podsecuritypolicy.yaml @@ -0,0 +1,57 @@ +{{- if .Values.rbac.pspEnable }} +{{ if .Capabilities.APIVersions.Has "policy/v1beta1" }} +apiVersion: policy/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: PodSecurityPolicy +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- else }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- end }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # Add back CAP_NET_BIND_SERVICE so that coredns can run on port 53 + allowedCapabilities: + - CAP_NET_BIND_SERVICE + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service-metrics.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service-metrics.yaml new file mode 100644 index 0000000..1657cd7 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service-metrics.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "coredns.fullname" . }}-metrics + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + selector: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + ports: + - name: metrics + port: 9153 + targetPort: 9153 +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service.yaml new file mode 100644 index 0000000..95c858f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/service.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "coredns.fullname" . 
}} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + selector: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{ else }} + clusterIP: {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDNS }} + {{- end }} + {{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} + {{- end }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: +{{ include "coredns.servicePorts" . | indent 2 -}} + type: {{ default "ClusterIP" .Values.serviceType }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount-autoscaler.yaml new file mode 100644 index 0000000..1b218d2 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount-autoscaler.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount.yaml new file mode 100644 index 0000000..23f29a1 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.serviceAccountName" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/templates/servicemonitor.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/servicemonitor.yaml new file mode 100644 index 0000000..ca0b691 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "coredns.fullname" . }} + {{- if .Values.prometheus.monitor.namespace }} + namespace: {{ .Values.prometheus.monitor.namespace }} + {{- end }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics + endpoints: + - port: metrics +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.10.101/values.yaml b/charts/rke2-coredns/rke2-coredns/1.10.101/values.yaml new file mode 100644 index 0000000..828589e --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.10.101/values.yaml @@ -0,0 +1,202 @@ +# Default values for coredns. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +image: + repository: rancher/hardened-coredns + tag: "v1.6.9" + pullPolicy: IfNotPresent + +replicaCount: 1 + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +serviceType: "ClusterIP" + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + +service: +# clusterIP: "" +# loadBalancerIP: "" +# externalTrafficPolicy: "" + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9153" + +serviceAccount: + create: true + # The name of the ServiceAccount to use + # If not set and create is true, a name is generated using the fullname template + name: coredns + +rbac: + # If true, create & use RBAC resources + create: true + # If true, create and use PodSecurityPolicy + pspEnable: false + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + # name: + +# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app. +isClusterService: true + +# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set. +priorityClassName: "system-cluster-critical" + +# Default zone is what Kubernetes recommends: +# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options +servers: +- zones: + - zone: . + port: 53 + plugins: + - name: errors + # Serves a /health endpoint on :8080, required for livenessProbe + - name: health + configBlock: |- + lameduck 5s + # Serves a /ready endpoint on :8181, required for readinessProbe + - name: ready + # Required to query kubernetes API for data + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + # Serves a /metrics endpoint on :9153, required for serviceMonitor + - name: prometheus + parameters: 0.0.0.0:9153 + - name: forward + parameters: . 
/etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance + +# Complete example with all the options: +# - zones: # the `zones` block can be left out entirely, defaults to "." +# - zone: hello.world. # optional, defaults to "." +# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS) +# - zone: foo.bar. +# scheme: dns:// +# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol +# # Note that this will not work if you are also exposing tls or grpc on the same server +# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS) +# plugins: # the plugins to use for this server block +# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it! +# parameters: foo bar # list of parameters after the plugin +# configBlock: |- # if the plugin supports extra block style config, supply it here +# hello world +# foo bar + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core +# for example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core +# for example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] + +# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +podDisruptionBudget: {} + +# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/ +zoneFiles: [] +# - filename: 
example.db +# domain: example.com +# contents: | +# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600 +# example.com. IN NS b.iana-servers.net. +# example.com. IN NS a.iana-servers.net. +# example.com. IN A 192.168.99.102 +# *.example.com. IN A 192.168.99.102 + +# optional array of secrets to mount inside coredns container +# possible usecase: need for secure connection with etcd backend +extraSecrets: [] +# - name: etcd-client-certs +# mountPath: /etc/coredns/tls/etcd +# - name: some-fancy-secret +# mountPath: /etc/wherever + +# Custom labels to apply to Deployment, Pod, Service, ServiceMonitor. Including autoscaler if enabled. +customLabels: {} + +## Configue a cluster-proportional-autoscaler for coredns +# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler +autoscaler: + # Enabled the cluster-proportional-autoscaler + enabled: false + + # Number of cores in the cluster per coredns replica + coresPerReplica: 256 + # Number of nodes in the cluster per coredns replica + nodesPerReplica: 16 + + image: + repository: k8s.gcr.io/cluster-proportional-autoscaler-amd64 + tag: "1.7.1" + pullPolicy: IfNotPresent + + # Optional priority class to be used for the autoscaler pods. priorityClassName used if not set. + priorityClassName: "" + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core + affinity: {} + + # Node labels for pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core + tolerations: [] + + # resources for autoscaler pod + resources: + requests: + cpu: "20m" + memory: "10Mi" + limits: + cpu: "20m" + memory: "10Mi" + + # Options for autoscaler configmap + configmap: + ## Annotations for the coredns-autoscaler configmap + # i.e. 
strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed + annotations: {} +k8sApp : "kube-dns" + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/.helmignore b/charts/rke2-coredns/rke2-coredns/1.13.800/.helmignore new file mode 100644 index 0000000..7c04072 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/Chart.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/Chart.yaml new file mode 100644 index 0000000..ea1b23c --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +appVersion: 1.7.1 +description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS + Services +home: https://coredns.io +icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png +keywords: +- coredns +- dns +- kubedns +name: rke2-coredns +sources: +- https://github.com/coredns/coredns +version: 1.13.800 diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/README.md b/charts/rke2-coredns/rke2-coredns/1.13.800/README.md new file mode 100644 index 0000000..9d9ad64 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/README.md @@ -0,0 +1,169 @@ +# ⚠️ Repo Archive Notice + +As of Nov 13, 2020, charts in this repo will no longer be updated. +For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/). 
+ +# CoreDNS + +[CoreDNS](https://coredns.io/) is a DNS server that chains plugins and provides DNS Services + +## DEPRECATION NOTICE + +This chart is deprecated and no longer supported. + +# TL;DR; + +```console +$ helm install --name coredns --namespace=kube-system stable/coredns +``` + +## Introduction + +This chart bootstraps a [CoreDNS](https://github.com/coredns/coredns) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. This chart will provide DNS Services and can be deployed in multiple configurations to support various scenarios listed below: + + - CoreDNS as a cluster dns service and a drop-in replacement for Kube/SkyDNS. This is the default mode and CoreDNS is deployed as cluster-service in kube-system namespace. This mode is chosen by setting `isClusterService` to true. + - CoreDNS as an external dns service. In this mode CoreDNS is deployed as any kubernetes app in user specified namespace. The CoreDNS service can be exposed outside the cluster by using either the NodePort or LoadBalancer type of service. This mode is chosen by setting `isClusterService` to false. + - CoreDNS as an external dns provider for kubernetes federation. This is a sub case of 'external dns service' which uses etcd plugin for CoreDNS backend. This deployment mode has a dependency on the `etcd-operator` chart, which needs to be pre-installed. + +## Prerequisites + +- Kubernetes 1.10 or later + +## Installing the Chart + +The chart can be installed as follows: + +```console +$ helm install --name coredns --namespace=kube-system stable/coredns +``` + +The command deploys CoreDNS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists various ways to override default configuration during deployment. 
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete coredns +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +| Parameter | Description | Default | +|:----------------------------------------|:--------------------------------------------------------------------------------------|:------------------------------------------------------------| +| `image.repository` | The image repository to pull from | coredns/coredns | +| `image.tag` | The image tag to pull from | `v1.7.1` | +| `image.pullPolicy` | Image pull policy | IfNotPresent | +| `replicaCount` | Number of replicas | 1 | +| `resources.limits.cpu` | Container maximum CPU | `100m` | +| `resources.limits.memory` | Container maximum memory | `128Mi` | +| `resources.requests.cpu` | Container requested CPU | `100m` | +| `resources.requests.memory` | Container requested memory | `128Mi` | +| `serviceType` | Kubernetes Service type | `ClusterIP` | +| `prometheus.service.enabled` | Set this to `true` to create Service for Prometheus metrics | `false` | +| `prometheus.service.annotations` | Annotations to add to the metrics Service | `{prometheus.io/scrape: "true", prometheus.io/port: "9153"}`| +| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | {} | +| `prometheus.monitor.namespace` | Selector to select which namespaces the Endpoints objects are discovered from. 
| `""` | +| `service.clusterIP` | IP address to assign to service | `""` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` | +| `service.externalIPs` | External IP addresses | [] | +| `service.externalTrafficPolicy` | Enable client source IP preservation | [] | +| `service.annotations` | Annotations to add to service | {} | +| `serviceAccount.create` | If true, create & use serviceAccount | false | +| `serviceAccount.name` | If not set & create is true, use template fullname | | +| `rbac.create` | If true, create & use RBAC resources | true | +| `rbac.pspEnable` | Specifies whether a PodSecurityPolicy should be created. | `false` | +| `isClusterService` | Specifies whether chart should be deployed as cluster-service or normal k8s app. | true | +| `priorityClassName` | Name of Priority Class to assign pods | `""` | +| `servers` | Configuration for CoreDNS and plugins | See values.yml | +| `affinity` | Affinity settings for pod assignment | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `tolerations` | Tolerations for pod assignment | [] | +| `zoneFiles` | Configure custom Zone files | [] | +| `extraVolumes` | Optional array of volumes to create | [] | +| `extraVolumeMounts` | Optional array of volumes to mount inside the CoreDNS container | [] | +| `extraSecrets` | Optional array of secrets to mount inside the CoreDNS container | [] | +| `customLabels` | Optional labels for Deployment(s), Pod, Service, ServiceMonitor objects | {} | +| `rollingUpdate.maxUnavailable` | Maximum number of unavailable replicas during rolling update | `1` | +| `rollingUpdate.maxSurge` | Maximum number of pods created above desired number of pods | `25%` | +| `podDisruptionBudget` | Optional PodDisruptionBudget | {} | +| `podAnnotations` | Optional Pod only Annotations | {} | +| `terminationGracePeriodSeconds` | Optional duration in seconds the pod needs to terminate gracefully. 
| 30 | +| `preStopSleep` | Definition of Kubernetes preStop hook executed before Pod termination | {} | +| `hpa.enabled` | Enable Hpa autoscaler instead of proportional one | `false` | +| `hpa.minReplicas` | Hpa minimum number of CoreDNS replicas | `1` | +| `hpa.maxReplicas` | Hpa maximum number of CoreDNS replicas | `2` | +| `hpa.metrics` | Metrics definitions used by Hpa to scale up and down | {} | +| `autoscaler.enabled` | Optionally enabled a cluster-proportional-autoscaler for CoreDNS | `false` | +| `autoscaler.coresPerReplica` | Number of cores in the cluster per CoreDNS replica | `256` | +| `autoscaler.nodesPerReplica` | Number of nodes in the cluster per CoreDNS replica | `16` | +| `autoscaler.min` | Min size of replicaCount | 0 | +| `autoscaler.max` | Max size of replicaCount | 0 (aka no max) | +| `autoscaler.includeUnschedulableNodes` | Should the replicas scale based on the total number or only schedulable nodes | `false` | +| `autoscaler.preventSinglePointFailure` | If true does not allow single points of failure to form | `true` | +| `autoscaler.image.repository` | The image repository to pull autoscaler from | k8s.gcr.io/cluster-proportional-autoscaler-amd64 | +| `autoscaler.image.tag` | The image tag to pull autoscaler from | `1.7.1` | +| `autoscaler.image.pullPolicy` | Image pull policy for the autoscaler | IfNotPresent | +| `autoscaler.priorityClassName` | Optional priority class for the autoscaler pod. `priorityClassName` used if not set. 
| `""` | +| `autoscaler.affinity` | Affinity settings for pod assignment for autoscaler | {} | +| `autoscaler.nodeSelector` | Node labels for pod assignment for autoscaler | {} | +| `autoscaler.tolerations` | Tolerations for pod assignment for autoscaler | [] | +| `autoscaler.resources.limits.cpu` | Container maximum CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.limits.memory` | Container maximum memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.resources.requests.cpu` | Container requested CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.requests.memory` | Container requested memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.configmap.annotations` | Annotations to add to autoscaler config map. For example to stop CI renaming them | {} | + +See `values.yaml` for configuration notes. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install --name coredns \ + --set rbac.create=false \ + stable/coredns +``` + +The above command disables automatic creation of RBAC rules. + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install --name coredns -f values.yaml stable/coredns +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + + +## Caveats + +The chart will automatically determine which protocols to listen on based on +the protocols you define in your zones. This means that you could potentially +use both "TCP" and "UDP" on a single port. +Some cloud environments like "GCE" or "Azure container service" cannot +create external loadbalancers with both "TCP" and "UDP" protocols. So +When deploying CoreDNS with `serviceType="LoadBalancer"` on such cloud +environments, make sure you do not attempt to use both protocols at the same +time. 
+ +## Autoscaling + +By setting `autoscaler.enabled = true` a +[cluster-proportional-autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler) +will be deployed. This will default to a coredns replica for every 256 cores, or +16 nodes in the cluster. These can be changed with `autoscaler.coresPerReplica` +and `autoscaler.nodesPerReplica`. When cluster is using large nodes (with more +cores), `coresPerReplica` should dominate. If using small nodes, +`nodesPerReplica` should dominate. + +This also creates a ServiceAccount, ClusterRole, and ClusterRoleBinding for +the autoscaler deployment. + +`replicaCount` is ignored if this is enabled. + +By setting `hpa.enabled = true` a [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) +is enabled for the CoreDNS deployment. This can scale the number of replicas based on metrics +like CpuUtilization, MemoryUtilization or Custom ones. diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/NOTES.txt b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/NOTES.txt new file mode 100644 index 0000000..3a1883b --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/NOTES.txt @@ -0,0 +1,30 @@ +{{- if .Values.isClusterService }} +CoreDNS is now running in the cluster as a cluster-service. +{{- else }} +CoreDNS is now running in the cluster. +It can be accessed using the below endpoint +{{- if contains "NodePort" .Values.serviceType }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "coredns.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "$NODE_IP:$NODE_PORT" +{{- else if contains "LoadBalancer" .Values.serviceType }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status by running 'kubectl get svc -w {{ template "coredns.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "coredns.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo $SERVICE_IP +{{- else if contains "ClusterIP" .Values.serviceType }} + "{{ template "coredns.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local" + from within the cluster +{{- end }} +{{- end }} + +It can be tested with the following: + +1. Launch a Pod with DNS tools: + +kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools + +2. Query the DNS server: + +/ # host kubernetes diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/_helpers.tpl b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/_helpers.tpl new file mode 100644 index 0000000..6b089e7 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/_helpers.tpl @@ -0,0 +1,158 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "coredns.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "coredns.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.servicePorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq (default false .use_tcp) true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := 
set $ports $port $innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- if index $innerdict "isudp" -}} + {{- printf "- {port: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- printf "- {port: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.containerPorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq (default false .use_tcp) true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + 
{{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := set $ports $port $innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- if index $innerdict "isudp" -}} + {{- printf "- {containerPort: %v, protocol: UDP, name: udp-%s}\n" $port $port -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- printf "- {containerPort: %v, protocol: TCP, name: tcp-%s}\n" $port $port -}} + {{- end -}} + {{- end -}} +{{- end -}} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "coredns.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "coredns.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole-autoscaler.yaml new file mode 100644 index 0000000..b40bb0a --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole-autoscaler.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list","watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] +# Remove the configmaps rule once below issue is fixed: +# kubernetes-incubator/cluster-proportional-autoscaler#16 + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole.yaml new file mode 100644 index 0000000..4203a02 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrole.yaml @@ -0,0 +1,38 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +{{- if .Values.rbac.pspEnable }} +- apiGroups: + - policy + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "coredns.fullname" . }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding-autoscaler.yaml new file mode 100644 index 0000000..d1ff736 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding-autoscaler.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }}-autoscaler +subjects: +- kind: ServiceAccount + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..7ae9d4f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "coredns.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap-autoscaler.yaml new file mode 100644 index 0000000..608a0b7 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap-autoscaler.yaml @@ -0,0 +1,37 @@ +{{- if .Values.autoscaler.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + {{- if .Values.customLabels }} + {{- toYaml .Values.customLabels | nindent 4 }} + {{- end }} + {{- if .Values.autoscaler.configmap.annotations }} + annotations: + {{- toYaml .Values.autoscaler.configmap.annotations | nindent 4 }} + {{- end }} +data: + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + linear: |- + { + "coresPerReplica": {{ .Values.autoscaler.coresPerReplica | float64 }}, + "nodesPerReplica": {{ .Values.autoscaler.nodesPerReplica | float64 }}, + "preventSinglePointFailure": {{ .Values.autoscaler.preventSinglePointFailure }}, + "min": {{ .Values.autoscaler.min | int }}, + "max": {{ .Values.autoscaler.max | int }}, + "includeUnschedulableNodes": {{ .Values.autoscaler.includeUnschedulableNodes }} + } +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap.yaml new file mode 100644 index 0000000..b5069d3 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/configmap.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "coredns.fullname" . 
}} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +data: + Corefile: |- + {{ range .Values.servers }} + {{- range $idx, $zone := .zones }}{{ if $idx }} {{ else }}{{ end }}{{ default "" $zone.scheme }}{{ default "." $zone.zone }}{{ else }}.{{ end -}} + {{- if .port }}:{{ .port }} {{ end -}} + { + {{- range .plugins }} + {{ .name }} {{ if .parameters }} {{if eq .name "kubernetes" }} {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDomain }} {{ end }} {{.parameters}}{{ end }}{{ if .configBlock }} { +{{ .configBlock | indent 12 }} + }{{ end }} + {{- end }} + } + {{ end }} + {{- range .Values.zoneFiles }} + {{ .filename }}: {{ toYaml .contents | indent 4 }} + {{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment-autoscaler.yaml new file mode 100644 index 0000000..8461532 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment-autoscaler.yaml @@ -0,0 +1,77 @@ +{{- if and (.Values.autoscaler.enabled) (not .Values.hpa.enabled) }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + template: + metadata: + labels: + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.customLabels }} + {{ toYaml .Values.customLabels | nindent 8 }} + {{- end }} + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/configmap-autoscaler.yaml") . | sha256sum }} + {{- if .Values.isClusterService }} + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' + {{- end }} + spec: + serviceAccountName: {{ template "coredns.fullname" . 
}}-autoscaler + {{- $priorityClassName := default .Values.priorityClassName .Values.autoscaler.priorityClassName }} + {{- if $priorityClassName }} + priorityClassName: {{ $priorityClassName | quote }} + {{- end }} + {{- if .Values.autoscaler.affinity }} + affinity: +{{ toYaml .Values.autoscaler.affinity | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.tolerations }} + tolerations: +{{ toYaml .Values.autoscaler.tolerations | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.nodeSelector }} + nodeSelector: +{{ toYaml .Values.autoscaler.nodeSelector | indent 8 }} + {{- end }} + containers: + - name: autoscaler + image: {{ template "system_default_registry" . }}{{ .Values.autoscaler.image.repository }}:{{ .Values.autoscaler.image.tag }} + imagePullPolicy: {{ .Values.autoscaler.image.pullPolicy }} + resources: +{{ toYaml .Values.autoscaler.resources | indent 10 }} + command: + - /cluster-proportional-autoscaler + - --namespace={{ .Release.Namespace }} + - --configmap={{ template "coredns.fullname" . }}-autoscaler + - --target=Deployment/{{ template "coredns.fullname" . 
}} + - --logtostderr=true + - --v=2 +{{- end }} diff --git a/charts/rke2-coredns/templates/deployment.yaml.orig b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment.yaml similarity index 93% rename from charts/rke2-coredns/templates/deployment.yaml.orig rename to charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment.yaml index be11dcd..e67dd15 100644 --- a/charts/rke2-coredns/templates/deployment.yaml.orig +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/deployment.yaml @@ -7,7 +7,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name | quote }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" {{- if .Values.isClusterService }} - k8s-app: {{ .Chart.Name | quote }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} kubernetes.io/cluster-service: "true" kubernetes.io/name: "CoreDNS" {{- end }} @@ -28,14 +28,14 @@ spec: matchLabels: app.kubernetes.io/instance: {{ .Release.Name | quote }} {{- if .Values.isClusterService }} - k8s-app: {{ .Chart.Name | quote }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} {{- end }} app.kubernetes.io/name: {{ template "coredns.name" . }} template: metadata: labels: {{- if .Values.isClusterService }} - k8s-app: {{ .Chart.Name | quote }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} {{- end }} app.kubernetes.io/name: {{ template "coredns.name" . }} app.kubernetes.io/instance: {{ .Release.Name | quote }} @@ -76,7 +76,7 @@ spec: {{- end }} containers: - name: "coredns" - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + image: {{ template "system_default_registry" . 
}}{{ .Values.image.repository }}:{{ .Values.image.tag }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: [ "-conf", "/etc/coredns/Corefile" ] volumeMounts: diff --git a/charts/rke2-coredns/templates/hpa.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/hpa.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-coredns/templates/hpa.yaml rename to charts/rke2-coredns/rke2-coredns/1.13.800/templates/hpa.yaml diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/poddisruptionbudget.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..1fee2de --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/poddisruptionbudget.yaml @@ -0,0 +1,28 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/podsecuritypolicy.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..4e7a36f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/podsecuritypolicy.yaml @@ -0,0 +1,57 @@ +{{- if .Values.rbac.pspEnable }} +{{ if .Capabilities.APIVersions.Has "policy/v1beta1" }} +apiVersion: policy/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: PodSecurityPolicy +metadata: + name: {{ template "coredns.fullname" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- else }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- end }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # Add back CAP_NET_BIND_SERVICE so that coredns can run on port 53 + allowedCapabilities: + - CAP_NET_BIND_SERVICE + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/service-metrics.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/service-metrics.yaml new file mode 100644 index 0000000..0f99adf --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/service-metrics.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheus.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "coredns.fullname" . }}-metrics + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: +{{ toYaml .Values.prometheus.service.annotations | indent 4 }} +spec: + selector: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} + ports: + - name: metrics + port: 9153 + targetPort: 9153 +{{- end }} diff --git a/charts/rke2-coredns/templates/service.yaml.orig b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/service.yaml similarity index 85% rename from charts/rke2-coredns/templates/service.yaml.orig rename to charts/rke2-coredns/rke2-coredns/1.13.800/templates/service.yaml index 0ca5edf..d7124ac 100644 --- a/charts/rke2-coredns/templates/service.yaml.orig +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/service.yaml @@ -7,7 +7,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name | quote }} helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" {{- if .Values.isClusterService }} - k8s-app: {{ .Chart.Name | quote }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} kubernetes.io/cluster-service: "true" kubernetes.io/name: "CoreDNS" {{- end }} @@ -21,11 +21,13 @@ spec: selector: app.kubernetes.io/instance: {{ .Release.Name | quote }} {{- if .Values.isClusterService }} - k8s-app: {{ .Chart.Name | quote }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} {{- end }} app.kubernetes.io/name: {{ template "coredns.name" . }} {{- if .Values.service.clusterIP }} clusterIP: {{ .Values.service.clusterIP }} + {{ else }} + clusterIP: {{ (lookup "v1" "ConfigMap" "kube-system" "cluster-dns").data.clusterDNS }} {{- end }} {{- if .Values.service.externalIPs }} externalIPs: diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount-autoscaler.yaml new file mode 100644 index 0000000..1b218d2 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount-autoscaler.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.fullname" . 
}}-autoscaler + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name }}-autoscaler + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount.yaml new file mode 100644 index 0000000..23f29a1 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.serviceAccountName" . }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . 
}} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/templates/servicemonitor.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/servicemonitor.yaml new file mode 100644 index 0000000..ca0b691 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "coredns.fullname" . }} + {{- if .Values.prometheus.monitor.namespace }} + namespace: {{ .Values.prometheus.monitor.namespace }} + {{- end }} + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ .Values.k8sApp | default .Chart.Name | quote }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics + endpoints: + - port: metrics +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.13.800/values.yaml b/charts/rke2-coredns/rke2-coredns/1.13.800/values.yaml new file mode 100644 index 0000000..49a1e8b --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.13.800/values.yaml @@ -0,0 +1,259 @@ +# Default values for coredns. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +image: + repository: rancher/hardened-coredns + tag: "v1.7.1" + pullPolicy: IfNotPresent + +replicaCount: 1 + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +## Create HorizontalPodAutoscaler object. +## +# autoscaling: +# minReplicas: 1 +# maxReplicas: 10 +# metrics: +# - type: Resource +# resource: +# name: cpu +# targetAverageUtilization: 60 +# - type: Resource +# resource: +# name: memory +# targetAverageUtilization: 60 + +rollingUpdate: + maxUnavailable: 1 + maxSurge: 25% + +# Under heavy load it takes more than standard time to remove Pod endpoint from a cluster. +# This will delay termination of our pod by `preStopSleep`. To make sure kube-proxy has +# enough time to catch up. +# preStopSleep: 5 +terminationGracePeriodSeconds: 30 + +podAnnotations: {} +# cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + +serviceType: "ClusterIP" + +prometheus: + service: + enabled: false + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9153" + monitor: + enabled: false + additionalLabels: {} + namespace: "" + +service: +# clusterIP: "" +# loadBalancerIP: "" +# externalIPs: [] +# externalTrafficPolicy: "" + annotations: {} + +serviceAccount: + create: true + # The name of the ServiceAccount to use + # If not set and create is true, a name is generated using the fullname template + name: coredns + +rbac: + # If true, create & use RBAC resources + create: true + # If true, create and use PodSecurityPolicy + pspEnable: false + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + # name: + +# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app. +isClusterService: true + +# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set. 
+priorityClassName: "system-cluster-critical" + +# Default zone is what Kubernetes recommends: +# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options +servers: +- zones: + - zone: . + port: 53 + plugins: + - name: errors + # Serves a /health endpoint on :8080, required for livenessProbe + - name: health + configBlock: |- + lameduck 5s + # Serves a /ready endpoint on :8181, required for readinessProbe + - name: ready + # Required to query kubernetes API for data + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + # Serves a /metrics endpoint on :9153, required for serviceMonitor + - name: prometheus + parameters: 0.0.0.0:9153 + - name: forward + parameters: . /etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance + +# Complete example with all the options: +# - zones: # the `zones` block can be left out entirely, defaults to "." +# - zone: hello.world. # optional, defaults to "." +# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS) +# - zone: foo.bar. +# scheme: dns:// +# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol +# # Note that this will not work if you are also exposing tls or grpc on the same server +# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS) +# plugins: # the plugins to use for this server block +# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it! 
+# parameters: foo bar # list of parameters after the plugin +# configBlock: |- # if the plugin supports extra block style config, supply it here +# hello world +# foo bar + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core +# for example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core +# for example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] + +# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +podDisruptionBudget: {} + +# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/ +zoneFiles: [] +# - filename: example.db +# domain: example.com +# contents: | +# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600 +# example.com. IN NS b.iana-servers.net. +# example.com. IN NS a.iana-servers.net. +# example.com. IN A 192.168.99.102 +# *.example.com. 
IN A 192.168.99.102 + +# optional array of extra volumes to create +extraVolumes: [] +# - name: some-volume-name +# emptyDir: {} +# optional array of mount points for extraVolumes +extraVolumeMounts: [] +# - name: some-volume-name +# mountPath: /etc/wherever + +# optional array of secrets to mount inside coredns container +# possible usecase: need for secure connection with etcd backend +extraSecrets: [] +# - name: etcd-client-certs +# mountPath: /etc/coredns/tls/etcd +# - name: some-fancy-secret +# mountPath: /etc/wherever + +# Custom labels to apply to Deployment, Pod, Service, ServiceMonitor. Including autoscaler if enabled. +customLabels: {} + +## Alternative configuration for HPA deployment if wanted +# +hpa: + enabled: false + minReplicas: 1 + maxReplicas: 2 + metrics: {} + +## Configure a cluster-proportional-autoscaler for coredns +# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler +autoscaler: + # Enable the cluster-proportional-autoscaler + enabled: false + + # Number of cores in the cluster per coredns replica + coresPerReplica: 256 + # Number of nodes in the cluster per coredns replica + nodesPerReplica: 16 + # Min size of replicaCount + min: 0 + # Max size of replicaCount (default of 0 is no max) + max: 0 + # Whether to include unschedulable nodes in the nodes/cores calculations - this requires version 1.8.0+ of the autoscaler + includeUnschedulableNodes: false + # If true does not allow single points of failure to form + preventSinglePointFailure: true + + image: + repository: k8s.gcr.io/cluster-proportional-autoscaler-amd64 + tag: "1.8.0" + pullPolicy: IfNotPresent + + # Optional priority class to be used for the autoscaler pods. priorityClassName used if not set. 
+ priorityClassName: "" + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core + affinity: {} + + # Node labels for pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core + tolerations: [] + + # resources for autoscaler pod + resources: + requests: + cpu: "20m" + memory: "10Mi" + limits: + cpu: "20m" + memory: "10Mi" + + # Options for autoscaler configmap + configmap: + ## Annotations for the coredns-autoscaler configmap + # i.e. strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed + annotations: {} +k8sApp : "kube-dns" + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/.helmignore b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/Chart.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/Chart.yaml new file mode 100644 index 0000000..45c8c14 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +appVersion: 0.30.0 +description: An nginx Ingress controller that uses ConfigMap to store the nginx configuration. 
+home: https://github.com/kubernetes/ingress-nginx +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png +keywords: +- ingress +- nginx +kubeVersion: '>=1.10.0-0' +maintainers: +- name: ChiefAlexander +- email: Trevor.G.Wood@gmail.com + name: taharah +name: rke2-ingress-nginx +sources: +- https://github.com/kubernetes/ingress-nginx +version: 1.36.300 diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/OWNERS b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/OWNERS new file mode 100644 index 0000000..0001de3 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/OWNERS @@ -0,0 +1,6 @@ +approvers: +- ChiefAlexander +- taharah +reviewers: +- ChiefAlexander +- taharah diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/README.md b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/README.md new file mode 100644 index 0000000..87dfdb4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/README.md @@ -0,0 +1,361 @@ +# nginx-ingress + +[nginx-ingress](https://github.com/kubernetes/ingress-nginx) is an Ingress controller that uses ConfigMap to store the nginx configuration. + +To use, add the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +## TL;DR; + +```console +$ helm install stable/nginx-ingress +``` + +## Introduction + +This chart bootstraps an nginx-ingress deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + + - Kubernetes 1.6+ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/nginx-ingress +``` + +The command deploys nginx-ingress on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the nginx-ingress chart and their default values. + +Parameter | Description | Default +--- | --- | --- +`controller.name` | name of the controller component | `controller` +`controller.image.repository` | controller container image repository | `quay.io/kubernetes-ingress-controller/nginx-ingress-controller` +`controller.image.tag` | controller container image tag | `0.30.0` +`controller.image.pullPolicy` | controller container image pull policy | `IfNotPresent` +`controller.image.runAsUser` | User ID of the controller process. Value depends on the Linux distribution used inside of the container image. | `101` +`controller.useComponentLabel` | Wether to add component label so the HPA can work separately for controller and defaultBackend. *Note: don't change this if you have an already running deployment as it will need the recreation of the controller deployment* | `false` +`controller.containerPort.http` | The port that the controller container listens on for http connections. | `80` +`controller.containerPort.https` | The port that the controller container listens on for https connections. | `443` +`controller.config` | nginx [ConfigMap](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md) entries | none +`controller.hostNetwork` | If the nginx deployment / daemonset should run on the host's network namespace. 
Do not set this when `controller.service.externalIPs` is set and `kube-proxy` is used as there will be a port-conflict for port `80` | false +`controller.defaultBackendService` | default 404 backend service; needed only if `defaultBackend.enabled = false` and version < 0.21.0| `""` +`controller.dnsPolicy` | If using `hostNetwork=true`, change to `ClusterFirstWithHostNet`. See [pod's dns policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) for details | `ClusterFirst` +`controller.dnsConfig` | custom pod dnsConfig. See [pod's dns config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-config) for details | `{}` +`controller.reportNodeInternalIp` | If using `hostNetwork=true`, setting `reportNodeInternalIp=true`, will pass the flag `report-node-internal-ip-address` to nginx-ingress. This sets the status of all Ingress objects to the internal IP address of all nodes running the NGINX Ingress controller. +`controller.electionID` | election ID to use for the status update | `ingress-controller-leader` +`controller.extraEnvs` | any additional environment variables to set in the pods | `{}` +`controller.extraContainers` | Sidecar containers to add to the controller pod. See [LemonLDAP::NG controller](https://github.com/lemonldap-ng-controller/lemonldap-ng-controller) as example | `{}` +`controller.extraVolumeMounts` | Additional volumeMounts to the controller main container | `{}` +`controller.extraVolumes` | Additional volumes to the controller pod | `{}` +`controller.extraInitContainers` | Containers, which are run before the app containers are started | `[]` +`controller.ingressClass` | name of the ingress class to route through this controller | `nginx` +`controller.maxmindLicenseKey` | Maxmind license key to download GeoLite2 Databases. 
See [Accessing and using GeoLite2 database](https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases/) | `""` +`controller.scope.enabled` | limit the scope of the ingress controller | `false` (watch all namespaces) +`controller.scope.namespace` | namespace to watch for ingress | `""` (use the release namespace) +`controller.extraArgs` | Additional controller container arguments | `{}` +`controller.kind` | install as Deployment, DaemonSet or Both | `Deployment` +`controller.deploymentAnnotations` | annotations to be added to deployment | `{}` +`controller.autoscaling.enabled` | If true, creates Horizontal Pod Autoscaler | false +`controller.autoscaling.minReplicas` | If autoscaling enabled, this field sets minimum replica count | `2` +`controller.autoscaling.maxReplicas` | If autoscaling enabled, this field sets maximum replica count | `11` +`controller.autoscaling.targetCPUUtilizationPercentage` | Target CPU utilization percentage to scale | `"50"` +`controller.autoscaling.targetMemoryUtilizationPercentage` | Target memory utilization percentage to scale | `"50"` +`controller.daemonset.useHostPort` | If `controller.kind` is `DaemonSet`, this will enable `hostPort` for TCP/80 and TCP/443 | false +`controller.daemonset.hostPorts.http` | If `controller.daemonset.useHostPort` is `true` and this is non-empty, it sets the hostPort | `"80"` +`controller.daemonset.hostPorts.https` | If `controller.daemonset.useHostPort` is `true` and this is non-empty, it sets the hostPort | `"443"` +`controller.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`controller.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`controller.terminationGracePeriodSeconds` | how many seconds to wait before terminating a pod | `60` +`controller.minReadySeconds` | how many seconds a pod needs to be ready before killing the next, during update | `0` +`controller.nodeSelector` | node labels for pod assignment | `{}` 
+`controller.podAnnotations` | annotations to be added to pods | `{}` +`controller.deploymentLabels` | labels to add to the deployment metadata | `{}` +`controller.podLabels` | labels to add to the pod container metadata | `{}` +`controller.podSecurityContext` | Security context policies to add to the controller pod | `{}` +`controller.replicaCount` | desired number of controller pods | `1` +`controller.minAvailable` | minimum number of available controller pods for PodDisruptionBudget | `1` +`controller.resources` | controller pod resource requests & limits | `{}` +`controller.priorityClassName` | controller priorityClassName | `nil` +`controller.lifecycle` | controller pod lifecycle hooks | `{}` +`controller.service.annotations` | annotations for controller service | `{}` +`controller.service.labels` | labels for controller service | `{}` +`controller.publishService.enabled` | if true, the controller will set the endpoint records on the ingress objects to reflect those on the service | `false` +`controller.publishService.pathOverride` | override of the default publish-service name | `""` +`controller.service.enabled` | if disabled no service will be created. This is especially useful when `controller.kind` is set to `DaemonSet` and `controller.daemonset.useHostPorts` is `true` | true +`controller.service.clusterIP` | internal controller cluster service IP (set to `"-"` to pass an empty value) | `nil` +`controller.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the controller service | `false` +`controller.service.externalIPs` | controller service external IP addresses. 
Do not set this when `controller.hostNetwork` is set to `true` and `kube-proxy` is used as there will be a port-conflict for port `80` | `[]` +`controller.service.externalTrafficPolicy` | If `controller.service.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable [source IP preservation](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport) | `"Cluster"` +`controller.service.sessionAffinity` | Enables client IP based session affinity. Must be `ClientIP` or `None` if set. | `""` +`controller.service.healthCheckNodePort` | If `controller.service.type` is `NodePort` or `LoadBalancer` and `controller.service.externalTrafficPolicy` is set to `Local`, set this to [the managed health-check port the kube-proxy will expose](https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typenodeport). If blank, a random port in the `NodePort` range will be assigned | `""` +`controller.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.service.enableHttp` | if port 80 should be opened for service | `true` +`controller.service.enableHttps` | if port 443 should be opened for service | `true` +`controller.service.targetPorts.http` | Sets the targetPort that maps to the Ingress' port 80 | `80` +`controller.service.targetPorts.https` | Sets the targetPort that maps to the Ingress' port 443 | `443` +`controller.service.ports.http` | Sets service http port | `80` +`controller.service.ports.https` | Sets service https port | `443` +`controller.service.type` | type of controller service to create | `LoadBalancer` +`controller.service.nodePorts.http` | If `controller.service.type` is either `NodePort` or `LoadBalancer` and this is non-empty, it sets the nodePort that maps to the Ingress' port 80 | `""` +`controller.service.nodePorts.https` 
| If `controller.service.type` is either `NodePort` or `LoadBalancer` and this is non-empty, it sets the nodePort that maps to the Ingress' port 443 | `""` +`controller.service.nodePorts.tcp` | Sets the nodePort for an entry referenced by its key from `tcp` | `{}` +`controller.service.nodePorts.udp` | Sets the nodePort for an entry referenced by its key from `udp` | `{}` +`controller.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 10 +`controller.livenessProbe.periodSeconds` | How often to perform the probe | 10 +`controller.livenessProbe.timeoutSeconds` | When the probe times out | 5 +`controller.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`controller.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`controller.livenessProbe.port` | The port number that the liveness probe will listen on. | 10254 +`controller.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 10 +`controller.readinessProbe.periodSeconds` | How often to perform the probe | 10 +`controller.readinessProbe.timeoutSeconds` | When the probe times out | 1 +`controller.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`controller.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`controller.readinessProbe.port` | The port number that the readiness probe will listen on. 
| 10254 +`controller.metrics.enabled` | if `true`, enable Prometheus metrics | `false` +`controller.metrics.service.annotations` | annotations for Prometheus metrics service | `{}` +`controller.metrics.service.clusterIP` | cluster IP address to assign to service (set to `"-"` to pass an empty value) | `nil` +`controller.metrics.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the metrics service | `false` +`controller.metrics.service.externalIPs` | Prometheus metrics service external IP addresses | `[]` +`controller.metrics.service.labels` | labels for metrics service | `{}` +`controller.metrics.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.metrics.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.metrics.service.servicePort` | Prometheus metrics service port | `9913` +`controller.metrics.service.type` | type of Prometheus metrics service to create | `ClusterIP` +`controller.metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` +`controller.metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` +`controller.metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels. 
| `false` +`controller.metrics.serviceMonitor.namespace` | namespace where servicemonitor resource should be created | `the same namespace as nginx ingress` +`controller.metrics.serviceMonitor.namespaceSelector` | [namespaceSelector](https://github.com/coreos/prometheus-operator/blob/v0.34.0/Documentation/api.md#namespaceselector) to configure what namespaces to scrape | `will scrape the helm release namespace only` +`controller.metrics.serviceMonitor.scrapeInterval` | interval between Prometheus scraping | `30s` +`controller.metrics.prometheusRule.enabled` | Set this to `true` to create prometheusRules for Prometheus operator | `false` +`controller.metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` +`controller.metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `the same namespace as nginx ingress` +`controller.metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be deployed to Prometheus in YAML format, check values for an example. | `[]` +`controller.admissionWebhooks.enabled` | Create Ingress admission webhooks. Validating webhook will check the ingress syntax. 
| `false` +`controller.admissionWebhooks.failurePolicy` | Failure policy for admission webhooks | `Fail` +`controller.admissionWebhooks.port` | Admission webhook port | `8080` +`controller.admissionWebhooks.service.annotations` | Annotations for admission webhook service | `{}` +`controller.admissionWebhooks.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the admission webhook service | `false` +`controller.admissionWebhooks.service.clusterIP` | cluster IP address to assign to admission webhook service (set to `"-"` to pass an empty value) | `nil` +`controller.admissionWebhooks.service.externalIPs` | Admission webhook service external IP addresses | `[]` +`controller.admissionWebhooks.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`controller.admissionWebhooks.service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | `[]` +`controller.admissionWebhooks.service.servicePort` | Admission webhook service port | `443` +`controller.admissionWebhooks.service.type` | Type of admission webhook service to create | `ClusterIP` +`controller.admissionWebhooks.patch.enabled` | If true, will use a pre and post install hooks to generate a CA and certificate to use for validating webhook endpoint, and patch the created webhooks with the CA. 
| `true` +`controller.admissionWebhooks.patch.image.repository` | Repository to use for the webhook integration jobs | `jettech/kube-webhook-certgen` +`controller.admissionWebhooks.patch.image.tag` | Tag to use for the webhook integration jobs | `v1.0.0` +`controller.admissionWebhooks.patch.image.pullPolicy` | Image pull policy for the webhook integration jobs | `IfNotPresent` +`controller.admissionWebhooks.patch.priorityClassName` | Priority class for the webhook integration jobs | `""` +`controller.admissionWebhooks.patch.podAnnotations` | Annotations for the webhook job pods | `{}` +`controller.admissionWebhooks.patch.nodeSelector` | Node selector for running admission hook patch jobs | `{}` +`controller.customTemplate.configMapName` | configMap containing a custom nginx template | `""` +`controller.customTemplate.configMapKey` | configMap key containing the nginx template | `""` +`controller.addHeaders` | configMap key:value pairs containing [custom headers](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers) added before sending response to the client | `{}` +`controller.proxySetHeaders` | configMap key:value pairs containing [custom headers](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#proxy-set-headers) added before sending request to the backends| `{}` +`controller.headers` | DEPRECATED, Use `controller.proxySetHeaders` instead. 
| `{}` +`controller.updateStrategy` | allows setting of RollingUpdate strategy | `{}` +`controller.configMapNamespace` | The nginx-configmap namespace name | `""` +`controller.tcp.configMapNamespace` | The tcp-services-configmap namespace name | `""` +`controller.udp.configMapNamespace` | The udp-services-configmap namespace name | `""` +`defaultBackend.enabled` | Use default backend component | `true` +`defaultBackend.name` | name of the default backend component | `default-backend` +`defaultBackend.image.repository` | default backend container image repository | `k8s.gcr.io/defaultbackend-amd64` +`defaultBackend.image.tag` | default backend container image tag | `1.5` +`defaultBackend.image.pullPolicy` | default backend container image pull policy | `IfNotPresent` +`defaultBackend.image.runAsUser` | User ID of the controller process. Value depends on the Linux distribution used inside of the container image. By default uses nobody user. | `65534` +`defaultBackend.useComponentLabel` | Whether to add component label so the HPA can work separately for controller and defaultBackend. *Note: don't change this if you have an already running deployment as it will need the recreation of the defaultBackend deployment* | `false` +`defaultBackend.extraArgs` | Additional default backend container arguments | `{}` +`defaultBackend.extraEnvs` | any additional environment variables to set in the defaultBackend pods | `[]` +`defaultBackend.port` | Http port number | `8080` +`defaultBackend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 +`defaultBackend.livenessProbe.periodSeconds` | How often to perform the probe | 10 +`defaultBackend.livenessProbe.timeoutSeconds` | When the probe times out | 5 +`defaultBackend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| 1 +`defaultBackend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 +`defaultBackend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 0 +`defaultBackend.readinessProbe.periodSeconds` | How often to perform the probe | 5 +`defaultBackend.readinessProbe.timeoutSeconds` | When the probe times out | 5 +`defaultBackend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 +`defaultBackend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 +`defaultBackend.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`defaultBackend.affinity` | node/pod affinities (requires Kubernetes >=1.6) | `{}` +`defaultBackend.nodeSelector` | node labels for pod assignment | `{}` +`defaultBackend.podAnnotations` | annotations to be added to pods | `{}` +`defaultBackend.deploymentLabels` | labels to add to the deployment metadata | `{}` +`defaultBackend.podLabels` | labels to add to the pod container metadata | `{}` +`defaultBackend.replicaCount` | desired number of default backend pods | `1` +`defaultBackend.minAvailable` | minimum number of available default backend pods for PodDisruptionBudget | `1` +`defaultBackend.resources` | default backend pod resource requests & limits | `{}` +`defaultBackend.priorityClassName` | default backend priorityClassName | `nil` +`defaultBackend.podSecurityContext` | Security context policies to add to the default backend | `{}` +`defaultBackend.service.annotations` | annotations for default backend service | `{}` +`defaultBackend.service.clusterIP` | internal default backend cluster service IP (set to `"-"` to pass an empty value) | `nil` +`defaultBackend.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the default backend service | `false` 
+`defaultBackend.service.externalIPs` | default backend service external IP addresses | `[]` +`defaultBackend.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`defaultBackend.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`defaultBackend.service.type` | type of default backend service to create | `ClusterIP` +`defaultBackend.serviceAccount.create` | if `true`, create a backend service account. Only useful if you need a pod security policy to run the backend. | `true` +`defaultBackend.serviceAccount.name` | The name of the backend service account to use. If not set and `create` is `true`, a name is generated using the fullname template. Only useful if you need a pod security policy to run the backend. | `` +`imagePullSecrets` | name of Secret resource containing private registry credentials | `nil` +`rbac.create` | if `true`, create & use RBAC resources | `true` +`rbac.scope` | if `true`, do not create & use clusterrole and -binding. Set to `true` in combination with `controller.scope.enabled=true` to disable load-balancer status updates and scope the ingress entirely. | `false` +`podSecurityPolicy.enabled` | if `true`, create & use Pod Security Policy resources | `false` +`serviceAccount.create` | if `true`, create a service account for the controller | `true` +`serviceAccount.name` | The name of the controller service account to use. If not set and `create` is `true`, a name is generated using the fullname template. | `` +`revisionHistoryLimit` | The number of old history to retain to allow rollback. | `10` +`tcp` | TCP service key:value pairs. The value is evaluated as a template. | `{}` +`udp` | UDP service key:value pairs. The value is evaluated as a template. 
| `{}` +`releaseLabelOverride` | If provided, the value will be used as the `release` label instead of .Release.Name | `""` + +These parameters can be passed via Helm's `--set` option +```console +$ helm install stable/nginx-ingress --name my-release \ + --set controller.metrics.enabled=true +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install stable/nginx-ingress --name my-release -f values.yaml +``` + +A useful trick to debug issues with ingress is to increase the logLevel +as described [here](https://github.com/kubernetes/ingress-nginx/blob/master/docs/troubleshooting.md#debug) + +```console +$ helm install stable/nginx-ingress --set controller.extraArgs.v=2 +``` +> **Tip**: You can use the default [values.yaml](values.yaml) + +## PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +## Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics. + +```console +$ helm install stable/nginx-ingress --name my-release \ + --set controller.metrics.enabled=true +``` + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. 
+ +## nginx-ingress nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: +* in [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +* in [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0230) to re-enable the http server + +## ExternalDNS Service configuration + +Add an [ExternalDNS](https://github.com/kubernetes-sigs/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. +``` + +## AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/master/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +## AWS L4 NLB with SSL Redirection + +`ssl-redirect` and `force-ssl-redirect` flag are not working with AWS Network Load Balancer. 
You need to turn it off and add additional port with `server-snippet` in order to make it work. + +The port NLB `80` will be mapped to nginx container port `80` and NLB port `443` will be mapped to nginx container port `8000` (special). Then we use `$server_port` to manage redirection on port `80` +``` +controller: + config: + ssl-redirect: "false" # we use `special` port to control ssl redirection + server-snippet: | + listen 8000; + if ( $server_port = 80 ) { + return 308 https://$host$request_uri; + } + containerPort: + http: 80 + https: 443 + special: 8000 + service: + targetPorts: + http: http + https: special + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443" + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "your-arn" + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +``` + +## AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +## Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. 
+ +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +## Helm error when upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +``` +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customconfig-values.yaml new file mode 100644 index 0000000..f12eac3 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + config: + use-proxy-protocol: "true" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 0000000..382bc50 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,15 @@ +controller: + kind: DaemonSet + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-headers-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-headers-values.yaml new file mode 100644 index 0000000..a29690f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-headers-values.yaml @@ -0,0 +1,6 @@ +controller: + kind: DaemonSet + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-nodeport-values.yaml new file mode 100644 index 0000000..ebc8f10 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + service: + type: NodePort diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..3484704 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-values.yaml new file mode 100644 index 0000000..e6866d7 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-udp-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-values.yaml new file mode 100644 index 0000000..f0a6060 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/daemonset-tcp-values.yaml @@ -0,0 +1,6 @@ +controller: + kind: DaemonSet + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-default-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-default-values.yaml new file mode 100644 index 0000000..ddb2562 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-default-values.yaml @@ -0,0 +1,2 @@ +controller: + kind: DaemonSet diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-metrics-values.yaml new file mode 100644 index 0000000..5ce435d --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-metrics-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + metrics: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-psp-values.yaml new file mode 100644 index 0000000..b441c1a --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-psp-values.yaml @@ -0,0 +1,5 @@ +controller: + kind: DaemonSet + +podSecurityPolicy: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-and-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-and-psp-values.yaml new file mode 100644 index 0000000..2cf9d6f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-and-psp-values.yaml @@ -0,0 +1,7 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: 
true + +podSecurityPolicy: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-values.yaml new file mode 100644 index 0000000..2d2cb47 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deamonset-webhook-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-autoscaling-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-autoscaling-values.yaml new file mode 100644 index 0000000..e9701da --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-autoscaling-values.yaml @@ -0,0 +1,3 @@ +controller: + autoscaling: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customconfig-values.yaml new file mode 100644 index 0000000..401aea4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customconfig-values.yaml @@ -0,0 +1,3 @@ +controller: + config: + use-proxy-protocol: "true" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customnodeport-values.yaml new file mode 100644 index 0000000..6958eaa --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,14 @@ +controller: + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-default-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-default-values.yaml new file mode 100644 index 0000000..b15f0e4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-default-values.yaml @@ -0,0 +1 @@ +# Left blank to test default values diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-headers-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-headers-values.yaml new file mode 100644 index 0000000..f3873af --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-headers-values.yaml @@ -0,0 +1,5 @@ +controller: + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-metrics-values.yaml new file mode 100644 index 0000000..9a93fa5 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-metrics-values.yaml @@ -0,0 +1,3 @@ +controller: + metrics: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-nodeport-values.yaml new file mode 100644 index 0000000..ffdc47b --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-nodeport-values.yaml @@ -0,0 +1,3 @@ +controller: + service: + type: NodePort diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-psp-values.yaml new file mode 100644 index 0000000..7aae860 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-psp-values.yaml @@ -0,0 +1,2 @@ +podSecurityPolicy: + enabled: true diff --git 
a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..7b06c1e --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,13 @@ +controller: + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-values.yaml new file mode 100644 index 0000000..7c55d44 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-udp-values.yaml @@ -0,0 +1,9 @@ +controller: + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-values.yaml new file mode 100644 index 0000000..c8bc204 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-tcp-values.yaml @@ -0,0 +1,3 @@ +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-and-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-and-psp-values.yaml new file mode 100644 index 0000000..0590d7c --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-and-psp-values.yaml @@ -0,0 +1,6 @@ +controller: + admissionWebhooks: + enabled: true + +podSecurityPolicy: + enabled: true diff --git 
a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-values.yaml new file mode 100644 index 0000000..07e1a92 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/ci/deployment-webhook-values.yaml @@ -0,0 +1,3 @@ +controller: + admissionWebhooks: + enabled: true diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/NOTES.txt b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/NOTES.txt new file mode 100644 index 0000000..e18a901 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/NOTES.txt @@ -0,0 +1,71 @@ +The nginx-ingress controller has been installed. + +{{- if contains "NodePort" .Values.controller.service.type }} +Get the application URL by running these commands: + +{{- if (not (empty .Values.controller.service.nodePorts.http)) }} + export HTTP_NODE_PORT={{ .Values.controller.service.nodePorts.http }} +{{- else }} + export HTTP_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[0].nodePort}" {{ template "nginx-ingress.controller.fullname" . }}) +{{- end }} +{{- if (not (empty .Values.controller.service.nodePorts.https)) }} + export HTTPS_NODE_PORT={{ .Values.controller.service.nodePorts.https }} +{{- else }} + export HTTPS_NODE_PORT=$(kubectl --namespace {{ .Release.Namespace }} get services -o jsonpath="{.spec.ports[1].nodePort}" {{ template "nginx-ingress.controller.fullname" . }}) +{{- end }} + export NODE_IP=$(kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + + echo "Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP." + echo "Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS." 
+{{- else if contains "LoadBalancer" .Values.controller.service.type }} +It may take a few minutes for the LoadBalancer IP to be available. +You can watch the status by running 'kubectl --namespace {{ .Release.Namespace }} get services -o wide -w {{ template "nginx-ingress.controller.fullname" . }}' +{{- else if contains "ClusterIP" .Values.controller.service.type }} +Get the application URL by running these commands: + export POD_NAME=$(kubectl --namespace {{ .Release.Namespace }} get pods -o jsonpath="{.items[0].metadata.name}" -l "app={{ template "nginx-ingress.name" . }},component={{ .Values.controller.name }},release={{ template "nginx-ingress.releaseLabel" . }}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 + echo "Visit http://127.0.0.1:8080 to access your application." +{{- end }} + +An example Ingress that makes use of the controller: + + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + annotations: + kubernetes.io/ingress.class: {{ .Values.controller.ingressClass }} + name: example + namespace: foo + spec: + rules: + - host: www.example.com + http: + paths: + - backend: + serviceName: exampleService + servicePort: 80 + path: / + # This section is only required if TLS is to be enabled for the Ingress + tls: + - hosts: + - www.example.com + secretName: example-tls + +If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: + + apiVersion: v1 + kind: Secret + metadata: + name: example-tls + namespace: foo + data: + tls.crt: + tls.key: + type: kubernetes.io/tls + +{{- if .Values.controller.headers }} +################################################################################# +###### WARNING: `controller.headers` has been deprecated! ##### +###### It has been renamed to `controller.proxySetHeaders`. 
##### +################################################################################# +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/_helpers.tpl b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/_helpers.tpl new file mode 100644 index 0000000..1881171 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/_helpers.tpl @@ -0,0 +1,134 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nginx-ingress.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nginx-ingress.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.controller.fullname" -}} +{{- printf "%s-%s" (include "nginx-ingress.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* +Allow for the ability to override the release name used as a label in many places. 
+*/}} +{{- define "nginx-ingress.releaseLabel" -}} +{{- .Values.releaseLabelOverride | default .Release.Name | trunc 63 -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the / to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} + +{{- define "nginx-ingress.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" .Release.Namespace (include "nginx-ingress.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx-ingress.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "nginx-ingress.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "nginx-ingress.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "nginx-ingress.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "nginx-ingress.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "nginx-ingress.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. 
+*/}} +{{- define "deployment.apiVersion" -}} +{{- if semverCompare ">=1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/templates/addheaders-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/addheaders-configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/addheaders-configmap.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/addheaders-configmap.yaml diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..a248326 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole 
+metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "nginx-ingress.fullname" . }}-admission +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..c99fdf8 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "nginx-ingress.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.fullname" . }}-admission + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-createSecret.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..e0d2c04 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,55 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-create + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +spec: + {{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 + {{- end }} + template: + metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-create +{{- with .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + containers: + - name: create + image: {{ template "system_default_registry" . }}{{ .Values.controller.admissionWebhooks.patch.image.repository }}:{{ .Values.controller.admissionWebhooks.patch.image.tag }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ template "nginx-ingress.controller.fullname" . }}-admission,{{ template "nginx-ingress.controller.fullname" . }}-admission.{{ .Release.Namespace }}.svc + - --namespace={{ .Release.Namespace }} + - --secret-name={{ template "nginx-ingress.fullname". }}-admission + restartPolicy: OnFailure + serviceAccountName: {{ template "nginx-ingress.fullname" . }}-admission + {{- with .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 2000 +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..4f60fd9 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission-patch + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}}
+    component: "{{ .Values.controller.name }}"
+    heritage: {{ .Release.Service }}
+    release: {{ template "nginx-ingress.releaseLabel" . }}
+spec:
+  {{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }}
+  # Alpha feature since k8s 1.12
+  ttlSecondsAfterFinished: 0
+  {{- end }}
+  template:
+    metadata:
+      name: {{ template "nginx-ingress.fullname" . }}-admission-patch
+{{- with .Values.controller.admissionWebhooks.patch.podAnnotations }}
+      annotations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+      labels:
+        app: {{ template "nginx-ingress.name" . }}
+        chart: {{ template "nginx-ingress.chart" . }}
+        component: "{{ .Values.controller.name }}"
+        heritage: {{ .Release.Service }}
+        release: {{ template "nginx-ingress.releaseLabel" . }}
+    spec:
+    {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }}
+      priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }}
+    {{- end }}
+      containers:
+        - name: patch
+          image: {{ template "system_default_registry" . }}{{ .Values.controller.admissionWebhooks.patch.image.repository }}:{{ .Values.controller.admissionWebhooks.patch.image.tag }}
+          imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }}
+          args:
+            - patch
+            - --webhook-name={{ template "nginx-ingress.fullname" . }}-admission
+            - --namespace={{ .Release.Namespace }}
+            - --patch-mutating=false
+            - --secret-name={{ template "nginx-ingress.fullname" . }}-admission
+            - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }}
+      restartPolicy: OnFailure
+      serviceAccountName: {{ template "nginx-ingress.fullname" . }}-admission
+      {{- with .Values.controller.admissionWebhooks.patch.nodeSelector }}
+      nodeSelector:
+{{ toYaml . 
| indent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 2000 +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..a23f927 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..665769f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..0e4873f --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.fullname" . 
}}-admission + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..c0822f9 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "nginx-ingress.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/validating-webhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..cd962e5 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,31 @@ +{{- if .Values.controller.admissionWebhooks.enabled }} +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app: {{ template "nginx-ingress.name" . }}-admission + chart: {{ template "nginx-ingress.chart" . }} + component: "admission-webhook" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . 
}}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - extensions + - networking.k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + clientConfig: + service: + namespace: {{ .Release.Namespace }} + name: {{ template "nginx-ingress.controller.fullname" . }}-admission + path: /extensions/v1beta1/ingresses +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrole.yaml new file mode 100644 index 0000000..14667eb --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrole.yaml @@ -0,0 +1,71 @@ +{{- if and (.Values.rbac.create) (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..39decda --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and (.Values.rbac.create) (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "nginx-ingress.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-configmap.yaml new file mode 100644 index 0000000..25625b4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-configmap.yaml @@ -0,0 +1,22 @@ +{{- if or .Values.controller.config (or (or .Values.controller.proxySetHeaders .Values.controller.headers) .Values.controller.addHeaders) }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +data: +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . 
}}-custom-proxy-headers +{{- end }} +{{- if .Values.controller.config }} +{{ toYaml .Values.controller.config | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-daemonset.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-daemonset.yaml new file mode 100644 index 0000000..da79809 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-daemonset.yaml @@ -0,0 +1,257 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") }} +{{- $useHostPort := .Values.controller.daemonset.useHostPort -}} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: DaemonSet +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + name: {{ template "nginx-ingress.controller.fullname" . }} + annotations: +{{ toYaml .Values.controller.deploymentAnnotations | indent 4}} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.useComponentLabel }} + app.kubernetes.io/component: controller + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + updateStrategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + component: "{{ .Values.controller.name }}" + app.kubernetes.io/component: controller + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8}} + {{- end }} + spec: +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: "{{ .Values.controller.priorityClassName }}" +{{- end }} + {{- if .Values.controller.podSecurityContext }} + securityContext: +{{ toYaml .Values.controller.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.controller.name }} + image: {{ template "system_default_registry" . }}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }} + imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" + {{- if .Values.controller.lifecycle }} + lifecycle: +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + {{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "nginx-ingress.defaultBackend.fullname" . 
}}
+          {{- else }}
+          {{- if (semverCompare "<0.21.0" (trimPrefix "nginx-" .Values.controller.image.tag)) }}
+            - --default-backend-service={{ required ".Values.controller.defaultBackendService is required if .Values.defaultBackend.enabled=false and .Values.controller.image.tag < 0.21.0" .Values.controller.defaultBackendService }}
+          {{- else if .Values.controller.defaultBackendService }}
+            - --default-backend-service={{ .Values.controller.defaultBackendService }}
+          {{- end }}
+          {{- end }}
+          {{- if and (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) .Values.controller.publishService.enabled }}
+            - --publish-service={{ template "nginx-ingress.controller.publishServicePath" . }}
+          {{- end }}
+          {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }}
+            - --election-id={{ .Values.controller.electionID }}
+          {{- end }}
+          {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }}
+            - --ingress-class={{ .Values.controller.ingressClass }}
+          {{- end }}
+          {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }}
+            - --configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }}
+          {{- else }}
+            - --nginx-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.controller.fullname" . }}
+          {{- end }}
+          {{- if .Values.tcp }}
+            - --tcp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . }}-tcp
+          {{- end }}
+          {{- if .Values.udp }}
+            - --udp-services-configmap={{ .Release.Namespace }}/{{ template "nginx-ingress.fullname" . 
}}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- if and (.Values.controller.reportNodeInternalIp) (.Values.controller.hostNetwork)}} + - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + {{- end }} + {{- if .Values.controller.maxmindLicenseKey }} + - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + {{- if (semverCompare ">=0.16.0" .Values.controller.image.tag) }} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ .Values.controller.image.runAsUser }} + allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.extraEnvs }} +{{ toYaml .Values.controller.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.livenessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + ports: + {{- range $key, 
$value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ index $hostPorts $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + containerPort: {{ $key }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + containerPort: {{ $key }} + protocol: UDP + {{- if $useHostPort }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.readinessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }} + volumeMounts: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: "/usr/local/certificates/" + readOnly: true +{{- end }} +{{- if .Values.controller.extraVolumeMounts }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12}} +{{- 
end }} + resources: +{{ toYaml .Values.controller.resources | indent 12 }} +{{- if .Values.controller.extraContainers }} +{{ toYaml .Values.controller.extraContainers | indent 8}} +{{- end }} +{{- if .Values.controller.extraInitContainers }} + initContainers: +{{ toYaml .Values.controller.extraInitContainers | indent 8}} +{{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: 60 +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} + volumes: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ template "nginx-ingress.fullname". 
}}-admission +{{- end }} +{{- if .Values.controller.extraVolumes }} +{{ toYaml .Values.controller.extraVolumes | indent 8}} +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-deployment.yaml new file mode 100644 index 0000000..65e8f58 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-deployment.yaml @@ -0,0 +1,255 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") }} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + {{- if .Values.controller.deploymentLabels }} +{{ toYaml .Values.controller.deploymentLabels | indent 4 }} + {{- end }} + name: {{ template "nginx-ingress.controller.fullname" . }} + annotations: +{{ toYaml .Values.controller.deploymentAnnotations | indent 4}} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + {{- if .Values.controller.useComponentLabel }} + app.kubernetes.io/component: controller + {{- end }} +{{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + strategy: +{{ toYaml .Values.controller.updateStrategy | indent 4 }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + component: "{{ .Values.controller.name }}" + app.kubernetes.io/component: controller + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + spec: +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: "{{ .Values.controller.priorityClassName }}" +{{- end }} + {{- if .Values.controller.podSecurityContext }} + securityContext: +{{ toYaml .Values.controller.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.controller.name }} + image: {{ template "system_default_registry" . 
}}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }} + imagePullPolicy: "{{ .Values.controller.image.pullPolicy }}" + {{- if .Values.controller.lifecycle }} + lifecycle: +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + args: + - /nginx-ingress-controller + {{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "nginx-ingress.defaultBackend.fullname" . }} + {{- else }} + {{- if (semverCompare "<0.21.0" .Values.controller.image.tag) }} + - --default-backend-service={{ required ".Values.controller.defaultBackendService is required if .Values.defaultBackend.enabled=false and .Values.controller.image.tag < 0.21.0" .Values.controller.defaultBackendService }} + {{- else if .Values.controller.defaultBackendService }} + - --default-backend-service={{ .Values.controller.defaultBackendService }} + {{- end }} + {{- end }} + {{- if and (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) .Values.controller.publishService.enabled }} + - --publish-service={{ template "nginx-ingress.controller.publishServicePath" . }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }} + - --election-id={{ .Values.controller.electionID }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }} + - --ingress-class={{ .Values.controller.ingressClass }} + {{- end }} + {{- if (semverCompare ">=0.9.0-beta.1" (trimPrefix "nginx-" .Values.controller.image.tag)) }} + - --configmap={{ default .Release.Namespace .Values.controller.configMapNamespace }}/{{ template "nginx-ingress.controller.fullname" . }} + {{- else }} + - --nginx-configmap={{ default .Release.Namespace .Values.controller.configMapNamespace }}/{{ template "nginx-ingress.controller.fullname" . 
}} + {{- end }} + {{- if .Values.tcp }} + - --tcp-services-configmap={{ default .Release.Namespace .Values.controller.tcp.configMapNamespace }}/{{ template "nginx-ingress.fullname" . }}-tcp + {{- end }} + {{- if .Values.udp }} + - --udp-services-configmap={{ default .Release.Namespace .Values.controller.udp.configMapNamespace }}/{{ template "nginx-ingress.fullname" . }}-udp + {{- end }} + {{- if .Values.controller.scope.enabled }} + - --watch-namespace={{ default .Release.Namespace .Values.controller.scope.namespace }} + {{- end }} + {{- if and (.Values.controller.scope.enabled) (.Values.rbac.scope) }} + - --update-status=false + {{- end }} + {{- if and (.Values.controller.reportNodeInternalIp) (.Values.controller.hostNetwork) }} + - --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + {{- end }} + {{- if .Values.controller.maxmindLicenseKey }} + - --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} + {{- end }} + {{- range $key, $value := .Values.controller.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + {{- if (semverCompare ">=0.16.0" (trimPrefix "nginx-" .Values.controller.image.tag)) }} + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: {{ .Values.controller.image.runAsUser }} + allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.extraEnvs }} +{{ toYaml .Values.controller.extraEnvs | indent 12 }} + {{- end }} + 
livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.livenessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + containerPort: {{ $key }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + containerPort: {{ $key }} + protocol: UDP + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.controller.readinessProbe.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled) }} + volumeMounts: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: 
nginx-template-volume + readOnly: true +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: "/usr/local/certificates/" + readOnly: true +{{- end }} +{{- if .Values.controller.extraVolumeMounts }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12}} +{{- end }} + resources: +{{ toYaml .Values.controller.resources | indent 12 }} +{{- if .Values.controller.extraContainers }} +{{ toYaml .Values.controller.extraContainers | indent 8}} +{{- end }} +{{- if .Values.controller.extraInitContainers }} + initContainers: +{{ toYaml .Values.controller.extraInitContainers | indent 8}} +{{- end }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} +{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes) }} + volumes: +{{- end }} +{{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ template "nginx-ingress.fullname". 
}}-admission +{{- end }} +{{- if .Values.controller.extraVolumes }} +{{ toYaml .Values.controller.extraVolumes | indent 8}} +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-hpa.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-hpa.yaml new file mode 100644 index 0000000..77d3533 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-hpa.yaml @@ -0,0 +1,34 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") }} +{{- if .Values.controller.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + scaleTargetRef: + apiVersion: {{ template "deployment.apiVersion" . }} + kind: Deployment + name: {{ template "nginx-ingress.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: +{{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . 
}} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/templates/controller-metrics-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-metrics-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-metrics-service.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-metrics-service.yaml diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..888515a --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (gt (.Values.controller.replicaCount | int) 1) }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + app.kubernetes.io/component: controller + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + app.kubernetes.io/component: controller + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-prometheusrules.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..4a43957 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-prometheusrules.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "nginx-ingress.controller.fullname" . }} + {{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} +{{ toYaml .Values.controller.metrics.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "nginx-ingress.name" $ }} + rules: {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-psp.yaml new file mode 100644 index 0000000..ccbf636 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-psp.yaml @@ -0,0 +1,80 @@ +{{- if .Values.podSecurityPolicy.enabled}} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . 
}} +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. + volumes: + - 'configMap' + #- 'emptyDir' + - 'projected' + - 'secret' + #- 'downwardAPI' + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- if or .Values.controller.hostNetwork .Values.controller.daemonset.useHostPort }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.daemonset.useHostPort }} +{{- range $key, $value := .Values.controller.daemonset.hostPorts }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-role.yaml new file mode 100644 index 0000000..bb9ff14 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-role.yaml @@ -0,0 +1,91 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - update + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - "networking.k8s.io" # k8s 1.14+ + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }}-{{ .Values.controller.ingressClass }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "nginx-ingress.fullname" . 
}}] +{{- end }} + +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..c1186c0 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-service.yaml new file mode 100644 index 0000000..15d51a0 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-service.yaml @@ -0,0 +1,94 @@ +{{- if .Values.controller.service.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.service.annotations }} + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: +{{- if .Values.controller.service.labels }} +{{ toYaml .Values.controller.service.labels | indent 4 }} +{{- end }} + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.controller.fullname" . }} +spec: +{{- if not .Values.controller.service.omitClusterIP }} + {{- with .Values.controller.service.clusterIP }} + clusterIP: {{ if eq "-" . }}""{{ else }}{{ . | quote }}{{ end }} + {{- end }} +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: +{{ toYaml .Values.controller.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} +{{- if and (semverCompare ">=1.7-0" .Capabilities.KubeVersion.GitVersion) (.Values.controller.service.externalTrafficPolicy) }} + externalTrafficPolicy: "{{ .Values.controller.service.externalTrafficPolicy }}" +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: "{{ .Values.controller.service.sessionAffinity }}" +{{- end }} +{{- if and (semverCompare ">=1.7-0" .Capabilities.KubeVersion.GitVersion) (.Values.controller.service.healthCheckNodePort) }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + 
- name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: "{{ $key }}-tcp" + port: {{ $key }} + protocol: TCP + targetPort: "{{ $key }}-tcp" + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: "{{ $key }}-udp" + port: {{ $key }} + protocol: UDP + targetPort: "{{ $key }}-udp" + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: controller + type: "{{ .Values.controller.service.type }}" +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-serviceaccount.yaml new file mode 100644 index 0000000..7b688e6 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.serviceAccountName" . 
}} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-servicemonitor.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-servicemonitor.yaml new file mode 100644 index 0000000..f3129ea --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-servicemonitor.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "nginx-ingress.controller.fullname" . }} + {{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + component: "{{ .Values.controller.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: +{{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | indent 4 -}} + {{ else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + {{- end }} + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + component: "{{ .Values.controller.name }}" + release: {{ template "nginx-ingress.releaseLabel" . 
}} +{{- end }} diff --git a/charts/rke2-ingress-nginx/templates/controller-webhook-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-webhook-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-webhook-service.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/controller-webhook-service.yaml diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..a4c8d23 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-deployment.yaml @@ -0,0 +1,110 @@ +{{- if .Values.defaultBackend.enabled }} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + {{- if .Values.defaultBackend.deploymentLabels }} +{{ toYaml .Values.defaultBackend.deploymentLabels | indent 4 }} + {{- end }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + {{- if .Values.defaultBackend.useComponentLabel }} + app.kubernetes.io/component: default-backend + {{- end }} + replicas: {{ .Values.defaultBackend.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: + {{- range $key, $value := .Values.defaultBackend.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + {{- if .Values.defaultBackend.podLabels }} +{{ toYaml .Values.defaultBackend.podLabels | indent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} +{{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: "{{ .Values.defaultBackend.priorityClassName }}" +{{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: +{{ toYaml .Values.defaultBackend.podSecurityContext | indent 8 }} + {{- end }} + containers: + - name: {{ template "nginx-ingress.name" . }}-{{ .Values.defaultBackend.name }} + image: {{ template "system_default_registry" . 
}}{{ .Values.defaultBackend.image.repository }}:{{ .Values.defaultBackend.image.tag }} + imagePullPolicy: "{{ .Values.defaultBackend.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + securityContext: + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + {{- if .Values.defaultBackend.extraEnvs }} + env: +{{ toYaml .Values.defaultBackend.extraEnvs | indent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + resources: +{{ toYaml .Values.defaultBackend.resources | indent 12 }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: +{{ toYaml .Values.defaultBackend.nodeSelector | indent 8 }} + {{- end }} + serviceAccountName: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . 
}} + {{- if .Values.defaultBackend.tolerations }} + tolerations: +{{ toYaml .Values.defaultBackend.tolerations | indent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: +{{ toYaml .Values.defaultBackend.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..0713c01 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if gt (.Values.defaultBackend.replicaCount | int) 1 }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + app.kubernetes.io/component: default-backend + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-psp.yaml new file mode 100644 index 0000000..38191d4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-psp.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "nginx-ingress.fullname" . 
}}-backend + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-role.yaml new file mode 100644 index 0000000..11fbba9 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-role.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-backend +rules: + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "nginx-ingress.fullname" . 
}}-backend] +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..7d03ef4 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.fullname" . }}-backend +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "nginx-ingress.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-service.yaml new file mode 100644 index 0000000..23dba19 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-service.yaml @@ -0,0 +1,45 @@ +{{- if .Values.defaultBackend.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: + {{- range $key, $value := .Values.defaultBackend.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . 
}} + component: "{{ .Values.defaultBackend.name }}" + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . }} + name: {{ template "nginx-ingress.defaultBackend.fullname" . }} +spec: +{{- if not .Values.defaultBackend.service.omitClusterIP }} + {{- with .Values.defaultBackend.service.clusterIP }} + clusterIP: {{ if eq "-" . }}""{{ else }}{{ . | quote }}{{ end }} + {{- end }} +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: +{{ toYaml .Values.defaultBackend.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.defaultBackend.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + selector: + app: {{ template "nginx-ingress.name" . }} + release: {{ template "nginx-ingress.releaseLabel" . }} + app.kubernetes.io/component: default-backend + type: "{{ .Values.defaultBackend.service.type }}" +{{- end }} diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..94689a6 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "nginx-ingress.name" . }} + chart: {{ template "nginx-ingress.chart" . }} + heritage: {{ .Release.Service }} + release: {{ template "nginx-ingress.releaseLabel" . 
}} + name: {{ template "nginx-ingress.defaultBackend.serviceAccountName" . }} +{{- end }} diff --git a/charts/rke2-ingress-nginx/templates/proxyheaders-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/proxyheaders-configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/proxyheaders-configmap.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/proxyheaders-configmap.yaml diff --git a/charts/rke2-ingress-nginx/templates/tcp-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/tcp-configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/tcp-configmap.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/tcp-configmap.yaml diff --git a/charts/rke2-ingress-nginx/templates/udp-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/udp-configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/udp-configmap.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/templates/udp-configmap.yaml diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/values.yaml new file mode 100644 index 0000000..da74bbe --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/1.36.300/values.yaml @@ -0,0 +1,578 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md +## +controller: + name: controller + image: + repository: rancher/nginx-ingress-controller + tag: "nginx-0.30.0-rancher1" + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # This will fix the issue of HPA not being able to read the metrics. 
+ # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. + useComponentLabel: false + + # Configures the ports the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + # Maxmind license key to download GeoLite2 Databases + # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: true + + # Optionally customize the pod dnsConfig. + dnsConfig: {} + + # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. 
+ dnsPolicy: ClusterFirstWithHostNet + + # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + ## Use host ports 80 and 443 + daemonset: + useHostPort: false + + hostPorts: + http: 80 + https: 443 + + ## Required only if defaultBackend.enabled = false + ## Must be / + ## + defaultBackendService: "" + + ## Election ID to use for status update + ## + electionID: ingress-controller-leader + + ## Name of the ingress class to route through this controller + ## + ingressClass: nginx + + # labels to add to the deployment metadata + deploymentLabels: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + ## Allows customization of the external service + ## the ingress will be bound to via DNS + publishService: + enabled: false + ## Allows overriding of the publish service to bind to + ## Must be / + ## + pathOverride: "" + + ## Limit the scope of the controller + ## + scope: + enabled: false + namespace: "" # defaults to .Release.Namespace + + ## Allows customization of the configmap / nginx-configmap namespace + ## + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the tcp-services-configmap namespace + ## + tcp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the udp-services-configmap namespace + ## + udp: + configMapNamespace: "" # defaults to .Release.Namespace + + ## Additional command line arguments to pass to 
nginx-ingress-controller + ## E.g. to specify the default SSL certificate you can use + ## extraArgs: + ## default-ssl-certificate: "/" + extraArgs: {} + + ## Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + ## DaemonSet or Deployment + ## + kind: Deployment + + ## Annotations to be added to the controller deployment + ## + deploymentAnnotations: {} + + # The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # minReadySeconds to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - nginx-ingress + # topologyKey: "kubernetes.io/hostname" + + ## terminationGracePeriodSeconds + ## + terminationGracePeriodSeconds: 60 + + ## Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## 
Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 100m + # memory: 64Mi + + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: false + + annotations: {} + labels: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + externalTrafficPolicy: "" + + # Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: "" + + healthCheckNodePort: 0 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + # type: NodePort + # nodePorts: + # http: 32080 + # https: 32443 + # tcp: + # 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + extraContainers: [] + ## Additional containers to be added to the controller pod. + ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + extraVolumeMounts: [] + ## Additional volumeMounts to the controller main container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the controller pod. + # - name: copy-portal-skins + # emptyDir: {} + + extraInitContainers: [] + ## Containers, which are run before the app containers are started. 
+ # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + admissionWebhooks: + enabled: false + failurePolicy: Fail + port: 8443 + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + patch: + enabled: true + image: + repository: jettech/kube-webhook-certgen + tag: v1.0.0 + pullPolicy: IfNotPresent + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9913 + type: ClusterIP + + serviceMonitor: + enabled: false + additionalLabels: {} + namespace: "" + namespaceSelector: {} + # Default: scrape .Release.Namespace only + # To scrape all, use the following: + # namespaceSelector: + # any: true + scrapeInterval: 30s + # honorLabels: true + + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: TooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 5XXs + # summary: More than 5% of the 
all requests did return 5XX, this require your attention + # - alert: TooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: Too many 4XXs + # summary: More than 5% of the all requests did return 4XX, this require your attention + + + lifecycle: {} + + priorityClassName: "" + +## Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + + ## If false, controller.defaultBackendService must be provided + ## + enabled: true + + name: default-backend + image: + repository: rancher/nginx-ingress-controller-defaultbackend + tag: "1.5-rancher1" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + + # This will fix the issue of HPA not being able to read the metrics. + # Note that if you enable it for existing deployments, it won't work as the labels are immutable. + # We recommend setting this to true for new deployments. 
+ useComponentLabel: false + + extraArgs: {} + + serviceAccount: + create: true + name: + ## Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + # labels to add to the deployment metadata + deploymentLabels: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + service: + annotations: {} + ## Deprecated, instead simply do not provide a clusterIP value + omitClusterIP: false + # clusterIP: "" + + ## List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + 
loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + +# If provided, the value will be used as the `release` label instead of .Release.Name +releaseLabelOverride: "" + +## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 +rbac: + create: true + scope: false + +# If true, create & use Pod Security Policy resources +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: + +## Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# TCP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# UDP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp +## +udp: {} +# 53: "kube-system/kube-dns:53" + +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/.helmignore b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rke2-ingress-nginx/Chart.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/Chart.yaml old mode 100755 new mode 100644 similarity index 92% rename from charts/rke2-ingress-nginx/Chart.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/Chart.yaml index 0689764..694932d --- a/charts/rke2-ingress-nginx/Chart.yaml +++ b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/Chart.yaml @@ -1,7 +1,6 @@ apiVersion: v1 appVersion: 0.35.0 -description: Ingress controller for Kubernetes using NGINX as a reverse proxy and - load balancer +description: Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer home: https://github.com/kubernetes/ingress-nginx icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png keywords: diff --git a/charts/rke2-ingress-nginx/OWNERS b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/OWNERS old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/OWNERS rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/OWNERS diff --git a/charts/rke2-ingress-nginx/README.md b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/README.md old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/README.md rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/README.md diff --git a/charts/rke2-ingress-nginx/ci/daemonset-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-customconfig-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-customconfig-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-customconfig-values.yaml 
diff --git a/charts/rke2-ingress-nginx/ci/daemonset-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-customnodeport-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-customnodeport-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-customnodeport-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/daemonset-headers-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-headers-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-headers-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-headers-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/daemonset-internal-lb-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-internal-lb-values.yaml similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-internal-lb-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-internal-lb-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/daemonset-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-nodeport-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-nodeport-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-nodeport-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-udp-configMapNamespace-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-tcp-udp-configMapNamespace-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-udp-configMapNamespace-values.yaml 
diff --git a/charts/rke2-ingress-nginx/ci/daemonset-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-udp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-tcp-udp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-udp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/daemonset-tcp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/daemonset-tcp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/daemonset-tcp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deamonset-default-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-default-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deamonset-default-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-default-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deamonset-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-metrics-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deamonset-metrics-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-metrics-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deamonset-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-psp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deamonset-psp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-psp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-webhook-and-psp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-webhook-and-psp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deamonset-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-webhook-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deamonset-webhook-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deamonset-webhook-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-autoscaling-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-autoscaling-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-autoscaling-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-autoscaling-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-customconfig-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-customconfig-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-customconfig-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-customconfig-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-customnodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-customnodeport-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-customnodeport-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-customnodeport-values.yaml diff --git 
a/charts/rke2-ingress-nginx/ci/deployment-default-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-default-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-default-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-default-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-headers-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-headers-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-headers-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-headers-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-internal-lb-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-internal-lb-values.yaml similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-internal-lb-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-internal-lb-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-metrics-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-metrics-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-metrics-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-metrics-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-nodeport-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-nodeport-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-nodeport-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-nodeport-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-psp-values.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-psp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-psp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-psp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-udp-configMapNamespace-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-tcp-udp-configMapNamespace-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-udp-configMapNamespace-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-tcp-udp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-udp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-tcp-udp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-udp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-tcp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-tcp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-tcp-values.yaml diff --git a/charts/rke2-ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-webhook-and-psp-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-webhook-and-psp-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-webhook-and-psp-values.yaml diff --git 
a/charts/rke2-ingress-nginx/ci/deployment-webhook-values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-webhook-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/ci/deployment-webhook-values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/ci/deployment-webhook-values.yaml diff --git a/charts/rke2-ingress-nginx/templates/NOTES.txt b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/NOTES.txt old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/NOTES.txt rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/NOTES.txt diff --git a/charts/rke2-ingress-nginx/templates/_helpers.tpl b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/_helpers.tpl old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/_helpers.tpl rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/_helpers.tpl diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/clusterrole.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/clusterrole.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/clusterrolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml rename to 
charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/clusterrolebinding.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/job-createSecret.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/job-createSecret.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/job-patchWebhook.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/job-patchWebhook.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/psp.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/psp.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/role.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/role.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/role.yaml diff --git 
a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/rolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/rolebinding.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/serviceaccount.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/job-patch/serviceaccount.yaml diff --git a/charts/rke2-ingress-nginx/templates/admission-webhooks/validating-webhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/validating-webhook.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/admission-webhooks/validating-webhook.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/admission-webhooks/validating-webhook.yaml diff --git a/charts/rke2-ingress-nginx/templates/clusterrole.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/clusterrole.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/clusterrole.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/clusterrole.yaml diff --git a/charts/rke2-ingress-nginx/templates/clusterrolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/clusterrolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from 
charts/rke2-ingress-nginx/templates/clusterrolebinding.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/clusterrolebinding.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-configmap-addheaders.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-addheaders.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-configmap-addheaders.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-addheaders.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-proxyheaders.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-configmap-proxyheaders.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-proxyheaders.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-configmap-tcp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-tcp.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-configmap-tcp.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-tcp.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-configmap-udp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-udp.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-configmap-udp.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap-udp.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-configmap.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap.yaml old mode 100755 new mode 100644 similarity index 100% rename from 
charts/rke2-ingress-nginx/templates/controller-configmap.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-configmap.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-daemonset.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-daemonset.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-daemonset.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-daemonset.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-deployment.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-deployment.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-deployment.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-hpa.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-hpa.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-hpa.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-hpa.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-poddisruptionbudget.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-poddisruptionbudget.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-poddisruptionbudget.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-prometheusrules.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-prometheusrules.yaml old mode 100755 new mode 100644 similarity index 100% rename from 
charts/rke2-ingress-nginx/templates/controller-prometheusrules.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-prometheusrules.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-psp.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-psp.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-psp.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-role.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-role.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-role.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-rolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-rolebinding.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-rolebinding.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-service-internal.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-internal.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-service-internal.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-internal.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-service-metrics.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-metrics.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-service-metrics.yaml rename to 
charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-metrics.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-service-webhook.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-webhook.yaml similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-service-webhook.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service-webhook.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-service.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-service.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-service.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-serviceaccount.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-serviceaccount.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-serviceaccount.yaml diff --git a/charts/rke2-ingress-nginx/templates/controller-servicemonitor.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-servicemonitor.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/controller-servicemonitor.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/controller-servicemonitor.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-deployment.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-deployment.yaml old mode 100755 new mode 100644 similarity index 100% rename from 
charts/rke2-ingress-nginx/templates/default-backend-deployment.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-deployment.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-poddisruptionbudget.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-poddisruptionbudget.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-poddisruptionbudget.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-psp.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-psp.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-psp.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-psp.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-role.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-role.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-role.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-role.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-rolebinding.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-rolebinding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-rolebinding.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-rolebinding.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-service.yaml 
b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-service.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-service.yaml diff --git a/charts/rke2-ingress-nginx/templates/default-backend-serviceaccount.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-serviceaccount.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/templates/default-backend-serviceaccount.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/templates/default-backend-serviceaccount.yaml diff --git a/charts/rke2-ingress-nginx/values.yaml b/charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-ingress-nginx/values.yaml rename to charts/rke2-ingress-nginx/rke2-ingress-nginx/3.3.000/values.yaml diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/Chart.yaml new file mode 100644 index 0000000..c2fa2dd --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.10 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.10 diff --git a/charts/rke2-kube-proxy/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/NOTES.txt similarity index 100% rename from charts/rke2-kube-proxy/templates/NOTES.txt rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/NOTES.txt diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ 
.Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddresses | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/daemonset.yaml 
b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/templates/rbac.yaml 
b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/rbac.yaml similarity index 100% rename from charts/rke2-kube-proxy/templates/rbac.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/rbac.yaml diff --git a/charts/rke2-kube-proxy/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/serviceaccount.yaml similarity index 100% rename from charts/rke2-kube-proxy/templates/serviceaccount.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/templates/serviceaccount.yaml diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/values.yaml new file mode 100644 index 0000000..5674ac7 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.10/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.10 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). 
+ min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + 
RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). 
+ minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. 
+ # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/Chart.yaml new file mode 100644 index 0000000..6f63cb8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.12 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.12 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + 
tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddresses | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: 
RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/values.yaml new file mode 100644 index 0000000..e5f9bbc --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.12/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.12 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. 
+config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: 
false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. 
'5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. 
+nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/Chart.yaml new file mode 100644 index 0000000..f42e9e7 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.13 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.13 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + 
tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddresses | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: 
RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/values.yaml new file mode 100644 index 0000000..07afd93 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.13/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.13 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. 
+config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: 
false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. 
'5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. 
+nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/Chart.yaml new file mode 100644 index 0000000..3605f53 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.15 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.15 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/_helpers.tpl old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-kube-proxy/templates/_helpers.tpl rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/_helpers.tpl diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/config.yaml new file mode 100644 index 0000000..20a215e --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . 
| quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddresses | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + 
{{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/daemonset.yaml similarity index 100% rename from charts/rke2-kube-proxy/templates/daemonset.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/daemonset.yaml diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/values.yaml new file mode 100644 index 0000000..59a6be0 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.15/values.yaml @@ -0,0 +1,221 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.15 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 
0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + 
DevicePlugins: true + DryRun: true + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. 
+hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). 
+ kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is 'major.minor', e.g.: '1.16'. The purpose of this format is to make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0.
Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/Chart.yaml new file mode 100644 index 0000000..9281912 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.16 +description: Install Kube Proxy. +keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.16 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. 
+ diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/_helpers.tpl new file mode 100644 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/config.yaml similarity index 100% rename from charts/rke2-kube-proxy/templates/config.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/config.yaml diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/daemonset.yaml new file mode 100644 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + 
apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . }}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . }}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/serviceaccount.yaml 
b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/values.yaml new file mode 100644 index 0000000..32d2f92 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.16/values.yaml @@ -0,0 +1,142 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.16 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). 
+ min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). 
+ tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. 
+# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is 'major.minor', e.g.: '1.16'. The purpose of this format is to make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/Chart.yaml new file mode 100644 index 0000000..64b0584 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.4 +description: Install Kube Proxy. +keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.4 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed.
+ diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if 
.Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddresses | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - 
/usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: 
kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/values.yaml new file mode 100644 index 0000000..670bb47 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.4/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/kube-proxy + tag: v1.18.4 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). 
+ min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + 
RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). 
+ minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. 
+ # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/Chart.yaml new file mode 100644 index 0000000..625dfde --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.8 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.8 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + 
tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: 
RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/values.yaml new file mode 100644 index 0000000..544723e --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.8/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.8 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. 
Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + 
KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. 
+ syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. 
Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/Chart.yaml new file mode 100644 index 0000000..b46eee9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.18.9 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.18.9 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/_helpers.tpl new file mode 100644 index 0000000..b647c75 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/config.yaml new file mode 100644 index 0000000..c0bf1a9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ .Values.clientConnection.kubeconfig | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + 
tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/daemonset.yaml new file mode 100644 index 0000000..6a65734 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/rancher/rke2/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: /var/lib/rancher/rke2/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: 
RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/values.yaml new file mode 100644 index 0000000..904c70a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.18.9/values.yaml @@ -0,0 +1,223 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.18.9 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. 
Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicAuditing: false + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + 
KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + ResourceLimitsPriorityFunction: false + RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. 
+ syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. 
Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/Chart.yaml new file mode 100644 index 0000000..46d5eae --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.19.5 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.19.5 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/_helpers.tpl new file mode 100644 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/config.yaml new file mode 100644 index 0000000..20a215e --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | 
quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ 
.Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/daemonset.yaml new file mode 100644 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . 
}}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . }}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/values.yaml new file mode 100644 index 0000000..9fe3cb4 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.5/values.yaml @@ -0,0 +1,221 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.19.5 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). 
+ min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + 
RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). 
+ minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. 
+ # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/Chart.yaml new file mode 100644 index 0000000..5478cf9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.19.7 +description: Install Kube Proxy. 
+keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.19.7 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/_helpers.tpl new file mode 100644 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/config.yaml new file mode 100644 index 0000000..536a12a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | 
quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ 
.Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/daemonset.yaml new file mode 100644 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . 
}}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . }}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git 
a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/values.yaml new file mode 100644 index 0000000..bc250f1 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.7/values.yaml @@ -0,0 +1,221 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.19.7 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. +# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). 
+ min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + APIListChunking: true + APIPriorityAndFairness: false + APIResponseCompression: true + AllAlpha: false + AllBeta: false + AllowInsecureBackendProxy: true + AnyVolumeDataSource: false + AppArmor: true + BalanceAttachedNodeVolumes: false + BoundServiceAccountTokenVolume: false + CPUManager: true + CRIContainerLogRotation: true + CSIInlineVolume: true + CSIMigration: true + CSIMigrationAWS: false + CSIMigrationAWSComplete: false + CSIMigrationAzureDisk: false + CSIMigrationAzureDiskComplete: false + CSIMigrationAzureFile: false + CSIMigrationAzureFileComplete: false + CSIMigrationGCE: false + CSIMigrationGCEComplete: false + CSIMigrationOpenStack: false + CSIMigrationOpenStackComplete: false + ConfigurableFSGroupPolicy: false + CustomCPUCFSQuotaPeriod: false + DefaultIngressClass: true + DevicePlugins: true + DryRun: true + DynamicKubeletConfig: true + EndpointSlice: true + EndpointSliceProxying: false + EphemeralContainers: false + EvenPodsSpread: true + ExpandCSIVolumes: true + ExpandInUsePersistentVolumes: true + ExpandPersistentVolumes: true + ExperimentalHostUserNamespaceDefaulting: false + HPAScaleToZero: false + HugePageStorageMediumSize: false + HyperVContainer: false + IPv6DualStack: false + ImmutableEphemeralVolumes: false + KubeletPodResources: true + LegacyNodeRoleBehavior: true + LocalStorageCapacityIsolation: true + LocalStorageCapacityIsolationFSQuotaMonitoring: false + NodeDisruptionExclusion: false + NonPreemptingPriority: false + PodDisruptionBudget: true + PodOverhead: true + ProcMountType: false + QOSReserved: false + RemainingItemCount: true + RemoveSelfLink: false + 
RotateKubeletClientCertificate: true + RotateKubeletServerCertificate: true + RunAsGroup: true + RuntimeClass: true + SCTPSupport: false + SelectorIndex: false + ServerSideApply: true + ServiceAccountIssuerDiscovery: false + ServiceAppProtocol: false + ServiceNodeExclusion: false + ServiceTopology: false + StartupProbe: true + StorageVersionHash: true + SupportNodePidsLimit: true + SupportPodPidsLimit: true + Sysctls: true + TTLAfterFinished: false + TokenRequest: true + TokenRequestProjection: true + TopologyManager: true + ValidateProxyRedirects: true + VolumeSnapshotDataSource: true + WinDSR: false + WinOverlay: false + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). 
+ minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. +nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. 
+ # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-kube-proxy/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/Chart.yaml similarity index 55% rename from charts/rke2-kube-proxy/Chart.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/Chart.yaml index 367c94e..b970eaa 100644 --- a/charts/rke2-kube-proxy/Chart.yaml +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/Chart.yaml @@ -1,12 +1,12 @@ apiVersion: v1 -name: rke2-kube-proxy -description: Install Kube Proxy. -version: v1.19.8 appVersion: v1.19.8 +description: Install Kube Proxy. 
keywords: - - kube-proxy -sources: - - https://github.com/rancher/rke2-charts +- kube-proxy maintainers: - - name: Rancher Labs - email: charts@rancher.com +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.19.8 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. + diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/_helpers.tpl new file mode 100644 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/config.yaml new file mode 100644 index 0000000..536a12a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ 
.Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . | quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ 
.Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/daemonset.yaml new file mode 100644 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . 
}}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . }}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/values.yaml 
b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/values.yaml similarity index 100% rename from charts/rke2-kube-proxy/values.yaml rename to charts/rke2-kube-proxy/rke2-kube-proxy/v1.19.8/values.yaml diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/Chart.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/Chart.yaml new file mode 100644 index 0000000..89613c9 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: v1.20.2 +description: Install Kube Proxy. +keywords: +- kube-proxy +maintainers: +- email: charts@rancher.com + name: Rancher Labs +name: rke2-kube-proxy +sources: +- https://github.com/rancher/rke2-charts +version: v1.20.2 diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/NOTES.txt b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/NOTES.txt new file mode 100644 index 0000000..2da0e24 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/NOTES.txt @@ -0,0 +1,2 @@ +Kube-proxy has been installed. 
+ diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/_helpers.tpl b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/_helpers.tpl new file mode 100644 index 0000000..cb64d1f --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} +{{- define "rke2_data_dir" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s" .Values.global.rke2DataDir -}} +{{- else -}} +{{- "/var/lib/rancher/rke2" -}} +{{- end -}} +{{- end -}} +{{- define "kubeproxy_kubeconfig" -}} +{{- if .Values.global.rke2DataDir -}} +{{- printf "%s/agent/kubeproxy.kubeconfig" .Values.global.rke2DataDir -}} +{{- else -}} +{{- printf "%s" .Values.clientConnection.kubeconfig -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/config.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/config.yaml new file mode 100644 index 0000000..536a12a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/config.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: {{ .Values.bindAddress | quote }} + clientConnection: + acceptContentTypes: {{ .Values.clientConnection.acceptContentTypes | quote }} + burst: {{ .Values.clientConnection.burst }} + contentType: {{ .Values.clientConnection.contentType | quote }} + kubeconfig: {{ include "kubeproxy_kubeconfig" . 
| quote }} + qps: {{ .Values.clientConnection.qps }} + clusterCIDR: {{ .Values.clusterCIDR | quote }} + configSyncPeriod: {{ .Values.configSyncPeriod }} + conntrack: + maxPerCore: {{ .Values.conntrack.maxPerCore }} + min: {{ .Values.conntrack.min }} + tcpCloseWaitTimeout: {{ .Values.conntrack.tcpCloseWaitTimeout | quote }} + tcpEstablishedTimeout: {{ .Values.conntrack.tcpEstablishedTimeout | quote }} + detectLocalMode: {{ .Values.detectLocalMode | quote }} + enableProfiling: {{ .Values.enableProfiling | quote }} + healthzBindAddress: {{ .Values.healthzBindAddress | quote }} + hostnameOverride: {{ .Values.hostnameOverride | quote }} + iptables: + masqueradeAll: {{ .Values.iptables.masqueradeAll | quote }} + masqueradeBit: {{ .Values.iptables.masqueradeBit }} + {{ if .Values.iptables.minSyncPeriod }} + minSyncPeriod: {{ .Values.iptables.minSyncPeriod }} + {{ end }} + syncPeriod: {{ .Values.iptables.syncPeriod }} + ipvs: + excludeCIDRs: {{ .Values.ipvs.excludeCIDRs | quote }} + {{ if .Values.ipvs.minSyncPeriod }} + minSyncPeriod: {{ .Values.ipvs.minSyncPeriod }} + {{ end }} + scheduler: {{ .Values.ipvs.scheduler | quote }} + strictARP: {{ .Values.ipvs.strictARP | quote }} + syncPeriod: {{ .Values.ipvs.syncPeriod }} + {{ if .Values.ipvs.tcpFinTimeout }} + tcpFinTimeout: {{ .Values.ipvs.tcpFinTimeout }} + {{ end }} + {{ if .Values.ipvs.tcpTimeout }} + tcpTimeout: {{ .Values.ipvs.tcpTimeout }} + {{ end }} + {{ if .Values.ipvs.udpTimeout }} + udpTimeout: {{ .Values.ipvs.udpTimeout }} + {{ end }} + kind: KubeProxyConfiguration + metricsBindAddress: {{ .Values.metricsBindAddress | quote }} + mode: {{ .Values.proxy.mode | quote }} + nodePortAddresses: null + oomScoreAdj: {{ .Values.oomScoreAdj }} + portRange: {{ .Values.proxy.portRange | quote }} + showHiddenMetricsForVersion: {{ .Values.showHiddenMetricsForVersion | quote }} + udpIdleTimeout: {{ .Values.udpTimeout | quote }} + featureGates: + {{- range $key, $value := .Values.featureGates }} + {{ $key }}: {{ $value }} + {{- 
end }} + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/daemonset.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/daemonset.yaml new file mode 100644 index 0000000..1267df8 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/daemonset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-proxy + template: + metadata: + labels: + k8s-app: kube-proxy + spec: + containers: + - command: + - /usr/local/bin/kube-proxy + - --config=/var/lib/kube-proxy/config.conf + - --hostname-override=$(NODE_NAME) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + name: kube-proxy + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: {{ template "rke2_data_dir" . }}/agent + name: rke2config + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-proxy + serviceAccountName: kube-proxy + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - hostPath: + path: {{ template "rke2_data_dir" . 
}}/agent + type: "" + name: rke2config + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - hostPath: + path: /lib/modules + type: "" + name: lib-modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/rbac.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/rbac.yaml new file mode 100644 index 0000000..d98f84c --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/rbac.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rke2:node-proxier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier +subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/serviceaccount.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/serviceaccount.yaml new file mode 100644 index 0000000..59408a2 --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-proxy + namespace: kube-system diff --git a/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/values.yaml b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/values.yaml new file mode 100644 index 0000000..892511a --- /dev/null +++ b/charts/rke2-kube-proxy/rke2-kube-proxy/v1.20.2/values.yaml @@ -0,0 +1,142 @@ +--- + +# image for kubeproxy +image: + repository: rancher/hardened-kube-proxy + tag: v1.20.2 + +# The IP address for the proxy server to serve on +# (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) +bindAddress: 0.0.0.0 + +# If true cleanup iptables and ipvs rules and exit. +cleanup: + +# The CIDR range of pods in the cluster. 
+# When configured, traffic sent to a Service cluster IP from outside this range +# will be masqueraded and traffic sent from pods to an external +# LoadBalancer IP will be directed to the respective cluster IP instead +clusterCIDR: 10.42.0.0/16 + +# The path to the configuration file. +config: + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +configSyncPeriod: 15m0s + +conntrack: + # Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min). + maxPerCore: 32768 + # Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is). + min: 131072 + # NAT timeout for TCP connections in the CLOSE_WAIT state + tcpTimeoutCloseWait: 1h0m0s + # Idle timeout for established TCP connections (0 to leave as-is) + tcpTimeoutEstablished: 24h0m0s + +# Mode to use to detect local traffic +detectLocalMode: + +# A set of key=value pairs that describe feature gates for alpha/experimental features: +featureGates: + +# The IP address with port for the health check server to serve on +# (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. +healthzBindAddress: 0.0.0.0:10256 + +# help for kube-proxy +help: + +# If non-empty, will use this string as identification instead of the actual hostname. +hostnameOverride: + +iptables: + # If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed) + masqueradeAll: + # If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31]. + masqueradeBit: 14 + # The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). 
Must be greater than 0. + syncPeriod: 30s + +ipvs: + # A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. + excludeCidrs: + # The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m'). + minSyncPeriod: + # The ipvs scheduler type when proxy mode is ipvs + scheduler: + # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2 + strictArp: + # The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + syncPeriod: 30s + # The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpTimeout: + # The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + tcpfinTimeout: + # The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + udpTimeout: + + +clientConnection: + # Burst to use while talking with kubernetes apiserver + apiBurst: 10 + # Content type of requests sent to apiserver. + apiContentType: + # QPS to use while talking with kubernetes apiserver + qps: 5 + # Path to kubeconfig file with authorization information (the master location is set by the master flag). + kubeconfig: /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +# Maximum number of seconds between log flushes +logFlushFrequency: 5s + + + +# The address of the Kubernetes API server (overrides any value in kubeconfig) +master: + +# The IP address with port for the metrics server to serve on +# (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. +metricsBindAddress: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. 
+nodeportAddresses: + +# The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +oomScoreAdj: -999 + +# If true enables profiling via web interface on /debug/pprof handler. +profiling: + +proxy: + # Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs'. + # If blank, use the best-available proxy (currently iptables). + # If the iptables proxy is selected, regardless of how, but the system's + # kernel or iptables versions are insufficient, this always falls back to the userspace proxy. + mode: +# Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) +# that may be consumed in order to proxy service traffic. +# If (unspecified, 0, or 0-0) then ports will be randomly chosen. + portRange: + +# The previous version for which you want to show hidden metrics. +# Only the previous minor version is meaningful, other values will not be allowed. +# The format is ., e.g.: '1.16'. The purpose of this format is make +# sure you have the opportunity to notice if the next release hides additional metrics, +# rather than being surprised when they are permanently removed in the release after that. +showHiddenMetricsForVersion: + +# How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxy-mode=userspace +udpTimeout: 250ms + +# Print version information and quit +version: + +# If set, write the default configuration values to this file and exit. +writeConfigTo: +global: + systemDefaultRegistry: "" diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/.helmignore b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/.helmignore new file mode 100644 index 0000000..37ea1d7 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +OWNERS +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rke2-metrics-server/Chart.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/Chart.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/Chart.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/Chart.yaml diff --git a/charts/rke2-metrics-server/README.md b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/README.md old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/README.md rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/README.md diff --git a/charts/rke2-metrics-server/ci/ci-values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/ci/ci-values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/ci/ci-values.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/ci/ci-values.yaml diff --git a/charts/rke2-metrics-server/templates/NOTES.txt b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/NOTES.txt old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/NOTES.txt rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/NOTES.txt diff --git a/charts/rke2-metrics-server/templates/_helpers.tpl b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/_helpers.tpl old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/_helpers.tpl rename to 
charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/_helpers.tpl diff --git a/charts/rke2-metrics-server/templates/aggregated-metrics-reader-cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/aggregated-metrics-reader-cluster-role.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/aggregated-metrics-reader-cluster-role.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/aggregated-metrics-reader-cluster-role.yaml diff --git a/charts/rke2-metrics-server/templates/auth-delegator-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/auth-delegator-crb.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/auth-delegator-crb.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/auth-delegator-crb.yaml diff --git a/charts/rke2-metrics-server/templates/cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/cluster-role.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/cluster-role.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/cluster-role.yaml diff --git a/charts/rke2-metrics-server/templates/metric-server-service.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metric-server-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/metric-server-service.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metric-server-service.yaml diff --git a/charts/rke2-metrics-server/templates/metrics-api-service.yaml 
b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-api-service.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/metrics-api-service.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-api-service.yaml diff --git a/charts/rke2-metrics-server/templates/metrics-server-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-crb.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/metrics-server-crb.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-crb.yaml diff --git a/charts/rke2-metrics-server/templates/metrics-server-deployment.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-deployment.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/metrics-server-deployment.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-deployment.yaml diff --git a/charts/rke2-metrics-server/templates/metrics-server-serviceaccount.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-serviceaccount.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/metrics-server-serviceaccount.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/metrics-server-serviceaccount.yaml diff --git a/charts/rke2-metrics-server/templates/pdb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/pdb.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/pdb.yaml rename to 
charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/pdb.yaml diff --git a/charts/rke2-metrics-server/templates/psp.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/psp.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/psp.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/psp.yaml diff --git a/charts/rke2-metrics-server/templates/role-binding.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/role-binding.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/role-binding.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/role-binding.yaml diff --git a/charts/rke2-metrics-server/templates/tests/test-version.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/tests/test-version.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/templates/tests/test-version.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/templates/tests/test-version.yaml diff --git a/charts/rke2-metrics-server/values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/values.yaml old mode 100755 new mode 100644 similarity index 100% rename from charts/rke2-metrics-server/values.yaml rename to charts/rke2-metrics-server/rke2-metrics-server/2.11.100-build2021022300/values.yaml diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/.helmignore b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/.helmignore new file mode 100644 index 0000000..37ea1d7 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +OWNERS +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/Chart.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/Chart.yaml new file mode 100644 index 0000000..0abfceb --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +appVersion: 0.3.6 +description: Metrics Server is a cluster-wide aggregator of resource usage data. +home: https://github.com/kubernetes-incubator/metrics-server +keywords: +- metrics-server +maintainers: +- email: o.with@sportradar.com + name: olemarkus +- email: k.aasan@sportradar.com + name: kennethaasan +name: rke2-metrics-server +sources: +- https://github.com/kubernetes-incubator/metrics-server +version: 2.11.100 diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/README.md b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/README.md new file mode 100644 index 0000000..678f084 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/README.md @@ -0,0 +1,39 @@ +# metrics-server + +[Metrics Server](https://github.com/kubernetes-incubator/metrics-server) is a cluster-wide aggregator of resource usage data. Resource metrics are used by components like `kubectl top` and the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale) to scale workloads. To autoscale based upon a custom metric, see the [Prometheus Adapter chart](https://github.com/helm/charts/blob/master/stable/prometheus-adapter). 
+ +## Configuration + +Parameter | Description | Default +--- | --- | --- +`rbac.create` | Enable Role-based authentication | `true` +`rbac.pspEnabled` | Enable pod security policy support | `false` +`serviceAccount.create` | If `true`, create a new service account | `true` +`serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | `` +`apiService.create` | Create the v1beta1.metrics.k8s.io API service | `true` +`hostNetwork.enabled` | Enable hostNetwork mode | `false` +`image.repository` | Image repository | `k8s.gcr.io/metrics-server-amd64` +`image.tag` | Image tag | `v0.3.2` +`image.pullPolicy` | Image pull policy | `IfNotPresent` +`imagePullSecrets` | Image pull secrets | `[]` +`args` | Command line arguments | `[]` +`resources` | CPU/Memory resource requests/limits. | `{}` +`tolerations` | List of node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`nodeSelector` | Node labels for pod assignment | `{}` +`affinity` | Node affinity | `{}` +`replicas` | Number of replicas | `1` +`extraVolumeMounts` | Ability to provide volume mounts to the pod | `[]` +`extraVolumes` | Ability to provide volumes to the pod | `[]` +`livenessProbe` | Container liveness probe | See values.yaml +`podLabels` | Labels to be added to pods | `{}` +`podAnnotations` | Annotations to be added to pods | `{}` +`priorityClassName` | Pod priority class | `""` +`readinessProbe` | Container readiness probe | See values.yaml +`service.annotations` | Annotations to add to the service | `{}` +`service.labels` | Labels to be added to the metrics-server service | `{}` +`service.port` | Service port to expose | `443` +`service.type` | Type of service to create | `ClusterIP` +`podDisruptionBudget.enabled` | Create a PodDisruptionBudget | `false` +`podDisruptionBudget.minAvailable` | Minimum available instances; ignored if there is no PodDisruptionBudget | +`podDisruptionBudget.maxUnavailable` | Maximum 
unavailable instances; ignored if there is no PodDisruptionBudget | +`extraContainers` | Add additional containers | `[]` diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/ci/ci-values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/ci/ci-values.yaml new file mode 100644 index 0000000..a9d81b4 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/ci/ci-values.yaml @@ -0,0 +1,5 @@ +# CI is running on GKE, which already ships metrics-server. This cause +# conflicts on the apiService resource. + +apiService: + create: false diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/NOTES.txt b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/NOTES.txt new file mode 100644 index 0000000..1034c12 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/NOTES.txt @@ -0,0 +1,11 @@ +The metric server has been deployed. +{{ if .Values.apiService.create }} +In a few minutes you should be able to list metrics using the following +command: + + kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" +{{ else }} +NOTE: You have disabled the API service creation for this release. The metrics +API will not work with this release unless you configure the metrics API +service outside of this Helm chart. +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/_helpers.tpl b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/_helpers.tpl new file mode 100644 index 0000000..b59ca03 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/_helpers.tpl @@ -0,0 +1,59 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "metrics-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "metrics-server.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "metrics-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a service name that defaults to app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "metrics-server.service.fullname" -}} +{{- .Values.service.nameOverride | default .Chart.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "metrics-server.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "metrics-server.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/aggregated-metrics-reader-cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/aggregated-metrics-reader-cluster-role.yaml new file mode 100644 index 0000000..e91a3d8 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/aggregated-metrics-reader-cluster-role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:{{ template "metrics-server.name" . }}-aggregated-reader + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["metrics.k8s.io"] + resources: ["pods","nodes"] + verbs: ["get", "list", "watch"] +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/auth-delegator-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/auth-delegator-crb.yaml new file mode 100644 index 0000000..e82fca0 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/auth-delegator-crb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "metrics-server.fullname" . }}:system:auth-delegator + labels: + app: {{ template "metrics-server.name" . 
}} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/cluster-role.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/cluster-role.yaml new file mode 100644 index 0000000..8763acd --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/cluster-role.yaml @@ -0,0 +1,34 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:{{ template "metrics-server.fullname" . }} + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + - namespaces + verbs: + - get + - list + - watch + {{- if .Values.rbac.pspEnabled }} + - apiGroups: + - extensions + - policy + resources: + - podsecuritypolicies + resourceNames: + - privileged-{{ template "metrics-server.fullname" . }} + verbs: + - use + {{- end -}} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metric-server-service.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metric-server-service.yaml new file mode 100644 index 0000000..0d64cd1 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metric-server-service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "metrics-server.name" . 
}} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- with .Values.service.labels -}} + {{ toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- toYaml .Values.service.annotations | trim | nindent 4 }} +spec: + ports: + - port: {{ .Values.service.port }} + protocol: TCP + targetPort: https + selector: + app: {{ template "metrics-server.name" . }} + release: {{ .Release.Name }} + type: {{ .Values.service.type }} + diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-api-service.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-api-service.yaml new file mode 100644 index 0000000..552ffea --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-api-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.apiService.create -}} +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.metrics.k8s.io + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + service: + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + group: metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-crb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-crb.yaml new file mode 100644 index 0000000..eb04c6f --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-crb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:{{ template "metrics-server.fullname" . 
}} + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:{{ template "metrics-server.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-deployment.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-deployment.yaml new file mode 100644 index 0000000..2e54f27 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-deployment.yaml @@ -0,0 +1,88 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "metrics-server.name" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.replicas }} + template: + metadata: + labels: + app: {{ template "metrics-server.name" . }} + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + {{- with .Values.podAnnotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "metrics-server.serviceAccountName" . 
}} +{{- if .Values.hostNetwork.enabled }} + hostNetwork: true +{{- end }} + containers: + {{- if .Values.extraContainers }} + {{- ( tpl (toYaml .Values.extraContainers) . ) | nindent 8 }} + {{- end }} + - name: metrics-server + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /metrics-server + - --cert-dir=/tmp + - --logtostderr + - --secure-port=8443 + {{- range .Values.args }} + - {{ . }} + {{- end }} + ports: + - containerPort: 8443 + name: https + livenessProbe: + {{- toYaml .Values.livenessProbe | trim | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | trim | nindent 12 }} + resources: + {{- toYaml .Values.resources | trim | nindent 12 }} + securityContext: + {{- toYaml .Values.securityContext | trim | nindent 12 }} + volumeMounts: + - name: tmp + mountPath: /tmp + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 10 }} + {{- end }} + nodeSelector: + {{- toYaml .Values.nodeSelector | trim | nindent 8 }} + affinity: + {{- toYaml .Values.affinity | trim | nindent 8 }} + tolerations: + {{- toYaml .Values.tolerations | trim | nindent 8 }} + volumes: + - name: tmp + emptyDir: {} + {{- with .Values.extraVolumes }} + {{- toYaml . | nindent 6}} + {{- end }} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-serviceaccount.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-serviceaccount.yaml new file mode 100644 index 0000000..4d748ed --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/metrics-server-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "metrics-server.name" . 
}} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/pdb.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/pdb.yaml new file mode 100644 index 0000000..3831097 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.podDisruptionBudget.enabled -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "metrics-server.name" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/psp.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/psp.yaml new file mode 100644 index 0000000..b5cb7da --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/psp.yaml @@ -0,0 +1,26 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: privileged-{{ template "metrics-server.fullname" . 
}} +spec: + allowedCapabilities: + - '*' + fsGroup: + rule: RunAsAny + privileged: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - '*' + hostPID: true + hostIPC: true + hostNetwork: true + hostPorts: + - min: 1 + max: 65536 +{{- end }} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/role-binding.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/role-binding.yaml new file mode 100644 index 0000000..3169f24 --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/role-binding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "metrics-server.fullname" . }}-auth-reader + namespace: kube-system + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: {{ template "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/tests/test-version.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/tests/test-version.yaml new file mode 100644 index 0000000..3648e6d --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/templates/tests/test-version.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ template "metrics-server.fullname" . }}-test + labels: + app: {{ template "metrics-server.name" . }} + chart: {{ template "metrics-server.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['/bin/sh'] + args: + - -c + - 'wget -qO- https://{{ include "metrics-server.fullname" . }}:{{ .Values.service.port }}/version | grep -F {{ .Values.image.tag }}' + restartPolicy: Never + diff --git a/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/values.yaml b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/values.yaml new file mode 100644 index 0000000..30ca72e --- /dev/null +++ b/charts/rke2-metrics-server/rke2-metrics-server/2.11.100/values.yaml @@ -0,0 +1,113 @@ +rbac: + # Specifies whether RBAC resources should be created + create: true + pspEnabled: false + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +apiService: + # Specifies if the v1beta1.metrics.k8s.io API service should be created. + # + # You typically want this enabled! If you disable API service creation you have to + # manage it outside of this chart for e.g horizontal pod autoscaling to + # work with this release. + create: true + +hostNetwork: + # Specifies if metrics-server should be started in hostNetwork mode. + # + # You would require this enabled if you use alternate overlay networking for pods and + # API server unable to communicate with metrics-server. 
As an example, this is required + # if you use Weave network on EKS + enabled: false + +image: + repository: rancher/hardened-k8s-metrics-server + tag: v0.3.6 + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - registrySecretName + +args: +# enable this if you have self-signed certificates, see: https://github.com/kubernetes-incubator/metrics-server +# - --kubelet-insecure-tls + - --kubelet-preferred-address-types=InternalIP + +resources: {} + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +replicas: 1 + +extraContainers: [] + +podLabels: {} + +podAnnotations: {} +# The following annotations guarantee scheduling for critical add-on pods. +# See more at: https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ +# scheduler.alpha.kubernetes.io/critical-pod: '' + +## Set a pod priorityClassName +priorityClassName: system-node-critical + +extraVolumeMounts: [] +# - name: secrets +# mountPath: /etc/kubernetes/secrets +# readOnly: true + +extraVolumes: [] +# - name: secrets +# secret: +# secretName: kube-apiserver + +livenessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + +readinessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["all"] + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + +service: + annotations: {} + labels: {} + # Add these labels to have metrics-server show up in `kubectl cluster-info` + # kubernetes.io/cluster-service: "true" + # kubernetes.io/name: "Metrics-server" + port: 443 + type: ClusterIP + +podDisruptionBudget: + # https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + enabled: false + minAvailable: + maxUnavailable: + +global: + systemDefaultRegistry: "" diff --git a/configuration.yaml b/configuration.yaml new file mode 100644 index 0000000..56d6667 --- /dev/null +++ 
b/configuration.yaml @@ -0,0 +1,8 @@ +template: live + +sync: +- url: https://github.com/rancher/rke2-charts.git + branch: main-source + dropReleaseCandidates: true +helmRepo: + cname: rke2-charts.rancher.io \ No newline at end of file diff --git a/index.yaml b/index.yaml index 215b65c..dee5a53 100644 --- a/index.yaml +++ b/index.yaml @@ -3,7 +3,7 @@ entries: rke2-canal: - apiVersion: v1 appVersion: v3.13.3 - created: "2021-02-24T21:41:48.737080031Z" + created: "2021-02-25T17:59:12.931728-08:00" description: Install Canal Network Plugin. digest: 4b6ac74aec73a70d12186701660c1f221fdbcb582571029a6c8fbc2738065742 home: https://www.projectcalico.org/ @@ -20,7 +20,7 @@ entries: version: v3.13.300-build20210223 - apiVersion: v1 appVersion: v3.13.3 - created: "2021-02-19T16:11:27.472930693Z" + created: "2021-02-25T17:59:12.931223-08:00" description: Install Canal Network Plugin. digest: 2396b0aca28a6d4a373a251b02e4efa12bbfedf29e37e45904b860176d0c80f8 home: https://www.projectcalico.org/ @@ -38,8 +38,9 @@ entries: rke2-coredns: - apiVersion: v1 appVersion: 1.7.1 - created: "2021-01-08T18:12:00.296423364Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services + created: "2021-02-25T17:59:12.935081-08:00" + description: CoreDNS is a DNS server that chains plugins and provides Kubernetes + DNS Services digest: 335099356a98589e09f1bb940913b0ed6abb8d2c4db91720f87d1cf7697a5cf7 home: https://coredns.io icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png @@ -55,8 +56,9 @@ entries: version: 1.13.800 - apiVersion: v1 appVersion: 1.6.9 - created: "2021-01-22T21:35:45.403680219Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services + created: "2021-02-25T17:59:12.934088-08:00" + description: CoreDNS is a DNS server that chains plugins and provides Kubernetes + DNS Services digest: be60a62ec184cf6ca7b0ed917e6962e8a2578fa1eeef6a835e82d2b7709933d5 home: https://coredns.io icon: 
https://coredns.io/images/CoreDNS_Colour_Horizontal.png @@ -81,8 +83,9 @@ entries: version: 1.10.101 - apiVersion: v1 appVersion: 1.6.9 - created: "2021-02-24T21:41:48.738290233Z" - description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services + created: "2021-02-25T17:59:12.933187-08:00" + description: CoreDNS is a DNS server that chains plugins and provides Kubernetes + DNS Services digest: 869cb592cac545f579b6de6b35de82de4904566fd91826bc16546fddc48fe1c4 home: https://coredns.io icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png @@ -108,8 +111,9 @@ entries: rke2-ingress-nginx: - apiVersion: v1 appVersion: 0.35.0 - created: "2021-02-24T21:42:02.60663315Z" - description: Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + created: "2021-02-25T17:59:12.938912-08:00" + description: Ingress controller for Kubernetes using NGINX as a reverse proxy + and load balancer digest: 2480ed0be9032f8f839913e12f0528128a15483ced57c851baed605156532782 home: https://github.com/kubernetes/ingress-nginx icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png @@ -127,8 +131,9 @@ entries: version: 3.3.000 - apiVersion: v1 appVersion: 0.30.0 - created: "2021-02-19T16:11:27.47593126Z" - description: An nginx Ingress controller that uses ConfigMap to store the nginx configuration. + created: "2021-02-25T17:59:12.93704-08:00" + description: An nginx Ingress controller that uses ConfigMap to store the nginx + configuration. digest: 768ce303918a97a2d0f9a333f4eb0f2ebb3b7f54b849e83c6bdd52f8b513af9b home: https://github.com/kubernetes/ingress-nginx icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c5/Nginx_logo.svg/500px-Nginx_logo.svg.png @@ -149,7 +154,7 @@ entries: rke2-kube-proxy: - apiVersion: v1 appVersion: v1.20.2 - created: "2021-01-25T23:01:11.589999085Z" + created: "2021-02-25T17:59:12.952328-08:00" description: Install Kube Proxy. 
digest: 68f08c49c302bfe23e9c6f8074a21a6a3e0c90fdb16f5e6fb32a5a3ee3f7c717 keywords: @@ -165,7 +170,7 @@ entries: version: v1.20.2 - apiVersion: v1 appVersion: v1.19.8 - created: "2021-02-24T21:41:48.739048333Z" + created: "2021-02-25T17:59:12.951821-08:00" description: Install Kube Proxy. digest: f2bace51d33062e3ac713ebbedd48dd4df56c821dfa52da9fdf71891d601bcde keywords: @@ -181,7 +186,7 @@ entries: version: v1.19.8 - apiVersion: v1 appVersion: v1.19.7 - created: "2021-01-22T21:35:45.405178128Z" + created: "2021-02-25T17:59:12.951293-08:00" description: Install Kube Proxy. digest: def9baa9bc5c12267d3575a03a2e5f2eccc907a6058202ed09a6cd39967790ca keywords: @@ -197,7 +202,7 @@ entries: version: v1.19.7 - apiVersion: v1 appVersion: v1.19.5 - created: "2020-12-17T19:20:49.383692056Z" + created: "2021-02-25T17:59:12.950165-08:00" description: Install Kube Proxy. digest: f74f820857b79601f3b8e498e701297d71f3b37bbf94dc3ae96dfcca50fb80df keywords: @@ -213,7 +218,7 @@ entries: version: v1.19.5 - apiVersion: v1 appVersion: v1.18.16 - created: "2021-02-19T17:03:49.957724823Z" + created: "2021-02-25T17:59:12.94611-08:00" description: Install Kube Proxy. digest: a57acde11e30a9a15330ffec38686b605325b145f21935e79843b28652d46a21 keywords: @@ -229,7 +234,7 @@ entries: version: v1.18.16 - apiVersion: v1 appVersion: v1.18.15 - created: "2021-01-14T18:05:30.822746229Z" + created: "2021-02-25T17:59:12.94555-08:00" description: Install Kube Proxy. digest: 3a6429d05a3d22e3959ceac27db15f922f1033553e8e6b5da2eb7cd18ed9309f keywords: @@ -245,7 +250,7 @@ entries: version: v1.18.15 - apiVersion: v1 appVersion: v1.18.13 - created: "2020-12-10T22:07:42.184767459Z" + created: "2021-02-25T17:59:12.944801-08:00" description: Install Kube Proxy. 
digest: 15d192f5016b8573d2c6f17ab55fa6f14fa1352fcdef2c391a6a477b199867ec keywords: @@ -261,7 +266,7 @@ entries: version: v1.18.13 - apiVersion: v1 appVersion: v1.18.12 - created: "2020-12-07T21:17:34.244857883Z" + created: "2021-02-25T17:59:12.944083-08:00" description: Install Kube Proxy. digest: e1da2b245da23aaa526cb94c04ed48cd3e730b848c0d33e420dcfd5b15374f5e keywords: @@ -277,7 +282,7 @@ entries: version: v1.18.12 - apiVersion: v1 appVersion: v1.18.10 - created: "2020-10-15T22:21:23.252729387Z" + created: "2021-02-25T17:59:12.939693-08:00" description: Install Kube Proxy. digest: 1ae84231365f19d82a4ea7c6b069ce90308147ba77bef072290ef7464ff1694e keywords: @@ -293,7 +298,7 @@ entries: version: v1.18.10 - apiVersion: v1 appVersion: v1.18.9 - created: "2020-10-14T23:04:28.48143194Z" + created: "2021-02-25T17:59:12.948298-08:00" description: Install Kube Proxy. digest: e1e5b6f98c535fa5d90469bd3f731d331bdaa3f9154157d7625b367a7023f399 keywords: @@ -309,7 +314,7 @@ entries: version: v1.18.9 - apiVersion: v1 appVersion: v1.18.8 - created: "2020-09-29T00:14:59.633896455Z" + created: "2021-02-25T17:59:12.947562-08:00" description: Install Kube Proxy. digest: 7765237ddc39c416178242e7a6798d679a50f466ac18d3a412207606cd0d66ed keywords: @@ -325,7 +330,7 @@ entries: version: v1.18.8 - apiVersion: v1 appVersion: v1.18.4 - created: "2020-09-29T00:14:59.632610835Z" + created: "2021-02-25T17:59:12.946814-08:00" description: Install Kube Proxy. digest: b859363c5ecab8c46b53efa34d866b9c27840737ad1afec0eb9729b8968304fb keywords: @@ -342,7 +347,7 @@ entries: rke2-metrics-server: - apiVersion: v1 appVersion: 0.3.6 - created: "2021-02-19T16:11:27.477610954Z" + created: "2021-02-25T17:59:12.953693-08:00" description: Metrics Server is a cluster-wide aggregator of resource usage data. 
digest: 295435f65cc6c0c5ed8fd6b028cac5614b761789c5e09c0483170c3fd46f6e59 home: https://github.com/kubernetes-incubator/metrics-server @@ -361,7 +366,7 @@ entries: version: 2.11.100 - apiVersion: v1 appVersion: 0.3.6 - created: "2021-02-24T21:41:48.739850734Z" + created: "2021-02-25T17:59:12.952919-08:00" description: Metrics Server is a cluster-wide aggregator of resource usage data. digest: a7cbec2f4764c99db298fb4e1f5297246253a3228daf2747281c953059160fc9 home: https://github.com/kubernetes-incubator/metrics-server @@ -378,4 +383,4 @@ entries: urls: - assets/rke2-metrics-server/rke2-metrics-server-2.11.100-build2021022300.tgz version: 2.11.100-build2021022300 -generated: "2021-02-24T21:42:02.60300284Z" +generated: "2021-02-25T17:59:12.927381-08:00" diff --git a/scripts/pull-scripts b/scripts/pull-scripts new file mode 100755 index 0000000..0b5835f --- /dev/null +++ b/scripts/pull-scripts @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +cd $(dirname $0) + +source ./version + +echo "Pulling in charts-build-scripts version ${CHARTS_BUILD_SCRIPT_VERSION}" + +rm -rf ../bin +cd .. + +rm -rf charts-build-scripts +git clone --depth 1 --branch $CHARTS_BUILD_SCRIPT_VERSION https://github.com/rancher/charts-build-scripts.git 2>/dev/null + +cd charts-build-scripts +./scripts/build +mv bin ../bin +cd .. + +rm -rf charts-build-scripts +chmod +x ./bin/charts-build-scripts +./bin/charts-build-scripts --version \ No newline at end of file diff --git a/scripts/regenerate-assets b/scripts/regenerate-assets new file mode 100755 index 0000000..cfcb16d --- /dev/null +++ b/scripts/regenerate-assets @@ -0,0 +1,71 @@ +#!/bin/bash +set -e + +cd $(dirname $0) + +if [[ -z ${BRANCH} ]]; then + branch=$(git rev-parse --abbrev-ref HEAD) +else + echo "Using branch ${BRANCH}" + branch=${BRANCH} +fi + +if [[ -z ${REPOSITORY} ]]; then + echo "Need to provide REPOSITORY as environment variable" + exit 1 +fi + +cd .. 
+ +# Setup +rm -rf ./repository +mkdir -p ./repository +cd repository + +# Pull in branch +echo "Pulling in ${REPOSITORY}@${branch}" +git clone --depth 1 --branch ${branch} ${REPOSITORY} . > /dev/null 2>&1 + +if ! (test -d assets && test -d charts); then + echo "There are no charts or assets in this repository" + cd .. + rm -rf ./repository + exit 1 +fi + +# Copy assets and charts into the new format +for package_assets in assets/*; do + cp -R ${package_assets} ../assets + package_name=$(basename -- ${package_assets}) + for asset in ${package_assets}/*; do + if [[ ${asset} =~ .*\.tgz ]]; then + # Parse structure + asset_name=$(basename -- ${asset%.*}) + chart_name=$(echo ${asset_name} | rev | cut -d- -f2- | rev) + chart_name=$(echo ${chart_name} | sed -r 's/-[[:digit:]\.]+$//') + chart_version=${asset_name#${chart_name}-} + + # Fix chart version for rc version + # e.g. 0.0.0-rc100 -> 0.0.000-rc1 to keep the drop release candidate version logic simple + if [[ ${chart_version} =~ [0-9]{2}$ ]] && [[ ${chart_version} =~ -rc ]]; then + actual_version=${chart_version%-*} + package_version=${chart_version: -2} + chart_version_without_package_version=${chart_version%${package_version}} + rc_version=${chart_version_without_package_version#${actual_version}} + chart_version=${actual_version}${package_version}${rc_version} + fi + + # Dump archives as charts + chart_path=../charts/${package_name}/${chart_name}/${chart_version} + echo "Unarchiving ${asset} to ${chart_path}" + mkdir -p ${chart_path} + tar xvzf ${asset} -C ${chart_path} --strip-components=1 > /dev/null 2>&1 + fi + done +done + +# Go back +cd .. 
+helm repo index --merge ./assets/index.yaml --url assets assets +mv ./assets/index.yaml ./index.yaml +rm -rf ./repository \ No newline at end of file diff --git a/scripts/version b/scripts/version new file mode 100755 index 0000000..3ca1e4d --- /dev/null +++ b/scripts/version @@ -0,0 +1,4 @@ +#!/bin/bash +set -e + +CHARTS_BUILD_SCRIPT_VERSION=v0.0.4 diff --git a/sha256sum/rke2-canal/rke2-canal.sum b/sha256sum/rke2-canal/rke2-canal.sum deleted file mode 100644 index 9545488..0000000 --- a/sha256sum/rke2-canal/rke2-canal.sum +++ /dev/null @@ -1,9 +0,0 @@ -df6f4924e4f542dba5947e64b9528260b681db8475167b88977464dccabb3651 packages/rke2-canal/charts/values.yaml -04a3caab7086b0fef6894bf8f9c29d7eecefad1cf3eaed4742e9305cfc9f6b8a packages/rke2-canal/charts/templates/serviceaccount.yaml -ac1baa8b6d7b02c713b80bc38a853d7a8c70f67847f08f1c5e81d4157ccf85e4 packages/rke2-canal/charts/templates/rbac.yaml -707744e262fccd7ff3e9e138be55aebedba32a29206deb0948d97b0ea9f0f08d packages/rke2-canal/charts/templates/crd.yaml -ebbce05e2f4f53227016183e3638bbd576897598b91639ce3942bc738e7ec15c packages/rke2-canal/charts/templates/config.yaml -bf4bfa1018dda5895521ac50a4a6102d6f3a1659e9c52a37b70b4b8001452ea1 packages/rke2-canal/charts/templates/NOTES.txt -90f1d8a3a944e4b0fde9f2842b2a3531a95102d5ce3e3a893652e200c26b947f packages/rke2-canal/charts/templates/_helpers.tpl -7878a574ad9f673266ce585d3688428796e3e182a7f672026258ef6b92393dd0 packages/rke2-canal/charts/templates/daemonset.yaml -f9520bfc03135b7293e8e7fc13933331fd2e71e7f23077a0bdd814d17d2f2dc3 packages/rke2-canal/charts/Chart.yaml diff --git a/sha256sum/rke2-coredns/rke2-coredns.sum b/sha256sum/rke2-coredns/rke2-coredns.sum deleted file mode 100644 index 95bb05f..0000000 --- a/sha256sum/rke2-coredns/rke2-coredns.sum +++ /dev/null @@ -1,2 +0,0 @@ -771f526366bc9f1758d13e7807a51512fc03ad6d4ed48c162218a776292c11da packages/rke2-coredns/package.yaml -52156705d3c82cd638e0a00aa3d836ba90cecf293dec11a48ce7fac08466692f 
packages/rke2-coredns/rke2-coredns.patch diff --git a/sha256sum/rke2-ingress-nginx/rke2-ingress-nginx.sum b/sha256sum/rke2-ingress-nginx/rke2-ingress-nginx.sum deleted file mode 100644 index f0354a5..0000000 --- a/sha256sum/rke2-ingress-nginx/rke2-ingress-nginx.sum +++ /dev/null @@ -1,2 +0,0 @@ -3ab8fe9454ce25221d16fc687c3942744c1ddb73e1dd0ada9ddf7bc732935e3f packages/rke2-ingress-nginx/rke2-ingress-nginx.patch -816cccb4bd7850999be977f709b2ed8fb16396e3e89baeefa3237069be6a2e85 packages/rke2-ingress-nginx/package.yaml diff --git a/sha256sum/rke2-kube-proxy/rke2-kube-proxy.sum b/sha256sum/rke2-kube-proxy/rke2-kube-proxy.sum deleted file mode 100644 index 0bcb7eb..0000000 --- a/sha256sum/rke2-kube-proxy/rke2-kube-proxy.sum +++ /dev/null @@ -1,8 +0,0 @@ -134f693f1882219122df41796027b464f724089d4ab07a0af589fc9cc98083be packages/rke2-kube-proxy/charts/values.yaml -d1de4d515c8b9d215a9410a4c7ce04b0593583f9edaa16ea3a5b939ed6d44560 packages/rke2-kube-proxy/charts/templates/serviceaccount.yaml -3ea61d8092a4c6841e1a28d9b5db86110a263b693fb2845679cd2fc1891b5763 packages/rke2-kube-proxy/charts/templates/rbac.yaml -b75c849a1d9d324710abf0972e3d64392b45a7fca8a09dbe056390699eecf659 packages/rke2-kube-proxy/charts/templates/config.yaml -41b28ddbe82a714e88b3e3f62bd1f96b794f02384a560c7d21b4a0d680400451 packages/rke2-kube-proxy/charts/templates/NOTES.txt -00e5a2a05f04e3a96f0d7b68516ca830bc35dae1ce8a61bbe78adc3b53f02c6d packages/rke2-kube-proxy/charts/templates/_helpers.tpl -6a1dcaed632df8e1c4478255d9fba792f4ee7a10aa5fe88861d0eed4357ca392 packages/rke2-kube-proxy/charts/templates/daemonset.yaml -eb338f6abc3affcbe1eccbbfca0aa89c8a2a9374bb5b368a529e40515fa1e36e packages/rke2-kube-proxy/charts/Chart.yaml diff --git a/sha256sum/rke2-metrics-server/rke2-metrics-server.sum b/sha256sum/rke2-metrics-server/rke2-metrics-server.sum deleted file mode 100644 index 2841321..0000000 --- a/sha256sum/rke2-metrics-server/rke2-metrics-server.sum +++ /dev/null @@ -1,2 +0,0 @@ 
-d70eeb3eed4cbf768778e64cb74fe0341b0ad05ced3685b82d4e62af25fe150a packages/rke2-metrics-server/rke2-metrics-server.patch -87c58bd17bd974b3956e43df568d87fc6ea7b690c68e88d9fa8d238702030062 packages/rke2-metrics-server/package.yaml