(dev-v2.6-archive) Rebase to 843d2f3d

(partially cherry picked from commit 52b63ffeb0)
pull/1680/head
Jiaqi Luo 2021-10-13 16:36:18 -07:00 committed by Arvind Iyengar
parent cde00ac47a
commit b91330c7c3
No known key found for this signature in database
GPG Key ID: A8DD9BFD6C811498
5 changed files with 155 additions and 11 deletions

View File

@ -1,6 +1,6 @@
--- charts-original/Chart.yaml
+++ charts/Chart.yaml
@@ -1,9 +1,15 @@
+annotations:
+ catalog.cattle.io/certified: rancher
+ catalog.cattle.io/namespace: cattle-monitoring-system
@ -9,7 +9,23 @@
+ catalog.cattle.io/os: linux
apiVersion: v1
-name: prometheus-adapter
-version: 2.14.0
-appVersion: v0.8.4
+name: rancher-prometheus-adapter
+version: 2.17.0
+appVersion: v0.9.0
description: A Helm chart for k8s prometheus adapter
-home: https://github.com/DirectXMan12/k8s-prometheus-adapter
+home: https://github.com/kubernetes-sigs/prometheus-adapter
keywords:
- hpa
- metrics
@@ -11,7 +17,7 @@
- adapter
sources:
- https://github.com/kubernetes/charts
- - https://github.com/DirectXMan12/k8s-prometheus-adapter
+ - https://github.com/kubernetes-sigs/prometheus-adapter
maintainers:
- name: mattiasgees
email: mattias.gees@jetstack.io

View File

@ -0,0 +1,37 @@
--- charts-original/README.md
+++ charts/README.md
@@ -1,6 +1,6 @@
# Prometheus Adapter
-Installs the [Prometheus Adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) for the Custom Metrics API. Custom metrics are used in Kubernetes by [Horizontal Pod Autoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to scale workloads based upon your own metric pulled from an external metrics provider like Prometheus. This chart complements the [metrics-server](https://github.com/helm/charts/tree/master/stable/metrics-server) chart that provides resource only metrics.
+Installs the [Prometheus Adapter](https://github.com/kubernetes-sigs/prometheus-adapter) for the Custom Metrics API. Custom metrics are used in Kubernetes by [Horizontal Pod Autoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to scale workloads based upon your own metric pulled from an external metrics provider like Prometheus. This chart complements the [metrics-server](https://github.com/helm/charts/tree/master/stable/metrics-server) chart that provides resource only metrics.
## Prerequisites
@@ -70,7 +70,7 @@
### Adapter Rules
-Additionally, the chart comes with a set of default rules out of the box but they may pull in too many metrics or not map them correctly for your needs. Therefore, it is recommended to populate `rules.custom` with a list of rules (see the [config document](https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md) for the proper format).
+Additionally, the chart comes with a set of default rules out of the box but they may pull in too many metrics or not map them correctly for your needs. Therefore, it is recommended to populate `rules.custom` with a list of rules (see the [config document](https://github.com/kubernetes-sigs/prometheus-adapter/blob/master/docs/config.md) for the proper format).
### Horizontal Pod Autoscaler Metrics
@@ -122,7 +122,7 @@
nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[3m])) by (<<.GroupBy>>)
resources:
overrides:
- instance:
+ node:
resource: node
namespace:
resource: namespace
@@ -134,7 +134,7 @@
nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>)
resources:
overrides:
- instance:
+ node:
resource: node
namespace:
resource: namespace

View File

@ -1,6 +1,14 @@
--- charts-original/templates/deployment.yaml
+++ charts/templates/deployment.yaml
@@ -10,6 +10,7 @@
namespace: {{ .Release.Namespace }}
spec:
replicas: {{ .Values.replicas }}
+ strategy: {{ toYaml .Values.strategy | nindent 4 }}
selector:
matchLabels:
app: {{ template "k8s-prometheus-adapter.name" . }}
@@ -40,7 +41,7 @@
{{- end}}
containers:
- name: {{ .Chart.Name }}
@ -9,7 +17,22 @@
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- /adapter
@@ -67,12 +68,14 @@
port: https
scheme: HTTPS
initialDelaySeconds: 30
+ timeoutSeconds: 5
readinessProbe:
httpGet:
path: /healthz
port: https
scheme: HTTPS
initialDelaySeconds: 30
+ timeoutSeconds: 5
{{- if .Values.resources }}
resources:
{{- toYaml .Values.resources | nindent 10 }}
@@ -102,13 +105,21 @@
name: volume-serving-cert
readOnly: true
{{- end }}
@ -24,10 +47,14 @@
priorityClassName: {{ .Values.priorityClassName }}
- tolerations:
- {{- toYaml .Values.tolerations | nindent 8 }}
+ {{- if .Values.podSecurityContext }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ {{- end }}
+ tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
+ {{- if .Values.tolerations }}
+ {{- toYaml .Values.tolerations | nindent 8 }}
+ {{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}

View File

@ -0,0 +1,10 @@
--- charts-original/templates/service.yaml
+++ charts/templates/service.yaml
@@ -19,4 +19,6 @@
app: {{ template "k8s-prometheus-adapter.name" . }}
release: {{ .Release.Name }}
type: {{ .Values.service.type }}
-
+ {{- if .Values.service.clusterIP }}
+ clusterIP: {{ .Values.service.clusterIP }}
+ {{- end }}

View File

@ -1,6 +1,6 @@
--- charts-original/values.yaml
+++ charts/values.yaml
@@ -1,9 +1,13 @@
# Default values for k8s-prometheus-adapter..
+global:
+ cattle:
@ -10,7 +10,61 @@
image:
- repository: directxman12/k8s-prometheus-adapter-amd64
- tag: v0.8.4
+ repository: rancher/mirrored-prometheus-adapter-prometheus-adapter
+ tag: v0.9.0
pullPolicy: IfNotPresent
logLevel: 4
@@ -25,6 +29,11 @@
replicas: 1
+# k8s 1.21 needs fsGroup to be set for non root deployments
+# ref: https://github.com/kubernetes/kubernetes/issues/70679
+podSecurityContext:
+ fsGroup: 10001
+
rbac:
# Specifies whether RBAC resources should be created
create: true
@@ -90,7 +99,7 @@
# nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[3m])) by (<<.GroupBy>>)
# resources:
# overrides:
-# instance:
+# node:
# resource: node
# namespace:
# resource: namespace
@@ -102,7 +111,7 @@
# nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>)
# resources:
# overrides:
-# instance:
+# node:
# resource: node
# namespace:
# resource: namespace
@@ -115,6 +124,7 @@
annotations: {}
port: 443
type: ClusterIP
+# clusterIP: 1.2.3.4
tls:
enable: false
@@ -168,6 +178,13 @@
# When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet
# dnsPolicy: ClusterFirstWithHostNet
+# Deployment strategy type
+strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 25%
+ maxSurge: 25%
+
podDisruptionBudget:
# Specifies if PodDisruptionBudget should be enabled
# When enabled, minAvailable or maxUnavailable should also be defined.