clean up main

pull/737/head
Caleb Bron 2020-09-30 16:45:24 -07:00
parent eeccb10836
commit 2087de28b5
138 changed files with 0 additions and 7763 deletions

@@ -1,16 +0,0 @@
name: CI-pullrequest
on:
pull_request:
branches:
- dev-v2.5
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Validate
run: sudo make validate

@@ -1,20 +0,0 @@
name: CI-push
on:
push:
branches:
- dev-v2.5
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Run CI scripts
run: |-
sudo make ci
- uses: stefanzweifel/git-auto-commit-action@480e111bf8274f3bbd429aef51504f71c808e3e4
with:
commit_message: Generated changes

.gitignore
@@ -1,5 +0,0 @@
/packages/**/charts-original
/packages/*/charts/charts/*.tgz
/.idea
.DS_Store
requirements.lock

@@ -1,26 +0,0 @@
ci: bootstrap
./scripts/ci
prepare: bootstrap
./scripts/prepare
bootstrap:
./scripts/bootstrap
charts: bootstrap prepare
./scripts/generate-charts
patch: bootstrap
./scripts/generate-patch
validate: bootstrap
./scripts/validate
mirror: bootstrap
./scripts/image-mirror
clean:
./scripts/clean
.DEFAULT_GOAL := ci

@@ -1,15 +0,0 @@
apiVersion: v1
appVersion: v0.1.0-rc1
description: Backup-restore-operator can be used to back up Kubernetes cluster metadata
  for certain applications and restore from it
name: backup-restore-operator
keywords:
- applications
- infrastructure
version: 0.1.0
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/namespace: cattle-resources-system
catalog.cattle.io/release-name: backup-restore-operator
catalog.cattle.io/ui-component: backup-restore-operator
catalog.cattle.io/provides-gvr: resources.cattle.io.resourceset/v1

@@ -1,119 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: backups.resources.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .status.storageLocation
name: Location
type: string
- JSONPath: .status.backupType
name: Type
type: string
- JSONPath: .status.filename
name: Latest-Backup
type: string
- JSONPath: .spec.resourceSetName
name: ResourceSet
type: string
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
- JSONPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
group: resources.cattle.io
names:
kind: Backup
plural: backups
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
encryptionConfigSecretName:
description: Name of the Secret containing the encryption config
type: string
resourceSetName:
description: Name of the ResourceSet CR to use for backup
type: string
retentionCount:
minimum: 1
type: integer
schedule:
description: Cron schedule for recurring backups
example:
Descriptors: '@midnight'
Standard crontab specs: 0 0 * * *
type: string
storageLocation:
nullable: true
properties:
s3:
nullable: true
properties:
bucketName:
type: string
credentialSecretName:
type: string
credentialSecretNamespace:
type: string
endpoint:
type: string
endpointCA:
type: string
folder:
type: string
insecureTLSSkipVerify:
type: boolean
region:
type: string
type: object
type: object
required:
- resourceSetName
type: object
status:
properties:
backupType:
type: string
conditions:
items:
properties:
lastTransitionTime:
type: string
lastUpdateTime:
type: string
message:
type: string
reason:
type: string
status:
type: string
type:
type: string
type: object
nullable: true
type: array
filename:
type: string
lastSnapshotTs:
type: string
nextSnapshotAt:
type: string
observedGeneration:
type: integer
storageLocation:
type: string
summary:
type: string
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true
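
For reference, a minimal Backup resource satisfying the schema above could look like the following sketch; the metadata name and S3 details are illustrative assumptions (echoing the chart's values.yaml placeholders), and `resourceSetName` is the only required field:

```
apiVersion: resources.cattle.io/v1
kind: Backup
metadata:
  name: nightly-backup                      # hypothetical name
spec:
  resourceSetName: rancher-resource-set     # required by the schema above
  schedule: "@midnight"                     # optional cron schedule for recurring backups
  retentionCount: 3
  storageLocation:
    s3:
      bucketName: rancherbackups            # assumed bucket
      credentialSecretName: creds
      credentialSecretNamespace: default
      region: us-west-2
      endpoint: s3.us-west-2.amazonaws.com
```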

@@ -1,94 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: resourcesets.resources.cattle.io
spec:
group: resources.cattle.io
names:
kind: ResourceSet
plural: resourcesets
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
controllerReferences:
items:
properties:
apiVersion:
type: string
name:
type: string
namespace:
type: string
replicas:
type: integer
resource:
type: string
type: object
nullable: true
type: array
resourceSelectors:
items:
properties:
apiVersion:
type: string
kinds:
items:
type: string
nullable: true
type: array
kindsRegexp:
type: string
labelSelectors:
nullable: true
properties:
matchExpressions:
items:
properties:
key:
type: string
operator:
type: string
values:
items:
type: string
nullable: true
type: array
type: object
nullable: true
type: array
matchLabels:
additionalProperties:
type: string
nullable: true
type: object
type: object
namespaceRegexp:
type: string
namespaces:
items:
type: string
nullable: true
type: array
resourceNameRegexp:
type: string
resourceNames:
items:
type: string
nullable: true
type: array
type: object
nullable: true
required:
- apiVersion
type: array
required:
- resourceSelectors
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -1,102 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: restores.resources.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .status.backupSource
name: Backup-Source
type: string
- JSONPath: .spec.backupFilename
name: Backup-File
type: string
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
- JSONPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
group: resources.cattle.io
names:
kind: Restore
plural: restores
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
backupFilename:
type: string
deleteTimeoutSeconds:
maximum: 10
type: integer
encryptionConfigSecretName:
type: string
prune:
nullable: true
type: boolean
storageLocation:
nullable: true
properties:
s3:
nullable: true
properties:
bucketName:
type: string
credentialSecretName:
type: string
credentialSecretNamespace:
type: string
endpoint:
type: string
endpointCA:
type: string
folder:
type: string
insecureTLSSkipVerify:
type: boolean
region:
type: string
type: object
type: object
required:
- backupFilename
type: object
status:
properties:
backupSource:
type: string
conditions:
items:
properties:
lastTransitionTime:
type: string
lastUpdateTime:
type: string
message:
type: string
reason:
type: string
status:
type: string
type:
type: string
type: object
nullable: true
type: array
observedGeneration:
type: integer
restoreCompletionTs:
type: string
summary:
type: string
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true
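
Similarly, a hedged sketch of a Restore resource against the schema above; the backup filename and S3 details are placeholders, and `backupFilename` is the only required field:

```
apiVersion: resources.cattle.io/v1
kind: Restore
metadata:
  name: restore-nightly                     # hypothetical name
spec:
  backupFilename: nightly-backup.tar.gz     # required; placeholder filename
  prune: true                               # optional boolean per the schema above
  storageLocation:
    s3:
      bucketName: rancherbackups            # assumed bucket
      credentialSecretName: creds
      credentialSecretNamespace: default
      region: us-west-2
      endpoint: s3.us-west-2.amazonaws.com
```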

@@ -1,76 +0,0 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes,
so add the Linux tolerations below to allow workloads to be scheduled onto those nodes
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "backupRestore.fullname" -}}
{{- .Chart.Name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "backupRestore.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "backupRestore.labels" -}}
helm.sh/chart: {{ include "backupRestore.chart" . }}
{{ include "backupRestore.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "backupRestore.selectorLabels" -}}
app.kubernetes.io/name: {{ include "backupRestore.fullname" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
resources.cattle.io/operator: backup-restore
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "backupRestore.serviceAccountName" -}}
{{ include "backupRestore.fullname" . }}
{{- end }}
{{- define "backupRestore.s3SecretName" -}}
{{- printf "%s-%s" .Chart.Name "s3" | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "backupRestore.pvcName" -}}
{{ include "backupRestore.fullname" . }}
{{- end }}
{{- define "backupRestore.nfsPVName" -}}
{{ include "backupRestore.fullname" . }}
{{- end }}

@@ -1,14 +0,0 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "backupRestore.fullname" . }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "backupRestore.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io

@@ -1,58 +0,0 @@
{{- if and .Values.s3.enabled .Values.persistence.enabled }}
{{- fail "\n\nCannot configure both s3 and PV for storing backups" }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "backupRestore.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "backupRestore.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "backupRestore.selectorLabels" . | nindent 8 }}
annotations:
checksum/secret: {{ include (print $.Template.BasePath "/s3-secret.yaml") . | sha256sum }}
spec:
serviceAccountName: {{ include "backupRestore.serviceAccountName" . }}
containers:
- name: {{ .Chart.Name }}
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: Always
env:
- name: CHART_NAMESPACE
value: {{ .Release.Namespace }}
{{- if .Values.s3.enabled }}
- name: DEFAULT_S3_BACKUP_STORAGE_LOCATION
value: {{ include "backupRestore.s3SecretName" . }}
{{- end }}
{{- if .Values.persistence.enabled }}
- name: DEFAULT_PERSISTENCE_ENABLED
value: "persistence-enabled"
volumeMounts:
- mountPath: "/var/lib/backups"
name: pv-storage
volumes:
- name: pv-storage
persistentVolumeClaim:
claimName: {{ include "backupRestore.pvcName" . }}
{{- end }}
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.nodeSelector }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
tolerations:
{{- include "linux-node-tolerations" . | nindent 8}}
{{- with .Values.tolerations }}
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -1,27 +0,0 @@
{{- if and .Values.persistence.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "backupRestore.pvcName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
spec:
accessModes:
- ReadWriteOnce
resources:
{{- with .Values.persistence }}
requests:
storage: {{ .size | quote }}
{{- if .storageClass }}
{{- if (eq "-" .storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: {{ .storageClass | quote }}
{{- end }}
{{- end }}
{{- if .volumeName }}
volumeName: {{ .volumeName | quote }}
{{- end }}
{{- end }}
{{- end }}

@@ -1,54 +0,0 @@
apiVersion: resources.cattle.io/v1
kind: ResourceSet
metadata:
name: rancher-resource-set
resourceSelectors:
- apiVersion: "v1"
kindsRegexp: "^namespaces$"
resourceNameRegexp: "^cattle-|^p-|^c-|^user-|^u-"
resourceNames:
- "local"
- apiVersion: "v1"
kindsRegexp: "^Secret$|^serviceaccounts$"
namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
- apiVersion: "v1"
kindsRegexp: "^configmaps$"
namespaces:
- "cattle-system"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^roles$|^rolebindings$"
namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNameRegexp: "^cattle-|^clusterrolebinding-|^globaladmin-user-|^grb-u-"
resourceNames:
- "eks-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNameRegexp: "^cattle-|^p-|^c-|^local-|^user-|^u-|^project-|^create-ns$"
resourceNames:
- "eks-operator"
- apiVersion: "apiextensions.k8s.io/v1beta1"
kindsRegexp: "."
resourceNameRegexp: "management.cattle.io$|project.cattle.io$|catalog.cattle.io$|eks.cattle.io$|resources.cattle.io$"
- apiVersion: "management.cattle.io/v3"
kindsRegexp: "."
- apiVersion: "project.cattle.io/v3"
kindsRegexp: "."
- apiVersion: "catalog.cattle.io/v1"
kindsRegexp: "^clusterrepos$"
- apiVersion: "resources.cattle.io/v1"
kindsRegexp: "^ResourceSet$"
- apiVersion: "eks.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
resourceNames:
- "eks-config-operator"
namespaces:
- "cattle-system"
controllerReferences:
- apiVersion: "apps/v1"
resource: "deployments"
name: "rancher"
namespace: "cattle-system"

@@ -1,33 +0,0 @@
{{- if .Values.s3.enabled -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "backupRestore.s3SecretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
type: Opaque
stringData:
{{- with .Values.s3 }}
{{- if .credentialSecretName }}
credentialSecretName: {{ .credentialSecretName }}
{{- end }}
{{- if .credentialSecretNamespace }}
credentialSecretNamespace: {{ .credentialSecretNamespace }}
{{- end }}
{{- if .region }}
region: {{ .region }}
{{- end }}
bucketName: {{ .bucketName }}
{{- if .folder }}
folder: {{ .folder }}
{{- end }}
endpoint: {{ .endpoint }}
{{- if .endpointCA }}
endpointCA: {{ .endpointCA }}
{{- end }}
{{- if .insecureTLSSkipVerify }}
insecureTLSSkipVerify: {{ .insecureTLSSkipVerify }}
{{- end }}
{{- end }}
{{ end }}

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "backupRestore.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}

@@ -1,49 +0,0 @@
image:
repository: rancher/backup-restore-operator
tag: v0.1.0-rc1
## Default s3 bucket for storing all backup files created by the backup-restore-operator
s3:
enabled: false
  ## credentialSecretName, if set, should be the name of the Secret containing AWS credentials.
  ## To use an IAM role, don't set this field
credentialSecretName: creds
credentialSecretNamespace: ""
region: us-west-2
bucketName: rancherbackups
folder: base folder
endpoint: s3.us-west-2.amazonaws.com
endpointCA: base64 encoded CA cert
# insecureTLSSkipVerify: optional
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
## If persistence is enabled, the operator will create a PVC with mountPath /var/lib/backups
persistence:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack).
## Refer https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
##
storageClass: "-"
  ## If you disable dynamic provisioning by setting storageClass to "-" above
  ## and want to target a particular PV, provide the name of the target volume
  volumeName: ""
  ## Only certain StorageClasses allow resizing PVs; refer to https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/
size: 2Gi
global:
cattle:
systemDefaultRegistry: ""
nodeSelector: {}
tolerations: []
affinity: {}

@@ -1,2 +0,0 @@
generateCRDChart:
enabled: true

@@ -1,2 +0,0 @@
url: https://github.com/rancher/fleet/releases/download/v0.3.0-beta2/fleet-agent-0.3.0-beta2.tgz
packageVersion: 00

@@ -1,2 +0,0 @@
url: https://github.com/rancher/fleet/releases/download/v0.3.0-beta2/fleet-crd-0.3.0-beta2.tgz
packageVersion: 00

@@ -1,2 +0,0 @@
url: https://github.com/rancher/fleet/releases/download/v0.3.0-beta2/fleet-0.3.0-beta2.tgz
packageVersion: 00

@@ -1,14 +0,0 @@
apiVersion: v1
appVersion: v0.0.1
description: The cis-operator enables running CIS benchmark security scans on a Kubernetes
  cluster
name: rancher-cis-benchmark
version: 0.0.1
keywords:
- security
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/namespace: cis-operator-system
catalog.cattle.io/release-name: rancher-cis-benchmark
catalog.cattle.io/ui-component: rancher-cis-benchmark
catalog.cattle.io/provides-gvr: cis.cattle.io.clusterscans/v1

@@ -1,9 +0,0 @@
# Rancher CIS Benchmark Chart
The cis-operator enables running CIS benchmark security scans on a Kubernetes cluster and generates compliance reports.
# Installation
```
helm install rancher-cis-benchmark ./ --create-namespace -n cis-operator-system
```
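
After installation, a scan is started by creating a ClusterScan resource. A minimal sketch against the `clusterscans.cis.cattle.io` schema shown below in this commit, assuming one of the chart's bundled profiles:

```
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: example-scan                        # hypothetical name
spec:
  scanProfileName: rke-profile-permissive   # one of the bundled ClusterScanProfiles
```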

@@ -1,112 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscans.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .status.lastRunScanProfileName
name: ClusterScanProfile
type: string
- JSONPath: .status.summary.total
name: Total
type: string
- JSONPath: .status.summary.pass
name: Pass
type: string
- JSONPath: .status.summary.fail
name: Fail
type: string
- JSONPath: .status.summary.skip
name: Skip
type: string
- JSONPath: .status.summary.notApplicable
name: Not Applicable
type: string
- JSONPath: .status.lastRunTimestamp
name: LastRunTimestamp
type: string
group: cis.cattle.io
names:
kind: ClusterScan
plural: clusterscans
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
scanProfileName:
nullable: true
type: string
type: object
status:
properties:
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
display:
nullable: true
properties:
error:
type: boolean
message:
nullable: true
type: string
state:
nullable: true
type: string
transitioning:
type: boolean
type: object
lastRunScanProfileName:
nullable: true
type: string
lastRunTimestamp:
nullable: true
type: string
observedGeneration:
type: integer
summary:
nullable: true
properties:
fail:
type: integer
notApplicable:
type: integer
pass:
type: integer
skip:
type: integer
total:
type: integer
type: object
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -1,49 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscanbenchmarks.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.clusterProvider
name: ClusterProvider
type: string
- JSONPath: .spec.minKubernetesVersion
name: MinKubernetesVersion
type: string
- JSONPath: .spec.maxKubernetesVersion
name: MaxKubernetesVersion
type: string
group: cis.cattle.io
names:
kind: ClusterScanBenchmark
plural: clusterscanbenchmarks
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
clusterProvider:
nullable: true
type: string
customBenchmarkConfigMapName:
nullable: true
type: string
customBenchmarkConfigMapNameSpace:
nullable: true
type: string
maxKubernetesVersion:
nullable: true
type: string
minKubernetesVersion:
nullable: true
type: string
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -1,37 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscanprofiles.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.benchmarkVersion
name: BenchmarkVersion
type: string
group: cis.cattle.io
names:
kind: ClusterScanProfile
plural: clusterscanprofiles
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
benchmarkVersion:
nullable: true
type: string
skipTests:
items:
nullable: true
type: string
nullable: true
type: array
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -1,40 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterscanreports.cis.cattle.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.lastRunTimestamp
name: LastRunTimestamp
type: string
- JSONPath: .spec.benchmarkVersion
name: BenchmarkVersion
type: string
group: cis.cattle.io
names:
kind: ClusterScanReport
plural: clusterscanreports
scope: Cluster
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
properties:
benchmarkVersion:
nullable: true
type: string
lastRunTimestamp:
nullable: true
type: string
reportJSON:
nullable: true
type: string
type: object
type: object
version: v1
versions:
- name: v1
served: true
storage: true

@@ -1,23 +0,0 @@
{{/* Ensure namespace is set the same everywhere */}}
{{- define "cis.namespace" -}}
{{- .Release.Namespace | default "cis-operator-system" -}}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes,
so add the Linux tolerations below to allow workloads to be scheduled onto those nodes
*/}}
{{- define "linux_node_tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}

@@ -1,8 +0,0 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: cis-1.5
spec:
clusterProvider: ""
minKubernetesVersion: "1.15.0"

@@ -1,8 +0,0 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: eks-1.0
spec:
clusterProvider: eks
minKubernetesVersion: "1.15.0"

@@ -1,8 +0,0 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: gke-1.0
spec:
clusterProvider: gke
minKubernetesVersion: "1.15.0"

@@ -1,8 +0,0 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.5-hardened
spec:
clusterProvider: rke
minKubernetesVersion: "1.15.0"

@@ -1,8 +0,0 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
name: rke-cis-1.5-permissive
spec:
clusterProvider: rke
minKubernetesVersion: "1.15.0"

@@ -1,51 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: cis-admin
namespace: {{ template "cis.namespace" . }}
rules:
- apiGroups:
- cis.cattle.io
resources:
- clusterscanbenchmarks
- clusterscanprofiles
- clusterscans
- clusterscanreports
verbs: ["create", "update", "delete", "patch","get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
namespace: {{ template "cis.namespace" . }}
name: cis-edit
rules:
- apiGroups:
- cis.cattle.io
resources:
- clusterscanbenchmarks
- clusterscanprofiles
- clusterscans
- clusterscanreports
verbs: ["create", "update", "delete", "patch","get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
namespace: {{ template "cis.namespace" . }}
name: cis-view
rules:
- apiGroups:
- cis.cattle.io
resources:
- clusterscanbenchmarks
- clusterscanprofiles
- clusterscans
- clusterscanreports
verbs: ["get", "watch", "list"]

@@ -1,11 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: default-clusterscanprofiles
namespace: {{ template "cis.namespace" . }}
data:
# Default ClusterScanProfiles per cluster provider type
rke: "rke-profile-permissive"
eks: "eks-profile"
gke: "gke-profile"
default: "cis-1.5-profile"

@@ -1,46 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: cis-operator
namespace: {{ template "cis.namespace" . }}
labels:
cis.cattle.io/operator: cis-operator
spec:
selector:
matchLabels:
cis.cattle.io/operator: cis-operator
template:
metadata:
labels:
cis.cattle.io/operator: cis-operator
spec:
serviceAccountName: cis-operator-serviceaccount
containers:
- name: cis-operator
image: '{{ template "system_default_registry" . }}{{ .Values.image.cisoperator.repository }}:{{ .Values.image.cisoperator.tag }}'
imagePullPolicy: Always
env:
- name: SECURITY_SCAN_IMAGE
value: {{ template "system_default_registry" . }}{{ .Values.image.securityScan.repository }}
- name: SECURITY_SCAN_IMAGE_TAG
value: {{ .Values.image.securityScan.tag }}
- name: SONOBUOY_IMAGE
value: {{ template "system_default_registry" . }}{{ .Values.image.sonobuoy.repository }}
- name: SONOBUOY_IMAGE_TAG
value: {{ .Values.image.sonobuoy.tag }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.nodeSelector }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
tolerations:
{{- include "linux_node_tolerations" . | nindent 8}}
{{- with .Values.tolerations }}
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -1,15 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-allow-all
namespace: {{ template "cis.namespace" . }}
spec:
podSelector: {}
ingress:
- {}
egress:
- {}
policyTypes:
- Ingress
- Egress

@@ -1,20 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: patch-sa
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
template:
spec:
serviceAccountName: cis-operator-serviceaccount
restartPolicy: Never
containers:
- name: sa
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
args: ["-n", {{ template "cis.namespace" . }}]
backoffLimit: 1

@@ -1,43 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-operator-role
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-operator-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cis-operator-role
subjects:
- kind: ServiceAccount
name: cis-serviceaccount
namespace: {{ template "cis.namespace" . }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cis-operator-installer
subjects:
- kind: ServiceAccount
name: cis-operator-serviceaccount
namespace: {{ template "cis.namespace" . }}
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io

@@ -1,9 +0,0 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: cis-1.5-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: cis-1.5

@@ -1,9 +0,0 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke-profile-hardened
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke-cis-1.5-hardened

@@ -1,9 +0,0 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: rke-profile-permissive
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: rke-cis-1.5-permissive

@@ -1,9 +0,0 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: eks-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: eks-1.0

@@ -1,9 +0,0 @@
---
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
name: gke-profile
annotations:
clusterscanprofile.cis.cattle.io/builtin: "true"
spec:
benchmarkVersion: gke-1.0

@@ -1,14 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: {{ template "cis.namespace" . }}
name: cis-operator-serviceaccount
---
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: {{ template "cis.namespace" . }}
labels:
app.kubernetes.io/name: rancher-cis-benchmark
app.kubernetes.io/instance: release-name
name: cis-serviceaccount

@@ -1,39 +0,0 @@
# Default values for rancher-cis-benchmark.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
cisoperator:
repository: rancher/cis-operator
tag: v0.0.6
securityScan:
repository: rancher/security-scan
tag: v0.2.0
sonobuoy:
repository: rancher/sonobuoy-sonobuoy
tag: v0.16.3
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
global:
cattle:
systemDefaultRegistry: ""
kubectl:
repository: rancher/kubectl
tag: v1.18.6

@@ -1,2 +0,0 @@
generateCRDChart:
enabled: true

@@ -1,15 +0,0 @@
# Changelog
All notable changes from the upstream OPA Gatekeeper chart will be added to this file
## [Package Version 00] - 2020-09-10
### Added
- Enabled the CRD chart generator in `package.yaml`
### Modified
- Updated namespace to `cattle-gatekeeper-system`
- Updated for Helm 3 compatibility
- Moved crds to `crds` directory
- Removed `crd-install` hooks and templates from crds
### Removed
- Removed `gatekeeper-system-namespace.yaml` as Rancher handles namespaces for chart installation

@@ -1,35 +0,0 @@
apiVersion: templates.gatekeeper.sh/v1beta1
kind: ConstraintTemplate
metadata:
name: k8sallowedrepos
spec:
crd:
spec:
names:
kind: K8sAllowedRepos
validation:
# Schema for the `parameters` field
openAPIV3Schema:
properties:
repos:
type: array
items:
type: string
targets:
- target: admission.k8s.gatekeeper.sh
rego: |
package k8sallowedrepos
violation[{"msg": msg}] {
container := input.review.object.spec.containers[_]
satisfied := [good | repo = input.parameters.repos[_] ; good = startswith(container.image, repo)]
not any(satisfied)
msg := sprintf("container <%v> has an invalid image repo <%v>, allowed repos are %v", [container.name, container.image, input.parameters.repos])
}
violation[{"msg": msg}] {
container := input.review.object.spec.initContainers[_]
satisfied := [good | repo = input.parameters.repos[_] ; good = startswith(container.image, repo)]
not any(satisfied)
msg := sprintf("container <%v> has an invalid image repo <%v>, allowed repos are %v", [container.name, container.image, input.parameters.repos])
}
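
As a usage sketch (not itself a file in this commit), a constraint instantiating this template would use the `K8sAllowedRepos` kind declared in `crd.spec.names`, following Gatekeeper's standard `constraints.gatekeeper.sh` convention; the match scope and repo prefix here are assumptions:

```
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sAllowedRepos
metadata:
  name: pods-from-rancher-repo              # hypothetical constraint
spec:
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Pod"]
  parameters:
    repos:
      - "rancher/"                          # prefix checked by startswith() in the rego above
```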

@@ -1,19 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
namespace: {{ .Release.Namespace }}
name: gatekeeper-delete-constraints-crd-job
annotations:
"helm.sh/hook": "pre-delete"
"helm.sh/hook-delete-policy": "hook-succeeded, before-hook-creation, hook-failed"
spec:
template:
spec:
serviceAccountName: gatekeeper-admin
containers:
- name: gatekeeper-delete-constraints-crd
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
command: ["kubectl", "delete", "constrainttemplates", "--all"]
restartPolicy: Never
backoffLimit: 1

@@ -1,57 +0,0 @@
apiVersion: templates.gatekeeper.sh/v1beta1
kind: ConstraintTemplate
metadata:
name: k8srequiredlabels
spec:
crd:
spec:
names:
kind: K8sRequiredLabels
validation:
# Schema for the `parameters` field
openAPIV3Schema:
properties:
message:
type: string
labels:
type: array
items:
type: object
properties:
key:
type: string
allowedRegex:
type: string
targets:
- target: admission.k8s.gatekeeper.sh
rego: |
package k8srequiredlabels
get_message(parameters, _default) = msg {
not parameters.message
msg := _default
}
get_message(parameters, _default) = msg {
msg := parameters.message
}
violation[{"msg": msg, "details": {"missing_labels": missing}}] {
provided := {label | input.review.object.metadata.labels[label]}
required := {label | label := input.parameters.labels[_].key}
missing := required - provided
count(missing) > 0
def_msg := sprintf("you must provide labels: %v", [missing])
msg := get_message(input.parameters, def_msg)
}
violation[{"msg": msg}] {
value := input.review.object.metadata.labels[key]
expected := input.parameters.labels[_]
expected.key == key
# do not match if allowedRegex is not defined, or is an empty string
expected.allowedRegex != ""
not re_match(expected.allowedRegex, value)
def_msg := sprintf("Label <%v: %v> does not satisfy allowed regex: %v", [key, value, expected.allowedRegex])
msg := get_message(input.parameters, def_msg)
}
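
Likewise, a hedged constraint for this template (kind again from `crd.spec.names`; the label key, regex, message, and match scope are illustrative):

```
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels
metadata:
  name: ns-must-have-owner                  # hypothetical constraint
spec:
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Namespace"]
  parameters:
    message: "all namespaces must carry an owner label"
    labels:
      - key: owner
        allowedRegex: "^[a-z]+$"
```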

@@ -1,4 +0,0 @@
url: https://open-policy-agent.github.io/gatekeeper/charts/gatekeeper-3.1.1.tgz
packageVersion: 00
generateCRDChart:
enabled: true

@@ -1,292 +0,0 @@
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/Chart.yaml packages/rancher-gatekeeper/charts/Chart.yaml
--- packages/rancher-gatekeeper/charts-original/Chart.yaml
+++ packages/rancher-gatekeeper/charts/Chart.yaml
@@ -1,10 +1,17 @@
apiVersion: v1
appVersion: v3.1.1
-description: A Helm chart for Gatekeeper
+description: Modifies Open Policy Agent's upstream gatekeeper chart that provides policy-based control for cloud native environments
home: https://github.com/open-policy-agent/gatekeeper
keywords:
-- open policy agent
-name: gatekeeper
+ - open policy agent
+ - security
+name: rancher-gatekeeper
sources:
-- https://github.com/open-policy-agent/gatekeeper.git
+ - https://github.com/open-policy-agent/gatekeeper.git
version: 3.1.1
+icon: https://charts.rancher.io/assets/logos/gatekeeper.svg
+annotations:
+ catalog.cattle.io/certified: rancher
+ catalog.cattle.io/namespace: cattle-gatekeeper-system
+ catalog.cattle.io/release-name: rancher-gatekeeper
+ catalog.cattle.io/provides-gvr: config.gatekeeper.sh.config/v1alpha1
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/README.md packages/rancher-gatekeeper/charts/README.md
--- packages/rancher-gatekeeper/charts-original/README.md
+++ packages/rancher-gatekeeper/charts/README.md
@@ -4,7 +4,7 @@
| Parameter | Description | Default |
| :------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------ |
-| auditInterval | The frequency with which audit is run | `60` |
+| auditInterval | The frequency with which audit is run | `300` |
| constraintViolationsLimit | The maximum # of audit violations reported on a constraint | `20` |
| auditFromCache | Take the roster of resources to audit from the OPA cache | `false` |
| auditChunkSize | Chunk size for listing cluster resources for audit (alpha feature) | `0` |
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/crds/config-customresourcedefinition.yaml packages/rancher-gatekeeper/charts/crds/config-customresourcedefinition.yaml
--- packages/rancher-gatekeeper/charts-original/crds/config-customresourcedefinition.yaml
+++ packages/rancher-gatekeeper/charts/crds/config-customresourcedefinition.yaml
@@ -3,8 +3,6 @@
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.3.0
- helm.sh/hook: crd-install
- helm.sh/hook-delete-policy: before-hook-creation
creationTimestamp: null
labels:
gatekeeper.sh/system: "yes"
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/crds/constraintpodstatus-customresourcedefinition.yaml packages/rancher-gatekeeper/charts/crds/constraintpodstatus-customresourcedefinition.yaml
--- packages/rancher-gatekeeper/charts-original/crds/constraintpodstatus-customresourcedefinition.yaml
+++ packages/rancher-gatekeeper/charts/crds/constraintpodstatus-customresourcedefinition.yaml
@@ -3,8 +3,6 @@
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.3.0
- helm.sh/hook: crd-install
- helm.sh/hook-delete-policy: before-hook-creation
creationTimestamp: null
labels:
gatekeeper.sh/system: "yes"
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/crds/constrainttemplate-customresourcedefinition.yaml packages/rancher-gatekeeper/charts/crds/constrainttemplate-customresourcedefinition.yaml
--- packages/rancher-gatekeeper/charts-original/crds/constrainttemplate-customresourcedefinition.yaml
+++ packages/rancher-gatekeeper/charts/crds/constrainttemplate-customresourcedefinition.yaml
@@ -1,9 +1,6 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
- annotations:
- helm.sh/hook: crd-install
- helm.sh/hook-delete-policy: before-hook-creation
creationTimestamp: null
labels:
gatekeeper.sh/system: "yes"
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/crds/constrainttemplatepodstatus-customresourcedefinition.yaml packages/rancher-gatekeeper/charts/crds/constrainttemplatepodstatus-customresourcedefinition.yaml
--- packages/rancher-gatekeeper/charts-original/crds/constrainttemplatepodstatus-customresourcedefinition.yaml
+++ packages/rancher-gatekeeper/charts/crds/constrainttemplatepodstatus-customresourcedefinition.yaml
@@ -3,8 +3,6 @@
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.3.0
- helm.sh/hook: crd-install
- helm.sh/hook-delete-policy: before-hook-creation
creationTimestamp: null
labels:
gatekeeper.sh/system: "yes"
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/_helpers.tpl packages/rancher-gatekeeper/charts/templates/_helpers.tpl
--- packages/rancher-gatekeeper/charts-original/templates/_helpers.tpl
+++ packages/rancher-gatekeeper/charts/templates/_helpers.tpl
@@ -42,3 +42,11 @@
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
+
+{{- define "system_default_registry" -}}
+{{- if .Values.global.cattle.systemDefaultRegistry -}}
+{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
+{{- else -}}
+{{- "" -}}
+{{- end -}}
+{{- end -}}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/crds.yaml packages/rancher-gatekeeper/charts/templates/crds.yaml
--- packages/rancher-gatekeeper/charts-original/templates/crds.yaml
+++ packages/rancher-gatekeeper/charts/templates/crds.yaml
@@ -1,6 +0,0 @@
-{{- if .Values.customResourceDefinitions.create }}
-{{- range $path, $bytes := .Files.Glob "crds/*.yaml" }}
-{{ $.Files.Get $path }}
----
-{{- end }}
-{{- end }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/gatekeeper-admin-serviceaccount.yaml packages/rancher-gatekeeper/charts/templates/gatekeeper-admin-serviceaccount.yaml
--- packages/rancher-gatekeeper/charts-original/templates/gatekeeper-admin-serviceaccount.yaml
+++ packages/rancher-gatekeeper/charts/templates/gatekeeper-admin-serviceaccount.yaml
@@ -8,4 +8,4 @@
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-admin
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/gatekeeper-audit-deployment.yaml packages/rancher-gatekeeper/charts/templates/gatekeeper-audit-deployment.yaml
--- packages/rancher-gatekeeper/charts-original/templates/gatekeeper-audit-deployment.yaml
+++ packages/rancher-gatekeeper/charts/templates/gatekeeper-audit-deployment.yaml
@@ -10,7 +10,7 @@
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-audit
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
spec:
replicas: 1
selector:
@@ -58,7 +58,7 @@
valueFrom:
fieldRef:
fieldPath: metadata.name
- image: '{{ .Values.image.repository }}:{{ .Values.image.release }}'
+ image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}'
imagePullPolicy: '{{ .Values.image.pullPolicy }}'
livenessProbe:
httpGet:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/gatekeeper-controller-manager-deployment.yaml packages/rancher-gatekeeper/charts/templates/gatekeeper-controller-manager-deployment.yaml
--- packages/rancher-gatekeeper/charts-original/templates/gatekeeper-controller-manager-deployment.yaml
+++ packages/rancher-gatekeeper/charts/templates/gatekeeper-controller-manager-deployment.yaml
@@ -10,7 +10,7 @@
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-controller-manager
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
spec:
replicas: {{ .Values.replicas }}
selector:
@@ -67,7 +67,7 @@
valueFrom:
fieldRef:
fieldPath: metadata.name
- image: '{{ .Values.image.repository }}:{{ .Values.image.release }}'
+ image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}'
imagePullPolicy: '{{ .Values.image.pullPolicy }}'
livenessProbe:
httpGet:
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/gatekeeper-manager-role-role.yaml packages/rancher-gatekeeper/charts/templates/gatekeeper-manager-role-role.yaml
--- packages/rancher-gatekeeper/charts-original/templates/gatekeeper-manager-role-role.yaml
+++ packages/rancher-gatekeeper/charts/templates/gatekeeper-manager-role-role.yaml
@@ -9,7 +9,7 @@
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-manager-role
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
rules:
- apiGroups:
- ""
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml packages/rancher-gatekeeper/charts/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml
--- packages/rancher-gatekeeper/charts-original/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml
+++ packages/rancher-gatekeeper/charts/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml
@@ -15,4 +15,4 @@
subjects:
- kind: ServiceAccount
name: gatekeeper-admin
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/gatekeeper-manager-rolebinding-rolebinding.yaml packages/rancher-gatekeeper/charts/templates/gatekeeper-manager-rolebinding-rolebinding.yaml
--- packages/rancher-gatekeeper/charts-original/templates/gatekeeper-manager-rolebinding-rolebinding.yaml
+++ packages/rancher-gatekeeper/charts/templates/gatekeeper-manager-rolebinding-rolebinding.yaml
@@ -8,7 +8,7 @@
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-manager-rolebinding
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -16,4 +16,4 @@
subjects:
- kind: ServiceAccount
name: gatekeeper-admin
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/gatekeeper-system-namespace.yaml packages/rancher-gatekeeper/charts/templates/gatekeeper-system-namespace.yaml
--- packages/rancher-gatekeeper/charts-original/templates/gatekeeper-system-namespace.yaml
+++ packages/rancher-gatekeeper/charts/templates/gatekeeper-system-namespace.yaml
@@ -1,12 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
- labels:
- admission.gatekeeper.sh/ignore: no-self-managing
- app: '{{ template "gatekeeper.name" . }}'
- chart: '{{ template "gatekeeper.name" . }}'
- control-plane: controller-manager
- gatekeeper.sh/system: "yes"
- heritage: '{{ .Release.Service }}'
- release: '{{ .Release.Name }}'
- name: gatekeeper-system
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml packages/rancher-gatekeeper/charts/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml
--- packages/rancher-gatekeeper/charts-original/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml
+++ packages/rancher-gatekeeper/charts/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml
@@ -15,7 +15,7 @@
caBundle: Cg==
service:
name: gatekeeper-webhook-service
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
path: /v1/admit
failurePolicy: Ignore
name: validation.gatekeeper.sh
@@ -39,7 +39,7 @@
caBundle: Cg==
service:
name: gatekeeper-webhook-service
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
path: /v1/admitlabel
failurePolicy: Fail
name: check-ignore-label.gatekeeper.sh
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/gatekeeper-webhook-server-cert-secret.yaml packages/rancher-gatekeeper/charts/templates/gatekeeper-webhook-server-cert-secret.yaml
--- packages/rancher-gatekeeper/charts-original/templates/gatekeeper-webhook-server-cert-secret.yaml
+++ packages/rancher-gatekeeper/charts/templates/gatekeeper-webhook-server-cert-secret.yaml
@@ -8,4 +8,4 @@
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-webhook-server-cert
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/templates/gatekeeper-webhook-service-service.yaml packages/rancher-gatekeeper/charts/templates/gatekeeper-webhook-service-service.yaml
--- packages/rancher-gatekeeper/charts-original/templates/gatekeeper-webhook-service-service.yaml
+++ packages/rancher-gatekeeper/charts/templates/gatekeeper-webhook-service-service.yaml
@@ -8,7 +8,7 @@
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-webhook-service
- namespace: gatekeeper-system
+ namespace: '{{ .Release.Namespace }}'
spec:
ports:
- port: 443
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-gatekeeper/charts-original/values.yaml packages/rancher-gatekeeper/charts/values.yaml
--- packages/rancher-gatekeeper/charts-original/values.yaml
+++ packages/rancher-gatekeeper/charts/values.yaml
@@ -1,5 +1,5 @@
replicas: 3
-auditInterval: 60
+auditInterval: 300
constraintViolationsLimit: 20
auditFromCache: false
disableValidatingWebhook: false
@@ -8,8 +8,8 @@
emitAdmissionEvents: false
emitAuditEvents: false
image:
- repository: openpolicyagent/gatekeeper
- release: v3.1.1
+ repository: rancher/openpolicyagent-gatekeeper
+ tag: v3.1.1
pullPolicy: IfNotPresent
nodeSelector: { kubernetes.io/os: linux }
affinity: {}
@@ -23,5 +23,9 @@
requests:
cpu: 100m
memory: 256Mi
-customResourceDefinitions:
- create: true
+global:
+ cattle:
+ systemDefaultRegistry: ""
+ kubectl:
+ repository: rancher/kubectl
+ tag: v1.18.6

@@ -1,17 +0,0 @@
apiVersion: v1
appVersion: 1.7.1
description: A basic Istio setup that installs with the istioctl. Refer to https://istio.io/latest/ for details.
name: rancher-istio
version: 1.7.100
icon: https://charts.rancher.io/assets/logos/istio.svg
keywords:
- networking
- infrastructure
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/namespace: istio-system
catalog.cattle.io/release-name: rancher-istio
catalog.cattle.io/ui-component: istio
catalog.cattle.io/requires-gvr: monitoring.coreos.com.prometheus/v1
catalog.cattle.io/provides-gvr: networking.istio.io.virtualservice/v1beta1
catalog.cattle.io/auto-install: rancher-kiali-server-crd=1.22.001

@@ -1,20 +0,0 @@
# Rancher Istio Installers
A Rancher-created chart that packages the istioctl binary to install Istio via a Helm chart.
# Installation
### Requirements
This chart depends on the rancher-kiali-server-crd chart.
It also depends on the `rancher-monitoring` chart being installed with default values for `nameOverride`, `namespaceOverride`, and `prometheus.service.port`.
If those values are modified on the rancher-monitoring deployment, please adjust the `kiali.external_services.prometheus` URL setting:
```
http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc:{{ prometheus.service.port }}
```
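
For example, if monitoring were installed with `nameOverride: monitoring`, `namespaceOverride: cattle-monitoring-system`, and port `9090` (all three assumed values), one plausible override — nesting under the `kiali` alias that the requirements.yaml below sets up — would be:

```
kiali:
  external_services:
    prometheus:
      url: http://monitoring-prometheus.cattle-monitoring-system.svc:9090
```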
### Installation
```
helm install rancher-istio ./ --create-namespace -n istio-system
```

@@ -1,91 +0,0 @@
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
addonComponents:
istiocoredns:
enabled: {{ .Values.istiocoredns.enabled }}
components:
base:
enabled: {{ .Values.base.enabled }}
cni:
enabled: {{ .Values.cni.enabled }}
egressGateways:
- enabled: {{ .Values.egressGateways.enabled }}
name: istio-egressgateway
ingressGateways:
- enabled: {{ .Values.ingressGateways.enabled }}
name: istio-ingressgateway
k8s:
service:
ports:
- name: status-port
port: 15021
targetPort: 15021
- name: http2
port: 80
targetPort: 8080
nodePort: 31380
- name: https
port: 443
targetPort: 8443
nodePort: 31390
- name: tcp
port: 31400
targetPort: 31400
nodePort: 31400
- name: tls
port: 15443
targetPort: 15443
istiodRemote:
enabled: {{ .Values.istiodRemote.enabled }}
pilot:
enabled: {{ .Values.pilot.enabled }}
policy:
enabled: {{ .Values.policy.enabled }}
telemetry:
enabled: {{ .Values.telemetry.enabled }}
hub: {{ .Values.systemDefaultRegistry | default "docker.io" }}
profile: default
tag: {{ .Values.tag }}
revision: {{ .Values.revision }}
meshConfig:
enablePrometheusMerge: {{ .Values.meshConfig.enablePrometheusMerge }}
values:
gateways:
istio-egressgateway:
name: istio-egressgateway
type: {{ .Values.egressGateways.type }}
istio-ingressgateway:
name: istio-ingressgateway
type: {{ .Values.ingressGateways.type }}
global:
istioNamespace: {{ template "istio.namespace" . }}
proxy:
image: {{ template "system_default_registry" . }}{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }}
proxy_init:
image: {{ template "system_default_registry" . }}{{ .Values.global.proxy_init.repository}}:{{ .Values.global.proxy_init.tag }}
{{- if .Values.global.defaultPodDisruptionBudget.enabled }}
defaultPodDisruptionBudget:
enabled: {{ .Values.global.defaultPodDisruptionBudget.enabled }}
{{- end }}
istiocoredns:
coreDNSImage: {{ template "system_default_registry" . }}{{ .Values.istiocoredns.image.repository }}
coreDNSPluginImage: {{ template "system_default_registry" . }}{{ .Values.istiocoredns.pluginImage.repository }}:{{ .Values.istiocoredns.pluginImage.tag }}
coreDNSTag: {{ .Values.istiocoredns.image.tag }}
mixer:
{{- if .Values.policy.enabled }}
policy:
image: {{ template "system_default_registry" . }}{{ .Values.policy.repository}}:{{ .Values.policy.tag }}
{{- end }}
{{- if .Values.telemetry.enabled }}
telemetry:
image: {{ template "system_default_registry" . }}{{ .Values.telemetry.repository}}:{{ .Values.telemetry.tag }}
{{- end }}
{{- if .Values.pilot.enabled }}
pilot:
image: {{ template "system_default_registry" . }}{{ .Values.pilot.repository}}:{{ .Values.pilot.tag }}
{{- end }}
{{- if .Values.cni.enabled }}
cni:
image: {{ template "system_default_registry" . }}{{ .Values.cni.repository }}:{{ .Values.cni.tag }}
{{- end }}

@@ -1,7 +0,0 @@
dependencies:
- name: rancher-kiali-server
alias: kiali
condition: kiali.enabled
version: 1.23.0
repository: file://../../rancher-kiali-server/charts

@@ -1,37 +0,0 @@
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
components:
ingressGateways:
- enabled: true
name: ilb-gateway
namespace: user-ingressgateway-ns
k8s:
resources:
requests:
cpu: 200m
service:
ports:
- name: tcp-citadel-grpc-tls
port: 8060
targetPort: 8060
- name: tcp-dns
port: 5353
serviceAnnotations:
cloud.google.com/load-balancer-type: internal
- enabled: true
name: other-gateway
namespace: cattle-istio-system
k8s:
resources:
requests:
cpu: 200m
service:
ports:
- name: tcp-citadel-grpc-tls
port: 8060
targetPort: 8060
- name: tcp-dns
port: 5353
serviceAnnotations:
cloud.google.com/load-balancer-type: internal

@@ -1,12 +0,0 @@
{{/* Ensure namespace is set the same everywhere */}}
{{- define "istio.namespace" -}}
{{- .Release.Namespace | default "istio-system" -}}
{{- end -}}
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

@@ -1,43 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: istio-admin
namespace: {{ template "istio.namespace" . }}
rules:
- apiGroups:
- config.istio.io
resources:
- adapters
- attributemanifests
- handlers
- httpapispecbindings
- httpapispecs
- instances
- quotaspecbindings
- quotaspecs
- rules
- templates
verbs: ["get", "watch", "list"]
- apiGroups:
- networking.istio.io
resources:
- destinationrules
- envoyfilters
- gateways
- serviceentries
- sidecars
- virtualservices
- workloadentries
verbs:
- '*'
- apiGroups:
- security.istio.io
resources:
- authorizationpolicies
- peerauthentications
- requestauthentications
verbs:
- '*'

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: istio-installer-base
namespace: {{ template "istio.namespace" . }}
data:
{{ tpl (.Files.Glob "configs/*").AsConfig . | indent 2 }}

@@ -1,112 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: istio-installer
rules:
# istio groups
- apiGroups:
- authentication.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- config.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- install.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- networking.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- rbac.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- security.istio.io
resources:
- '*'
verbs:
- '*'
# k8s groups
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions.apiextensions.k8s.io
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- daemonsets
- deployments
- deployments/finalizers
- ingresses
- replicasets
- statefulsets
verbs:
- '*'
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- get
- create
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
- roles
- rolebindings
verbs:
- '*'
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- events
- namespaces
- pods
- pods/exec
- persistentvolumeclaims
- secrets
- services
- serviceaccounts
verbs:
- '*'

@@ -1,12 +0,0 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: istio-installer
subjects:
- kind: ServiceAccount
name: istio-installer
namespace: {{ template "istio.namespace" . }}
roleRef:
kind: ClusterRole
name: istio-installer
apiGroup: rbac.authorization.k8s.io

@@ -1,43 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
namespace: {{ template "istio.namespace" . }}
name: istio-edit
rules:
- apiGroups:
- config.istio.io
resources:
- adapters
- attributemanifests
- handlers
- httpapispecbindings
- httpapispecs
- instances
- quotaspecbindings
- quotaspecs
- rules
- templates
verbs: ["get", "watch", "list"]
- apiGroups:
- networking.istio.io
resources:
- destinationrules
- envoyfilters
- gateways
- serviceentries
- sidecars
- virtualservices
- workloadentries
verbs:
- '*'
- apiGroups:
- security.istio.io
resources:
- authorizationpolicies
- peerauthentications
- requestauthentications
verbs:
- '*'

@@ -1,45 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: istioctl-installer
namespace: {{ template "istio.namespace" . }}
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
spec:
backoffLimit: 1
template:
spec:
containers:
- name: istioctl-installer
image: {{ template "system_default_registry" . }}{{ .Values.installer.repository }}:{{ .Values.installer.tag }}
env:
- name: RELEASE_NAME
value: {{ .Release.Name }}
- name: ISTIO_NAMESPACE
value: {{ template "istio.namespace" . }}
- name: FORCE_INSTALL
value: {{ .Values.forceInstall | default "false" | quote }}
command: ["/bin/sh","-c"]
args: ["/usr/local/app/scripts/run.sh"]
volumeMounts:
- name: config-volume
mountPath: /app/istio-base.yaml
subPath: istio-base.yaml
{{- if .Values.overlayFile }}
- name: overlay-volume
mountPath: /app/overlay-config.yaml
subPath: overlay-config.yaml
{{- end }}
volumes:
- name: config-volume
configMap:
name: istio-installer-base
{{- if .Values.overlayFile }}
- name: overlay-volume
configMap:
name: istio-installer-overlay
{{- end }}
serviceAccountName: istio-installer
restartPolicy: Never

View File

@ -1,42 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: istioctl-uninstaller
namespace: {{ template "istio.namespace" . }}
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded
spec:
template:
spec:
containers:
- name: istioctl-uninstaller
image: {{ template "system_default_registry" . }}{{ .Values.installer.repository }}:{{ .Values.installer.tag }}
env:
- name: RELEASE_NAME
value: {{ .Release.Name }}
- name: ISTIO_NAMESPACE
value: {{ template "istio.namespace" . }}
command: ["/bin/sh","-c"]
args: ["/usr/local/app/scripts/uninstall_istio_system.sh"]
volumeMounts:
- name: config-volume
mountPath: /app/istio-base.yaml
subPath: istio-base.yaml
{{- if .Values.overlayFile }}
- name: overlay-volume
mountPath: /app/overlay-config.yaml
subPath: overlay-config.yaml
{{- end }}
volumes:
- name: config-volume
configMap:
name: istio-installer-base
{{- if .Values.overlayFile }}
- name: overlay-volume
configMap:
name: istio-installer-overlay
{{- end }}
serviceAccountName: istio-installer
restartPolicy: OnFailure

View File

@ -1,9 +0,0 @@
{{- if .Values.overlayFile }}
apiVersion: v1
kind: ConfigMap
metadata:
name: istio-installer-overlay
namespace: {{ template "istio.namespace" . }}
data:
overlay-config.yaml: {{ toYaml .Values.overlayFile | indent 2 }}
{{- end }}
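
Taken together with the installer Job above, this ConfigMap is what carries a user-supplied overlay into the istioctl run: `overlayFile` defaults to `""` in the chart's values, so nothing renders unless it is set. A minimal sketch of supplying one, assuming the overlay body is an `IstioOperator` spec (the spec content below is illustrative, not a chart default):

```yaml
# Hypothetical user values file; the IstioOperator body is an assumption
# about the expected overlay format, not something this chart defines.
overlayFile: |-
  apiVersion: install.istio.io/v1alpha1
  kind: IstioOperator
  spec:
    components:
      pilot:
        k8s:
          resources:
            requests:
              cpu: 200m
```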

View File

@ -1,51 +0,0 @@
{{- if .Values.kiali.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: envoy-stats-monitor
namespace: {{ template "istio.namespace" . }}
labels:
monitoring: istio-proxies
spec:
selector:
matchExpressions:
- {key: istio-prometheus-ignore, operator: DoesNotExist}
namespaceSelector:
any: true
jobLabel: envoy-stats
endpoints:
- path: /stats/prometheus
targetPort: 15090
interval: 15s
relabelings:
- sourceLabels: [__meta_kubernetes_pod_container_port_name]
action: keep
regex: '.*-envoy-prom'
- action: labeldrop
regex: "__meta_kubernetes_pod_label_(.+)"
- sourceLabels: [__meta_kubernetes_namespace]
action: replace
targetLabel: namespace
- sourceLabels: [__meta_kubernetes_pod_name]
action: replace
targetLabel: pod_name
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: istio-component-monitor
namespace: {{ template "istio.namespace" . }}
labels:
monitoring: istio-components
spec:
jobLabel: istio
targetLabels: [app]
selector:
matchExpressions:
- {key: istio, operator: In, values: [pilot]}
namespaceSelector:
any: true
endpoints:
- port: http-monitoring
interval: 15s
{{- end -}}

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: istio-installer
namespace: {{ template "istio.namespace" . }}

View File

@ -1,41 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
namespace: {{ template "istio.namespace" . }}
name: istio-view
rules:
- apiGroups:
- config.istio.io
resources:
- adapters
- attributemanifests
- handlers
- httpapispecbindings
- httpapispecs
- instances
- quotaspecbindings
- quotaspecs
- rules
- templates
verbs: ["get", "watch", "list"]
- apiGroups:
- networking.istio.io
resources:
- destinationrules
- envoyfilters
- gateways
- serviceentries
- sidecars
- virtualservices
- workloadentries
verbs: ["get", "watch", "list"]
- apiGroups:
- security.istio.io
resources:
- authorizationpolicies
- peerauthentications
- requestauthentications
verbs: ["get", "watch", "list"]

View File

@ -1,89 +0,0 @@
overlayFile: ""
tag: 1.7.1
forceInstall: false
installer:
repository: rancher/istio-installer
tag: 1.7.1-rancher1
istiocoredns:
enabled: false
image:
repository: rancher/coredns-coredns
tag: 1.6.2
pluginImage:
repository: rancher/istio-coredns-plugin
tag: 0.2-istio-1.1
base:
enabled: true
cni:
enabled: false
repository: rancher/istio-install-cni
tag: 1.7.1
egressGateways:
enabled: false
type: NodePort
ingressGateways:
enabled: true
type: NodePort
istiodRemote:
enabled: false
pilot:
enabled: true
repository: rancher/istio-pilot
tag: 1.7.1
policy:
enabled: true
repository: rancher/istio-mixer
tag: 1.7.1
telemetry:
enabled: true
repository: rancher/istio-mixer
tag: 1.7.1
sidecarInjectorWebhook:
enableNamespacesByDefault: false
objectSelector:
enabled: true
autoInject: true
rewriteAppHTTPProbe: true
global:
cattle:
systemDefaultRegistry: ""
proxy:
repository: rancher/istio-proxyv2
tag: 1.7.1
proxy_init:
repository: rancher/istio-proxyv2
tag: 1.7.1
defaultPodDisruptionBudget:
enabled: true
# this can be removed in 1.7 as it is the default
meshConfig:
enablePrometheusMerge: true
# Kiali subchart from rancher-kiali-server
kiali:
enabled: true
auth:
strategy: anonymous
deployment:
ingress_enabled: false
repository: rancher/kiali-kiali
tag: v1.23.0
external_services:
prometheus:
custom_metrics_url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090"
url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090"
tracing:
enabled: false
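
As a usage sketch, the values above can be overridden per install; the keys below are taken verbatim from this file, while the registry hostname is a placeholder for an air-gapped mirror:

```yaml
# Hypothetical override file; registry.example.com is a made-up mirror host.
global:
  cattle:
    systemDefaultRegistry: "registry.example.com"
egressGateways:
  enabled: true
  type: NodePort
tracing:
  enabled: true
```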

View File

@ -1,5 +0,0 @@
url: https://kiali.org/helm-charts/kiali-server-1.23.0.tgz
packageVersion: 01
generateCRDChart:
enabled: true

View File

@ -1,164 +0,0 @@
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-kiali-server/charts-original/Chart.yaml packages/rancher-kiali-server/charts/Chart.yaml
--- packages/rancher-kiali-server/charts-original/Chart.yaml
+++ packages/rancher-kiali-server/charts/Chart.yaml
@@ -1,20 +1,27 @@
apiVersion: v2
appVersion: v1.23.0
-description: Kiali is an open source project for service mesh observability, refer
- to https://www.kiali.io for details.
+description: Kiali is an open source project for service mesh observability, refer to https://www.kiali.io for details. This is installed as sub-chart with customized values in Rancher's Istio.
home: https://github.com/kiali/kiali
icon: https://raw.githubusercontent.com/kiali/kiali.io/master/themes/kiali/static/img/kiali_logo_masthead.png
keywords:
-- istio
-- kiali
+ - istio
+ - kiali
+ - networking
+ - infrastructure
maintainers:
-- email: kiali-users@googlegroups.com
- name: Kiali
- url: https://kiali.io
-name: kiali-server
+ - email: kiali-users@googlegroups.com
+ name: Kiali
+ url: https://kiali.io
+name: rancher-kiali-server
sources:
-- https://github.com/kiali/kiali
-- https://github.com/kiali/kiali-ui
-- https://github.com/kiali/kiali-operator
-- https://github.com/kiali/helm-charts
+ - https://github.com/kiali/kiali
+ - https://github.com/kiali/kiali-ui
+ - https://github.com/kiali/kiali-operator
+ - https://github.com/kiali/helm-charts
version: 1.23.0
+annotations:
+ catalog.cattle.io/requires-gvr: monitoring.coreos.com.prometheus/v1
+ catalog.rancher.io/namespace: cattle-istio-system
+ catalog.rancher.io/release-name: rancher-kiali-server
+ catalog.cattle.io/hidden: true
+ catalog.cattle.io/provides-gvr: monitoringdashboards.monitoring.kiali.io/v1alpha1
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-kiali-server/charts-original/templates/_helpers.tpl packages/rancher-kiali-server/charts/templates/_helpers.tpl
--- packages/rancher-kiali-server/charts-original/templates/_helpers.tpl
+++ packages/rancher-kiali-server/charts/templates/_helpers.tpl
@@ -17,11 +17,7 @@
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
- {{- if contains $name .Release.Name }}
- {{- .Release.Name | trunc 63 | trimSuffix "-" }}
- {{- else }}
- {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
- {{- end }}
+ {{- printf "%s" $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
@@ -170,3 +166,11 @@
{{- end }}
{{- end }}
{{- end }}
+
+{{- define "system_default_registry" -}}
+{{- if .Values.global.cattle.systemDefaultRegistry -}}
+{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
+{{- else -}}
+{{- "" -}}
+{{- end -}}
+{{- end -}}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-kiali-server/charts-original/templates/deployment.yaml packages/rancher-kiali-server/charts/templates/deployment.yaml
--- packages/rancher-kiali-server/charts-original/templates/deployment.yaml
+++ packages/rancher-kiali-server/charts/templates/deployment.yaml
@@ -45,7 +45,7 @@
{{- end }}
{{- end }}
containers:
- - image: "{{ .Values.deployment.image_name }}:{{ .Values.deployment.image_version }}"
+ - image: "{{ template "system_default_registry" . }}{{ .Values.deployment.repository }}:{{ .Values.deployment.tag }}"
imagePullPolicy: {{ .Values.deployment.image_pull_policy | default "Always" }}
name: {{ include "kiali-server.fullname" . }}
command:
@@ -89,6 +89,11 @@
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
+ {{- if .Values.web_root_override }}
+ - name: kiali-console
+ subPath: env.js
+ mountPath: /opt/kiali/console/env.js
+ {{- end }}
- name: {{ include "kiali-server.fullname" . }}-configuration
mountPath: "/kiali-configuration"
- name: {{ include "kiali-server.fullname" . }}-cert
@@ -104,6 +109,14 @@
{{- toYaml .Values.deployment.resources | nindent 10 }}
{{- end }}
volumes:
+ {{- if .Values.web_root_override }}
+ - name: kiali-console
+ configMap:
+ name: kiali-console
+ items:
+ - key: env.js
+ path: env.js
+ {{- end }}
- name: {{ include "kiali-server.fullname" . }}-configuration
configMap:
name: {{ include "kiali-server.fullname" . }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-kiali-server/charts-original/templates/web-root-configmap.yaml packages/rancher-kiali-server/charts/templates/web-root-configmap.yaml
--- packages/rancher-kiali-server/charts-original/templates/web-root-configmap.yaml
+++ packages/rancher-kiali-server/charts/templates/web-root-configmap.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.web_root_override }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kiali-console
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "kiali-server.labels" . | nindent 4 }}
+data:
+ env.js: |
+ window.WEB_ROOT='/k8s/clusters/{{ .Values.global.cattle.clusterId }}/api/v1/namespaces/{{ .Release.Namespace }}/services/http:rancher-istio-kiali:20001/proxy';
+{{- end }}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-kiali-server/charts-original/values.yaml packages/rancher-kiali-server/charts/values.yaml
--- packages/rancher-kiali-server/charts-original/values.yaml
+++ packages/rancher-kiali-server/charts/values.yaml
@@ -9,6 +9,9 @@
# do this, a PR would be welcome.
kiali_route_url: ""
+# rancher specific override that allows proxy access to kiali url
+web_root_override: true
+
#
# Settings that mimic the Kiali CR which are placed in the ConfigMap.
# Note that only those values used by the Helm Chart will be here.
@@ -34,10 +37,10 @@
custom_dashboards:
excludes: ['']
includes: ['*']
- image_name: quay.io/kiali/kiali
+ repository: rancher/kiali-kiali
image_pull_policy: "Always"
image_pull_secrets: []
- image_version: v1.23.0
+ tag: v1.23.0
ingress_enabled: true
node_selector: {}
override_ingress_yaml:
@@ -66,3 +69,11 @@
metrics_enabled: true
metrics_port: 9090
web_root: ""
+
+# Common settings used among istio subcharts.
+global:
+ # Specify rancher clusterId of external tracing config
+ # https://github.com/istio/istio.io/issues/4146#issuecomment-493543032
+ cattle:
+ systemDefaultRegistry: ""
+ clusterId:
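
The `web_root_override` addition above is the core of this patch: when true, a `kiali-console` ConfigMap injects an `env.js` that points the Kiali UI at Rancher's cluster proxy path. Assuming a made-up cluster ID of `c-abc12` and the release in `cattle-istio-system`, the rendered ConfigMap would look roughly like:

```yaml
# Sketch of the rendered output; c-abc12 is a hypothetical clusterId value.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kiali-console
  namespace: cattle-istio-system
data:
  env.js: |
    window.WEB_ROOT='/k8s/clusters/c-abc12/api/v1/namespaces/cattle-istio-system/services/http:rancher-istio-kiali:20001/proxy';
```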

View File

@ -1,27 +0,0 @@
{{- if .Values.additionalLoggingSources.eks.enabled }}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-eks
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "eks"
Path: "/var/log/messages"
Parser: "syslog"
{{ with .Values.fluentbit_tolerations }}
tolerations:
{{ toYaml . | nindent 6 }}
{{ end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
{{- end }}

View File

@ -1,30 +0,0 @@
{{- if and .Values.additionalLoggingSources.k3s.enabled (eq .Values.additionalLoggingSources.k3s.container_engine "openrc")}}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-k3s
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "k3s"
Path: "/var/log/k3s.log"
extraVolumeMounts:
- source: "/var/log/"
destination: "/var/log"
readOnly: true
{{ with .Values.fluentbit_tolerations }}
tolerations:
{{ toYaml . | nindent 6 }}
{{ end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
{{- end }}

View File

@ -1,30 +0,0 @@
{{- if and .Values.additionalLoggingSources.k3s.enabled (eq .Values.additionalLoggingSources.k3s.container_engine "systemd")}}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-k3s
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "k3s"
Path: "/var/log/syslog"
extraVolumeMounts:
- source: "/var/log/"
destination: "/var/log"
readOnly: true
{{ with .Values.fluentbit_tolerations }}
tolerations:
{{ toYaml . | nindent 6 }}
{{ end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
{{- end }}

View File

@ -1,33 +0,0 @@
{{- if .Values.additionalLoggingSources.rke.enabled }}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-rke-containers
namespace: {{ .Release.Namespace }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "rke"
Path: "/var/log/containers/*rke*.log"
extraVolumeMounts:
- source: "/var/log/containers/"
destination: "/var/log/containers/"
readOnly: true
{{ with .Values.fluentbit_tolerations }}
tolerations:
{{ toYaml . | nindent 6 }}
{{ end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- end }}

View File

@ -1,34 +0,0 @@
{{- if .Values.additionalLoggingSources.rke.enabled }}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-rke
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "rke"
Path: "/rke/*.log"
extraVolumeMounts:
- source: "/var/lib/rancher/rke/log"
destination: "/rke"
readOnly: true
{{ with .Values.fluentbit_tolerations }}
tolerations:
{{ toYaml . | nindent 6 }}
{{ end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- end }}

View File

@ -1,18 +0,0 @@
{{- if .Values.additionalLoggingSources.rke2.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-rke2
labels:
{{ include "logging-operator.labels" . | indent 4 }}
data:
fluent-bit.conf: |
[INPUT]
Name systemd
Tag rke2
Systemd_Filter _SYSTEMD_UNIT=rke2.service
[OUTPUT]
Name file
Path /etc/rancher/logging/rke2.log
{{- end }}

View File

@ -1,33 +0,0 @@
{{- if .Values.additionalLoggingSources.rke2.enabled }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
spec:
selector:
matchLabels:
name: {{ .Release.Name }}-rke2-journald-aggregator
template:
metadata:
name: "{{ .Release.Name }}-rke2-journald-aggregator"
namespace: "{{ .Release.Namespace }}"
labels:
name: {{ .Release.Name }}-rke2-journald-aggregator
spec:
containers:
- name: fluentd
image: "{{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}:{{ .Values.images.fluentbit.tag }}"
volumeMounts:
- mountPath: /etc/rancher/logging/logs/
name: logdir
- mountPath: /fluent-bit/etc/
name: config
volumes:
- name: logdir
hostPath:
path: /etc/rancher/logging/logs/
- name: config
configMap:
name: "{{ .Release.Name }}-rke2"
{{- end }}

View File

@ -1,32 +0,0 @@
{{- if .Values.additionalLoggingSources.rke2.enabled }}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-rke2-containers
namespace: {{ .Release.Namespace }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "rke2"
Path: "/var/log/containers/*rke*.log"
extraVolumeMounts:
- source: "/var/log/containers/"
destination: "/var/log/containers/"
readOnly: true
{{ with .Values.fluentbit_tolerations }}
tolerations:
{{ toYaml . | nindent 6 }}
{{ end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- end }}

View File

@ -1,32 +0,0 @@
{{- if .Values.additionalLoggingSources.rke2.enabled }}
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}-rke2-journald
namespace: {{ .Release.Namespace }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
inputTail:
Tag: "rke2"
Path: "/etc/rancher/logging/logs/*.log"
extraVolumeMounts:
- source: "/etc/rancher/logging/logs/"
destination: "/etc/rancher/logging/logs/"
readOnly: true
{{ with .Values.fluentbit_tolerations }}
tolerations:
{{ toYaml . | nindent 6 }}
{{ end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}
{{- end }}

View File

@ -1,25 +0,0 @@
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
{{ include "logging-operator.labels" . | indent 4 }}
spec:
controlNamespace: {{ .Release.Namespace }}
fluentbit:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}
tag: {{ .Values.images.fluentbit.tag }}
{{ with .Values.fluentbit_tolerations }}
tolerations:
{{ toYaml . | nindent 6 }}
{{ end }}
fluentd:
image:
repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }}
tag: {{ .Values.images.fluentd.tag }}
configReloaderImage:
repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }}
tag: {{ .Values.images.config_reloader.tag }}
disablePvc: {{ .Values.disablePvc }}

View File

@ -1,46 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "logging-admin"
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups:
- "logging.banzaicloud.io"
resources:
- flows
- outputs
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "logging-edit"
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups:
- "logging.banzaicloud.io"
resources:
- flows
- outputs
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "logging-view"
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups:
- "logging.banzaicloud.io"
resources:
- flows
- outputs
verbs:
- get
- list
- watch
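
Because of the `aggregate-to-*` labels, these roles fold into the built-in admin, edit, and view ClusterRoles automatically; they can also be bound directly. A sketch with a placeholder user:

```yaml
# Hypothetical binding; "jane" is a placeholder subject, not part of the chart.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: jane-logging-edit
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: logging-edit
subjects:
- kind: User
  name: jane
  apiGroup: rbac.authorization.k8s.io
```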

View File

@ -1,4 +0,0 @@
url: https://kubernetes-charts.banzaicloud.com/charts/logging-operator-3.6.0.tgz
packageVersion: 00
generateCRDChart:
enabled: true

View File

@ -1,98 +0,0 @@
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-logging/charts-original/Chart.yaml packages/rancher-logging/charts/Chart.yaml
--- packages/rancher-logging/charts-original/Chart.yaml
+++ packages/rancher-logging/charts/Chart.yaml
@@ -1,5 +1,16 @@
apiVersion: v1
appVersion: 3.6.0
-description: A Helm chart to install Banzai Cloud logging-operator
-name: logging-operator
+description: Collects and filter logs using highly configurable CRDs. Powered by Banzai Cloud Logging Operator.
+name: rancher-logging
version: 3.6.0
+icon: https://charts.rancher.io/assets/logos/logging.svg
+keywords:
+ - logging
+ - monitoring
+ - security
+annotations:
+ catalog.cattle.io/certified: rancher
+ catalog.cattle.io/namespace: cattle-logging-system
+ catalog.cattle.io/release-name: rancher-logging
+ catalog.cattle.io/ui-component: logging
+ catalog.cattle.io/provides-gvr: logging.banzaicloud.io.clusterflow/v1beta1
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-logging/charts-original/templates/_helpers.tpl packages/rancher-logging/charts/templates/_helpers.tpl
--- packages/rancher-logging/charts-original/templates/_helpers.tpl
+++ packages/rancher-logging/charts/templates/_helpers.tpl
@@ -56,3 +56,11 @@
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
+
+{{- define "system_default_registry" -}}
+{{- if .Values.global.cattle.systemDefaultRegistry -}}
+{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
+{{- else -}}
+{{- "" -}}
+{{- end -}}
+{{- end -}}
diff -x '*.tgz' -x '*.lock' -uNr packages/rancher-logging/charts-original/values.yaml packages/rancher-logging/charts/values.yaml
--- packages/rancher-logging/charts-original/values.yaml
+++ packages/rancher-logging/charts/values.yaml
@@ -5,7 +5,7 @@
replicaCount: 1
image:
- repository: banzaicloud/logging-operator
+ repository: rancher/banzaicloud-logging-operator
tag: 3.6.0
pullPolicy: IfNotPresent
@@ -18,7 +18,7 @@
## Deploy CRDs used by Logging Operator.
##
-createCustomResource: true
+createCustomResource: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
@@ -76,4 +76,37 @@
monitoring:
# Create a Prometheus Operator ServiceMonitor object
serviceMonitor:
- enabled: false
\ No newline at end of file
+ enabled: true
+
+disablePvc: true
+
+additionalLoggingSources:
+ rke:
+ enabled: false
+ rke2:
+ enabled: false
+ k3s:
+ enabled: false
+ container_engine: "systemd"
+ eks:
+ enabled: false
+
+images:
+ config_reloader:
+ repository: rancher/jimmidyson-configmap-reload
+ tag: v0.2.2
+ fluentbit:
+ repository: rancher/fluent-fluent-bit
+ tag: 1.5.4
+ fluentd:
+ repository: rancher/banzaicloud-fluentd
+ tag: v1.11.2-alpine-2
+ syslog_forwarder:
+ repository: rancher/fluent-bit-out-syslog
+ tag: 0.1.0
+
+global:
+ cattle:
+ systemDefaultRegistry: ""
+
+fluentbit_tolerations: []
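
The `additionalLoggingSources` block added here is what gates the per-distribution Logging templates earlier in this diff. A sketch of turning on the RKE source, with a toleration for tainted control-plane nodes (the taint key is an assumption about an RKE setup, not a chart default):

```yaml
# Hypothetical user values; top-level keys come from the patch above.
additionalLoggingSources:
  rke:
    enabled: true
fluentbit_tolerations:
- key: node-role.kubernetes.io/controlplane  # assumed RKE control-plane taint
  value: "true"
  effect: NoSchedule
```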

View File

@ -1,41 +0,0 @@
# Changelog
All notable changes from the upstream Prometheus Operator chart will be added to this file.
## [Package Version 00] - 2020-07-19
### Added
- Added [Prometheus Adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) as a dependency to the upstream Prometheus Operator chart to allow users to expose custom metrics from the default Prometheus instance deployed by this chart
- Removed `prometheus-operator/cleanup-crds.yaml` and `prometheus-operator/crds.yaml` from the Prometheus Operator upstream chart in favor of just using the CRD directory to install the CRDs.
- Added support for `rkeControllerManager`, `rkeScheduler`, `rkeProxy`, and `rkeEtcd` PushProx exporters for monitoring k8s components within RKE clusters
- Added support for `k3sControllerManager`, `k3sScheduler`, and `k3sProxy` PushProx exporters for monitoring k8s components within k3s clusters
- Added support for `kubeAdmControllerManager`, `kubeAdmScheduler`, `kubeAdmProxy`, and `kubeAdmEtcd` PushProx exporters for monitoring k8s components within kubeAdm clusters
- Added support for `rke2ControllerManager`, `rke2Scheduler`, `rke2Proxy`, and `rke2Etcd` PushProx exporters for monitoring k8s components within rke2 clusters
- Exposed `prometheus.prometheusSpec.ignoreNamespaceSelectors` on values.yaml and set it to `true` by default. This value instructs the default Prometheus server deployed with this chart to ignore the `namespaceSelector` field within any created ServiceMonitor or PodMonitor CRs that it selects. This prevents ServiceMonitors and PodMonitors from configuring the Prometheus scrape configuration to monitor resources outside the namespace that they are deployed in; if a user needs to have one ServiceMonitor / PodMonitor monitor resources within several namespaces, they will need to either disable this default option or create one ServiceMonitor / PodMonitor CR per namespace that they would like to monitor. Relevant fields were also updated in the default README.md
- Added `grafana.sidecar.dashboards.searchNamespace` to `values.yaml` with a default value of `cattle-dashboards`. The namespace provided should contain all ConfigMaps with the label `grafana_dashboard` and will be searched by the Grafana Dashboards sidecar for updates. The namespace specified is also created along with this deployment. All default dashboard ConfigMaps have been relocated from the deployment namespace to the namespace specified
- Added `monitoring-admin`, `monitoring-edit`, and `monitoring-view` default `ClusterRoles` to allow admins to assign roles to users to interact with Prometheus Operator CRs. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`). In a typical RBAC setup, you might want to use a `ClusterRoleBinding` to bind these roles to a Subject to allow them to set up or view `ServiceMonitors` / `PodMonitors` / `PrometheusRules` and view `Prometheus` or `Alertmanager` CRs across the cluster. If `.Values.global.rbac.userRoles.aggregateToDefaultRoles` is enabled, these ClusterRoles will aggregate into the respective default ClusterRoles provided by Kubernetes
- Added `monitoring-config-admin`, `monitoring-config-edit` and `monitoring-config-view` default `Roles` to allow admins to assign roles to users to be able to edit / view `Secrets` and `ConfigMaps` within the `cattle-monitoring-system` namespace. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`). In a typical RBAC setup, you might want to use a `RoleBinding` to bind these roles to a Subject within the `cattle-monitoring-system` namespace to allow them to modify Secrets / ConfigMaps tied to the deployment, such as your Alertmanager Config Secret.
- Added `monitoring-dashboard-admin`, `monitoring-dashboard-edit` and `monitoring-dashboard-view` default `Roles` to allow admins to assign roles to users to be able to edit / view `ConfigMaps` within the `cattle-dashboards` namespace. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`) and deploying Grafana as part of this chart. In a typical RBAC setup, you might want to use a `RoleBinding` to bind these roles to a Subject within the `cattle-dashboards` namespace to allow them to create / modify ConfigMaps that contain the JSON used to persist Grafana Dashboards on the cluster.
- Added default resource limits for `Prometheus Operator`, `Prometheus`, `AlertManager`, `Grafana`, `kube-state-metrics`, `node-exporter`
- Added a default template `rancher_defaults.tmpl` to AlertManager that Rancher will offer to users in order to help configure the way alerts are rendered on a notifier. Also updated the default template deployed with this chart to reference that template and added an example of a Slack config using this template as a comment in the `values.yaml`.
- Added support for private registries via introducing a new field for `global.cattle.systemDefaultRegistry` that, if supplied, will automatically be prepended onto every image used by the chart.
### Modified
- Updated the chart name from `prometheus-operator` to `rancher-monitoring` and added the `io.rancher.certified: rancher` annotation to `Chart.yaml`
- Modified the default `node-exporter` port from `9100` to `9796`
- Modified the default `nameOverride` to `rancher-monitoring`. This change is necessary as the Prometheus Adapter's default URL (`http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc`) is based off of the value used here; if modified, the default Adapter URL must also be modified
- Modified the default `namespaceOverride` to `cattle-monitoring-system`. This change is necessary as the Prometheus Adapter's default URL (`http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc`) is based off of the value used here; if modified, the default Adapter URL must also be modified
- Configured some default values for `grafana.service` values and exposed them in the default README.md
- The default namespaces of the following ServiceMonitors were changed from the deployment namespace to allow them to continue to monitor metrics when `prometheus.prometheusSpec.ignoreNamespaceSelectors` is enabled:
- `core-dns`: `kube-system`
- `api-server`: `default`
- `kube-controller-manager`: `kube-system`
- `kubelet`: `{{ .Values.kubelet.namespace }}`
- Disabled the following deployments by default (can be enabled if required):
- `AlertManager`
- `kube-controller-manager` metrics exporter
- `kube-etcd` metrics exporter
- `kube-scheduler` metrics exporter
- `kube-proxy` metrics exporter
- Updated default Grafana `deploymentStrategy` to `Recreate` to prevent deployments from being stuck on upgrade if a PV is attached to Grafana
- Modified the default `<serviceMonitor|podMonitor|rule>SelectorNilUsesHelmValues` to `false`. As a result, we look for all CRs with any labels in all namespaces by default rather than just the ones tagged with the label `release: rancher-monitoring`.
- Modified the default images used by the `rancher-monitoring` chart to point to Rancher mirrors of the original images from upstream.
- Modified the behavior of the chart to create the Alertmanager Config Secret via a pre-install hook instead of using the normal Helm lifecycle to manage the secret. The benefit of this approach is that all changes to the Config Secret done on a live cluster will never get overridden on a `helm upgrade`, since the secret only gets created on a `helm install`. If you would like the secret to be cleaned up on a `helm uninstall`, enable `alertmanager.cleanupOnUninstall`; however, this is disabled by default to prevent the loss of alerting configuration on an uninstall.
- Modified the default `securityContext` for `Pod` templates across the chart to `{"runAsNonRoot": true, "runAsUser": 1000}` and set `grafana.rbac.pspUseAppArmor=false` in order to make it possible to deploy this chart on a hardened cluster without AppArmor installed.
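
For reference, the Rancher-specific values called out above map to a snippet like the following (key paths are quoted from the entries; the values shown are the defaults those entries state, and the aggregation flag's default is an assumption):

```yaml
# Sketch assembled from the changelog entries above, not a full values.yaml.
global:
  cattle:
    systemDefaultRegistry: ""          # prepended to every image when set
  rbac:
    userRoles:
      create: true                     # ship the monitoring-* user roles
      aggregateToDefaultRoles: true    # assumption: aggregation on by default
prometheus:
  prometheusSpec:
    ignoreNamespaceSelectors: true     # ServiceMonitors stay namespace-scoped
grafana:
  sidecar:
    dashboards:
      searchNamespace: cattle-dashboards
```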

View File

@ -1,16 +0,0 @@
# Rancher Monitoring
This chart is based off of the upstream [Prometheus Operator](https://github.com/helm/charts/tree/master/stable/prometheus-operator) chart. It supports the following functionality to enable monitoring within your cluster:
- [[Prometheus Operator](https://github.com/coreos/prometheus-operator)] Provides easy monitoring definitions for Kubernetes services and the deployment and management of one or more [Prometheus / Alertmanager](https://prometheus.io/) instances and deploys default monitors / alerts onto the cluster
- [[Prometheus Operator](https://github.com/coreos/prometheus-operator)] Deploys the upstream [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) chart and deploys default dashboards onto the cluster
- [[Prometheus Operator](https://github.com/coreos/prometheus-operator)] Monitors internal Kubernetes components by deploying components such as [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) and [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics)
- [[rancher-pushprox](https://github.com/rancher/charts/tree/dev-v2.5/packages/rancher-pushprox/charts)] Sets up default Deployments and DaemonSets to monitor `kube-scheduler`, `kube-controller-manager`, `kube-proxy`, and `kube-etcd` components via nodeSelectors / tolerations for certain cluster types
- [[Prometheus Adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter)] Exposes custom metrics, resource metrics, and external metrics on the default [Prometheus](https://prometheus.io/) instance launched by [Prometheus Operator](https://github.com/coreos/prometheus-operator)
You must install the Prometheus Operator CRDs first using the `rancher-monitoring-crd` chart before installing this chart.
```bash
helm install rancher-monitoring-crd rancher/stable
```
For more information, see the README.md of this chart.
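
As a usage sketch, the component exporters disabled by default can be re-enabled through values; the key names below are the PushProx exporter names listed in this chart's changelog, with `.enabled` assumed as the toggle:

```yaml
# Hypothetical values snippet; exporter key names come from the changelog,
# and the .enabled toggle shape is an assumption.
rkeEtcd:
  enabled: true
rkeControllerManager:
  enabled: true
alertmanager:
  enabled: true   # Alertmanager also ships disabled by default
```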

View File

@ -1,93 +0,0 @@
{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: monitoring-admin
labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
{{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }}
rbac.authorization.k8s.io/aggregate-to-admin: "true"
{{- end }}
rules:
- apiGroups:
- monitoring.coreos.com
resources:
- alertmanagers
- prometheuses
- prometheuses/finalizers
- alertmanagers/finalizers
verbs:
- 'get'
- 'list'
- 'watch'
- apiGroups:
- monitoring.coreos.com
resources:
- thanosrulers
- thanosrulers/finalizers
- servicemonitors
- podmonitors
- prometheusrules
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: monitoring-edit
labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
{{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }}
rbac.authorization.k8s.io/aggregate-to-edit: "true"
{{- end }}
rules:
- apiGroups:
- monitoring.coreos.com
resources:
- alertmanagers
- prometheuses
- prometheuses/finalizers
- alertmanagers/finalizers
verbs:
- 'get'
- 'list'
- 'watch'
- apiGroups:
- monitoring.coreos.com
resources:
- thanosrulers
- thanosrulers/finalizers
- servicemonitors
- podmonitors
- prometheusrules
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: monitoring-view
labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
{{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }}
rbac.authorization.k8s.io/aggregate-to-view: "true"
{{- end }}
rules:
- apiGroups:
- monitoring.coreos.com
resources:
- alertmanagers
- prometheuses
- prometheuses/finalizers
- alertmanagers/finalizers
- thanosrulers
- thanosrulers/finalizers
- servicemonitors
- podmonitors
- prometheusrules
verbs:
- 'get'
- 'list'
- 'watch'
{{- end }}

View File

@ -1,48 +0,0 @@
{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: monitoring-config-admin
namespace: {{ template "kube-prometheus-stack.namespace" . }}
labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- secrets
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: monitoring-config-edit
namespace: {{ template "kube-prometheus-stack.namespace" . }}
labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- secrets
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: monitoring-config-view
namespace: {{ template "kube-prometheus-stack.namespace" . }}
labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- secrets
verbs:
- 'get'
- 'list'
- 'watch'
{{- end }}

View File

@ -1,47 +0,0 @@
{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create .Values.grafana.enabled }}
{{- if or .Values.grafana.sidecar.dashboards.enabled .Values.grafana.defaultDashboardsEnabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: monitoring-dashboard-admin
namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: monitoring-dashboard-edit
namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: monitoring-dashboard-view
namespace: {{ .Values.grafana.sidecar.dashboards.searchNamespace }}
labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- 'get'
- 'list'
- 'watch'
{{- end }}
{{- end }}

View File

@ -1,5 +0,0 @@
url: https://github.com/prometheus-community/helm-charts/releases/download/kube-prometheus-stack-9.4.2/kube-prometheus-stack-9.4.2.tgz
packageVersion: 00
generateCRDChart:
enabled: true
assumeOwnershipOfCRDs: true

File diff suppressed because it is too large

View File

@ -1,2 +0,0 @@
url: https://github.com/rancher/rancher-operator/releases/download/v0.1.0-alpha8/rancher-operator-crd-0.1.0-alpha8.tgz
packageVersion: 00

View File

@ -1,2 +0,0 @@
url: https://github.com/rancher/rancher-operator/releases/download/v0.1.0-alpha8/rancher-operator-0.1.0-alpha8.tgz
packageVersion: 00

View File

@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,20 +0,0 @@
apiVersion: v1
version: 0.1.0
appVersion: 0.1.0
annotations:
catalog.rancher.io/certified: rancher
catalog.rancher.io/namespace: cattle-monitoring-system
catalog.rancher.io/release-name: rancher-pushprox
catalog.cattle.io/hidden: "true"
description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx clients.
name: rancher-pushprox
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

View File

@ -1,54 +0,0 @@
# rancher-pushprox
A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster.
Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy.
Using an instance of this chart is suitable for the following scenarios:
- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster)
- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics)
- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath`
- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make requests to `/metrics`)
- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`)
The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project.
## Configuration
The following tables list the configurable parameters of the rancher-pushprox chart and their default values.
### General
#### Required
| Parameter | Description | Example |
| ----- | ----------- | ------ |
| `component` | The component that is being monitored | `kube-etcd`
| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://<HOST_IP>:<metricsPort>/metrics`) | `2379` |
#### Optional
| Parameter | Description | Default |
| ----- | ----------- | ------ |
| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` |
| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` |
| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` |
| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}` with the `proxyUrl` specified | `""` |
| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` |
| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` |
| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` |
| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` |
| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
| `clients.resources` | Set resource limits and requests for the client container | `{}` |
| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` |
| `clients.tolerations` | Specify tolerations for clients | `[]` |
| `proxy.enabled` | Deploys the proxy that each client will register with | `true` |
| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` |
| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` |
| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` |
| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` |
*Tip: The filepaths set in `clients.https.<cert|key|caCert>File` can include wildcard characters*.
See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used.
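
Pulling the required and HTTPS parameters together, here is a sketch of an instance that scrapes etcd over TLS; the certificate directory and file names are assumptions about where a cluster keeps its etcd certs, not chart defaults:

```yaml
# Sketch: one rancher-pushprox instance for kube-etcd over HTTPS.
# certDir and the *File values are assumed paths, not defaults.
component: kube-etcd
metricsPort: 2379
clients:
  https:
    enabled: true
    certDir: /etc/kubernetes/ssl
    certFile: kube-etcd-*.pem        # wildcards are allowed per the tip above
    keyFile: kube-etcd-*-key.pem
    caCertFile: kube-ca.pem
proxy:
  port: 8080
```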

View File

@ -1,65 +0,0 @@
# Rancher
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- end -}}
{{- end -}}
# General
{{- define "pushprox.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{- define "pushProxy.commonLabels" -}}
release: {{ .Release.Name }}
component: {{ .Values.component | quote }}
provider: kubernetes
{{- end -}}
{{- define "pushProxy.proxyUrl" -}}
{{- $_ := (required "Template requires either .Values.proxy.port or .Values.clients.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}}
{{- if .Values.clients.proxyUrl -}}
{{ printf "%s" .Values.clients.proxyUrl }}
{{- else -}}
{{ printf "http://%s.%s.svc.cluster.local:%d" (include "pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }}
{{- end -}}
{{- end -}}
# Client
{{- define "pushProxy.client.name" -}}
{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.client.labels" -}}
k8s-app: {{ template "pushProxy.client.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
# Proxy
{{- define "pushProxy.proxy.name" -}}
{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.proxy.labels" -}}
k8s-app: {{ template "pushProxy.proxy.name" . }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
# ServiceMonitor
{{- define "pushprox.serviceMonitor.name" -}}
{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}}
{{- end -}}
{{- define "pushProxy.serviceMonitor.labels" -}}
app: {{ template "pushprox.serviceMonitor.name" . }}
release: {{ .Release.Name | quote }}
{{ template "pushProxy.commonLabels" . }}
{{- end -}}
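
For a concrete sense of what these helpers produce: with `component: kube-etcd`, the default `proxy.port` of `8080`, and a release installed in `cattle-monitoring-system` (the namespace is whatever the release uses; this one is illustrative), the names and proxy URL render as:

```yaml
# Rendered helper output for component=kube-etcd (namespace is illustrative).
client name: pushprox-kube-etcd-client
proxy name: pushprox-kube-etcd-proxy
proxyUrl: http://pushprox-kube-etcd-proxy.cattle-monitoring-system.svc.cluster.local:8080
```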

View File

@ -1,74 +0,0 @@
{{- if .Values.clients }}{{- if .Values.clients.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "pushProxy.client.name" . }}
{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }}
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "pushProxy.client.name" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "pushProxy.client.name" . }}
subjects:
- kind: ServiceAccount
name: {{ template "pushProxy.client.name" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ .Release.Namespace }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ .Release.Namespace }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
spec:
privileged: false
hostNetwork: true
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 0
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 0
max: 65535
readOnlyRootFilesystem: false
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
volumes:
- 'emptyDir'
- 'secret'
- 'hostPath'
allowedHostPaths:
- pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPS" .Values.clients.https.certDir }}
readOnly: true
{{- end }}
{{- end }}{{- end }}

View File

@ -1,134 +0,0 @@
{{- if .Values.clients }}{{- if .Values.clients.enabled }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ template "pushProxy.client.name" . }}
namespace: {{ template "pushprox.namespace" . }}
labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
pushprox-exporter: "client"
spec:
selector:
matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }}
template:
metadata:
labels: {{ include "pushProxy.client.labels" . | nindent 8 }}
spec:
{{- if .Values.clients.nodeSelector }}
nodeSelector: {{ toYaml .Values.clients.nodeSelector | nindent 8 }}
{{- end }}
{{- if .Values.clients.tolerations }}
tolerations: {{ toYaml .Values.clients.tolerations | nindent 6 }}
{{- end }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ template "pushProxy.client.name" . }}
containers:
- name: pushprox-client
image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }}
command:
{{- range .Values.clients.command }}
- {{ . | quote }}
{{- end }}
args:
- --fqdn=$(HOST_IP)
- --proxy-url=$(PROXY_URL)
- --metrics-addr=$(PORT)
- --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}}
{{- if .Values.clients.useLocalhost }}
- --use-localhost
{{- end }}
{{- if .Values.clients.https.enabled }}
{{- if .Values.clients.https.insecureSkipVerify }}
- --insecure-skip-verify
{{- end }}
{{- if .Values.clients.https.useServiceAccountCredentials }}
- --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token
{{- end }}
{{- if .Values.clients.https.certDir }}
- --tls.cert=/etc/ssl/push-proxy/push-proxy.pem
- --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem
- --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem
{{- end }}
{{- end }}
env:
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: PORT
value: :{{ .Values.clients.port }}
- name: PROXY_URL
value: {{ template "pushProxy.proxyUrl" . }}
securityContext:
runAsNonRoot: true
runAsUser: 1000
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
volumeMounts:
- name: metrics-cert-dir
mountPath: /etc/ssl/push-proxy
{{- end }}
{{- if .Values.clients.resources }}
resources: {{ toYaml .Values.clients.resources | nindent 10 }}
{{- end }}
{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }}
initContainers:
- name: copy-certs
image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }}
command:
- sh
- -c
- |
echo "Searching for files to copy within the source volume"
echo "cert: ${CERT_FILE_NAME}"
echo "key: ${KEY_FILE_NAME}"
echo "cacert: ${CACERT_FILE_NAME}"
CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1)
KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1)
CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1)
test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1
test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1
test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1
echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET"
cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1
chmod 444 $CERT_FILE_TARGET || exit 1
echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET"
cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1
chmod 444 $KEY_FILE_TARGET || exit 1
echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET"
cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1
chmod 444 $CACERT_FILE_TARGET || exit 1
env:
- name: CERT_FILE_NAME
value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }}
- name: KEY_FILE_NAME
value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }}
- name: CACERT_FILE_NAME
value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }}
- name: CERT_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy.pem
- name: KEY_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy-key.pem
- name: CACERT_FILE_TARGET
value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem
securityContext:
runAsNonRoot: false
volumeMounts:
- name: metrics-cert-dir-source
mountPath: /etc/source
readOnly: true
- name: metrics-cert-dir
mountPath: /etc/ssl/push-proxy
volumes:
- name: metrics-cert-dir-source
hostPath:
path: {{ required "Need access to volume on host with the SSL cert files to use HTTPS" .Values.clients.https.certDir }}
type: Directory
- name: metrics-cert-dir
emptyDir: {}
{{- end }}
{{- end }}{{- end }}

Some files were not shown because too many files have changed in this diff