Make charts

pull/3204/head
Steven Crespo 2023-10-30 12:52:21 -07:00
parent 161e82e1bc
commit 964a3f9aec
34 changed files with 1835 additions and 0 deletions


@ -0,0 +1,11 @@
annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-resources-system
catalog.cattle.io/release-name: rancher-backup-crd
apiVersion: v2
appVersion: 4.0.0
description: Installs the CRDs for rancher-backup.
name: rancher-backup-crd
type: application
version: 103.0.0+up4.0.0


@ -0,0 +1,3 @@
# Rancher Backup CRD
A Rancher chart that installs the CRDs used by `rancher-backup`.


@ -0,0 +1,141 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: backups.resources.cattle.io
spec:
group: resources.cattle.io
names:
kind: Backup
plural: backups
singular: backup
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .status.storageLocation
name: Location
type: string
- jsonPath: .status.backupType
name: Type
type: string
- jsonPath: .status.filename
name: Latest-Backup
type: string
- jsonPath: .spec.resourceSetName
name: ResourceSet
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
encryptionConfigSecretName:
description: Name of the Secret containing the encryption config
nullable: true
type: string
resourceSetName:
description: Name of the ResourceSet CR to use for backup
nullable: true
type: string
retentionCount:
minimum: 1
type: integer
schedule:
description: Cron schedule for recurring backups
example:
Descriptors: '@midnight'
Standard crontab specs: 0 0 * * *
nullable: true
type: string
storageLocation:
nullable: true
properties:
s3:
nullable: true
properties:
bucketName:
nullable: true
type: string
credentialSecretName:
nullable: true
type: string
credentialSecretNamespace:
nullable: true
type: string
endpoint:
nullable: true
type: string
endpointCA:
nullable: true
type: string
folder:
nullable: true
type: string
insecureTLSSkipVerify:
type: boolean
region:
nullable: true
type: string
type: object
type: object
required:
- resourceSetName
type: object
status:
properties:
backupType:
nullable: true
type: string
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
filename:
nullable: true
type: string
lastSnapshotTs:
nullable: true
type: string
nextSnapshotAt:
nullable: true
type: string
observedGeneration:
type: integer
storageLocation:
nullable: true
type: string
summary:
nullable: true
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}


@ -0,0 +1,118 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: resourcesets.resources.cattle.io
spec:
group: resources.cattle.io
names:
kind: ResourceSet
plural: resourcesets
singular: resourceset
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
controllerReferences:
items:
properties:
apiVersion:
nullable: true
type: string
name:
nullable: true
type: string
namespace:
nullable: true
type: string
replicas:
type: integer
resource:
nullable: true
type: string
type: object
nullable: true
type: array
resourceSelectors:
items:
properties:
apiVersion:
nullable: true
type: string
excludeKinds:
items:
nullable: true
type: string
nullable: true
type: array
excludeResourceNameRegexp:
nullable: true
type: string
kinds:
items:
nullable: true
type: string
nullable: true
type: array
kindsRegexp:
nullable: true
type: string
labelSelectors:
nullable: true
properties:
matchExpressions:
items:
properties:
key:
nullable: true
type: string
operator:
nullable: true
type: string
values:
items:
nullable: true
type: string
nullable: true
type: array
type: object
nullable: true
type: array
matchLabels:
additionalProperties:
nullable: true
type: string
nullable: true
type: object
type: object
namespaceRegexp:
nullable: true
type: string
namespaces:
items:
nullable: true
type: string
nullable: true
type: array
resourceNameRegexp:
nullable: true
type: string
resourceNames:
items:
nullable: true
type: string
nullable: true
type: array
type: object
nullable: true
required:
- apiVersion
type: array
required:
- resourceSelectors
type: object
served: true
storage: true
subresources:
status: {}


@ -0,0 +1,122 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: restores.resources.cattle.io
spec:
group: resources.cattle.io
names:
kind: Restore
plural: restores
singular: restore
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .status.backupSource
name: Backup-Source
type: string
- jsonPath: .spec.backupFilename
name: Backup-File
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
backupFilename:
nullable: true
type: string
deleteTimeoutSeconds:
maximum: 10
type: integer
encryptionConfigSecretName:
nullable: true
type: string
ignoreErrors:
type: boolean
prune:
nullable: true
type: boolean
storageLocation:
nullable: true
properties:
s3:
nullable: true
properties:
bucketName:
nullable: true
type: string
credentialSecretName:
nullable: true
type: string
credentialSecretNamespace:
nullable: true
type: string
endpoint:
nullable: true
type: string
endpointCA:
nullable: true
type: string
folder:
nullable: true
type: string
insecureTLSSkipVerify:
type: boolean
region:
nullable: true
type: string
type: object
type: object
required:
- backupFilename
type: object
status:
properties:
backupSource:
nullable: true
type: string
conditions:
items:
properties:
lastTransitionTime:
nullable: true
type: string
lastUpdateTime:
nullable: true
type: string
message:
nullable: true
type: string
reason:
nullable: true
type: string
status:
nullable: true
type: string
type:
nullable: true
type: string
type: object
nullable: true
type: array
observedGeneration:
type: integer
restoreCompletionTs:
nullable: true
type: string
summary:
nullable: true
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}


@ -0,0 +1,26 @@
annotations:
catalog.cattle.io/auto-install: rancher-backup-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Rancher Backups
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: cattle-resources-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: resources.cattle.io.resourceset/v1
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: rancher-backup
catalog.cattle.io/scope: management
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/ui-component: rancher-backup
catalog.cattle.io/upstream-version: 2.1.1
apiVersion: v2
appVersion: 4.0.0
description: Provides ability to back up and restore the Rancher application running
on any Kubernetes cluster
icon: https://charts.rancher.io/assets/logos/backup-restore.svg
keywords:
- applications
- infrastructure
kubeVersion: '>= 1.23.0-0'
name: rancher-backup
version: 103.0.0+up4.0.0


@ -0,0 +1,79 @@
# Rancher Backup
This chart provides the ability to back up and restore the Rancher application running on any Kubernetes cluster.
Refer to the [backup-restore-operator](https://github.com/rancher/backup-restore-operator) repository for implementation details.
-----
### Get Repo Info
```bash
helm repo add rancher-chart https://charts.rancher.io
helm repo update
```
-----
### Install Chart
```bash
helm install rancher-backup-crd rancher-chart/rancher-backup-crd -n cattle-resources-system --create-namespace
helm install rancher-backup rancher-chart/rancher-backup -n cattle-resources-system
```
-----
### Configuration
The following table lists the configurable parameters of the rancher-backup chart and their default values (an example override file follows the table):
| Parameter | Description | Default |
|----------|---------------|-------|
| image.repository | Container image repository | rancher/backup-restore-operator |
| image.tag | Container image tag | v0.1.0-rc1 |
| s3.enabled | Configure an S3-compatible default storage location. The current version supports S3 and MinIO | false |
| s3.credentialSecretName | Name of the Secret containing S3 credentials. This field is optional; skip it to use IAM Role authentication. The Secret must contain the following two keys: `accessKey` and `secretKey` | "" |
| s3.credentialSecretNamespace | Namespace of the Secret containing S3 credentials. This can be any namespace. | "" |
| s3.region | Region of the S3 Bucket (Required for S3, not valid for MinIO) | "" |
| s3.bucketName | Name of the Bucket | "" |
| s3.folder | Base folder within the Bucket (optional) | "" |
| s3.endpoint | Endpoint for the S3 storage provider | "" |
| s3.endpointCA | Base64 encoded CA cert for the S3 storage provider (optional) | "" |
| s3.insecureTLSSkipVerify | Skip SSL verification | false |
| persistence.enabled | Configure a Persistent Volume as the default storage location. It accepts either a StorageClass name used to create a PVC, or the name of an existing Persistent Volume to use. The Persistent Volume is mounted at `/var/lib/backups` in the operator pod | false |
| persistence.storageClass | StorageClass to use for dynamically provisioning the Persistent Volume, which will be used for storing backups | "" |
| persistence.volumeName | Persistent Volume to use for storing backups | "" |
| persistence.size | Requested size of the Persistent Volume (Applicable when using dynamic provisioning) | "" |
| debug | Set debug flag for backup-restore deployment | false |
| trace | Set trace flag for backup-restore deployment | false |
| nodeSelector | https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | {} |
| tolerations | https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | [] |
| affinity | https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity | {} |
| serviceAccount.annotations | Annotations to apply to created service account | {} |
| global.cattle.psp.enabled | Enable or disable PSPs in the chart | false |
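For illustration, a minimal override file that configures an S3-compatible default storage location; the Secret name, namespace, bucket, and endpoint below are placeholders, and the referenced Secret must contain the `accessKey` and `secretKey` keys described above:
```yaml
# values-s3.yaml (hypothetical override file)
s3:
  enabled: true
  credentialSecretName: s3-creds                 # omit to use IAM Role authentication
  credentialSecretNamespace: cattle-resources-system
  region: us-east-1
  bucketName: rancher-backups
  folder: rancher
  endpoint: s3.us-east-1.amazonaws.com
```
Pass the file at install or upgrade time, for example `helm install rancher-backup rancher-chart/rancher-backup -n cattle-resources-system -f values-s3.yaml`.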
-----
### PSPs
The chart's `values.yaml` includes a configuration option that allows you to enable or disable PSPs, to align with the PSP deprecation in Kubernetes `v1.25` and above.
-----
### CRDs
Refer to [this](https://github.com/rancher/backup-restore-operator#crds) section for information on the CRDs that this chart installs, and to [this](https://github.com/rancher/backup-restore-operator/tree/master/examples) folder for sample manifests for the CRDs.
-----
### Upgrading Chart
```bash
helm upgrade rancher-backup-crd rancher-chart/rancher-backup-crd -n cattle-resources-system
helm upgrade rancher-backup rancher-chart/rancher-backup -n cattle-resources-system
```
-----
### Uninstall Chart
```bash
helm uninstall rancher-backup -n cattle-resources-system
helm uninstall rancher-backup-crd -n cattle-resources-system
```


@ -0,0 +1,33 @@
# Rancher Backup
This chart enables you to capture backups of the Rancher application and restore from those backups. It can also be used to migrate Rancher from one Kubernetes cluster to a different Kubernetes cluster.
For more information on how to use the feature, refer to our [docs](https://ranchermanager.docs.rancher.com/pages-for-subheaders/backup-restore-and-disaster-recovery).
This chart installs the following components:
- [backup-restore-operator](https://github.com/rancher/backup-restore-operator)
- The operator handles backing up all Kubernetes resources and CRDs that Rancher creates and manages from the local cluster. It gathers these resources by querying the Kubernetes API server, packages all the resources to create a tarball file and saves it in the configured backup storage location.
- The operator can be configured to store backups in S3-compatible object stores such as AWS S3 and MinIO, or in persistent volumes. During deployment, you can configure a default storage location, and each individual backup can still override it, although per-backup overrides are limited to S3-compatible object stores.
- It preserves the ownerReferences on all resources, thereby maintaining dependencies between objects.
- The operator also provides encryption support, so user-specified resources can be encrypted before they are saved in the backup file. It uses the same encryption configuration that is used to enable [Kubernetes Encryption at Rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
- Backup - A backup is a CRD (`Backup`) that defines when to take backups, where to store the backup, and, optionally, what encryption to use. Backups can be taken ad hoc or on a recurring schedule.
- Restore - A restore is a CRD (`Restore`) that defines which backup file to restore the Rancher application from. Minimal examples of both are sketched after this list.
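For illustration, a minimal `Backup` and `Restore` pair based on the CRD schemas installed by `rancher-backup-crd`; the names, schedule, S3 details, and backup filename are placeholders:
```yaml
apiVersion: resources.cattle.io/v1
kind: Backup
metadata:
  name: nightly-rancher-backup        # placeholder name
spec:
  resourceSetName: rancher-resource-set
  schedule: "@midnight"               # recurring backup; omit for a one-off backup
  retentionCount: 10
  # encryptionConfigSecretName: encryptionconfig   # optional, for encrypted backups
  storageLocation:
    s3:
      credentialSecretName: s3-creds                # placeholder Secret with accessKey/secretKey
      credentialSecretNamespace: cattle-resources-system
      bucketName: rancher-backups                   # placeholder bucket
      region: us-east-1
      endpoint: s3.us-east-1.amazonaws.com
      folder: rancher
---
apiVersion: resources.cattle.io/v1
kind: Restore
metadata:
  name: restore-from-nightly          # placeholder name
spec:
  backupFilename: nightly-rancher-backup-2023-10-30T00-00-00Z.tar.gz   # placeholder filename
  prune: true
```
When `storageLocation` is omitted, the operator falls back to the default storage location configured at deployment time.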
## Upgrading to Kubernetes v1.25+
Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`.
> **Note:**
> In this chart release, any previous fields that were associated with PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.
> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.
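For reference, the corresponding stanza in this chart's `values.yaml` (shown here with its default); it can also be set during `helm upgrade` with `--set global.cattle.psp.enabled=false`:
```yaml
global:
  cattle:
    psp:
      enabled: false
```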
As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.
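As a sketch, assuming a `baseline` Pod Security Standard fits your environment, the release namespace can be labeled with the upstream Pod Security Admission labels; the enforced level shown is illustrative only:
```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: cattle-resources-system
  labels:
    pod-security.kubernetes.io/enforce: baseline       # illustrative level
    pod-security.kubernetes.io/enforce-version: latest
```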


@ -0,0 +1,25 @@
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "aks.cattle.io$"
- apiVersion: "aks.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
namespaces:
- "cattle-system"
resourceNames:
- "aks-config-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNames:
- "aks-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNames:
- "aks-operator"
- apiVersion: "v1"
kindsRegexp: "^serviceaccounts$"
namespaces:
- "cattle-system"
resourceNames:
- "aks-operator"


@ -0,0 +1,17 @@
- apiVersion: "eks.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
resourceNames:
- "eks-config-operator"
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "eks.cattle.io$"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNames:
- "eks-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNames:
- "eks-operator"


@ -0,0 +1,49 @@
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "elemental.cattle.io$"
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
namespaces:
- "cattle-elemental-system"
resourceNames:
- "elemental-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNames:
- "elemental-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNames:
- "elemental-operator"
- apiVersion: "v1"
kindsRegexp: "^serviceaccounts$"
namespaces:
- "cattle-elemental-system"
resourceNames:
- "elemental-operator"
- apiVersion: "management.cattle.io/v3"
kindsRegexp: "^globalrole$"
resourceNames:
- "elemental-operator"
- apiVersion: "management.cattle.io/v3"
kindsRegexp: "^apiservice$"
resourceNameRegexp: "elemental.cattle.io$"
- apiVersion: "elemental.cattle.io/v1beta1"
kindsRegexp: "."
namespaceRegexp: "^cattle-fleet-|^fleet-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^roles$|^rolebindings$"
labelSelectors:
matchExpressions:
- key: "elemental.cattle.io/managed"
operator: "In"
values: ["true"]
namespaceRegexp: "^cattle-fleet-|^fleet-"
- apiVersion: "v1"
kindsRegexp: "^secrets$|^serviceaccounts$"
labelSelectors:
matchExpressions:
- key: "elemental.cattle.io/managed"
operator: "In"
values: ["true"]
namespaceRegexp: "^cattle-fleet-|^fleet-"


@ -0,0 +1,53 @@
- apiVersion: "v1"
kindsRegexp: "^namespaces$"
resourceNameRegexp: "^fleet-"
- apiVersion: "v1"
kindsRegexp: "^secrets$"
namespaceRegexp: "^cattle-fleet-|^fleet-"
excludeResourceNameRegexp: "^import-token"
labelSelectors:
matchExpressions:
- key: "owner"
operator: "NotIn"
values: ["helm"]
- key: "fleet.cattle.io/managed"
operator: "In"
values: ["true"]
- apiVersion: "v1"
kindsRegexp: "^serviceaccounts$"
namespaceRegexp: "^cattle-fleet-|^fleet-"
excludeResourceNameRegexp: "^default$"
- apiVersion: "v1"
kindsRegexp: "^configmaps$"
namespaceRegexp: "^cattle-fleet-|^fleet-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^roles$|^rolebindings$"
namespaceRegexp: "^cattle-fleet-|^fleet-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNameRegexp: "^fleet-|^gitjob-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNameRegexp: "^fleet-"
resourceNames:
- "gitjob"
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "fleet.cattle.io$|gitjob.cattle.io$"
- apiVersion: "fleet.cattle.io/v1alpha1"
kindsRegexp: "."
excludeKinds:
- "bundledeployments"
- apiVersion: "gitjob.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
namespaceRegexp: "^cattle-fleet-|^fleet-"
resourceNameRegexp: "^fleet-"
resourceNames:
- "gitjob"
- apiVersion: "apps/v1"
kindsRegexp: "^services$"
namespaceRegexp: "^cattle-fleet-|^fleet-"
resourceNames:
- "gitjob"


@ -0,0 +1,17 @@
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "gke.cattle.io$"
- apiVersion: "gke.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
resourceNames:
- "gke-config-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNames:
- "gke-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNames:
- "gke-operator"


@ -0,0 +1,23 @@
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "provisioning.cattle.io$|rke-machine-config.cattle.io$|rke-machine.cattle.io$|rke.cattle.io$|cluster.x-k8s.io$"
- apiVersion: "provisioning.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "rke-machine-config.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "rke-machine.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "rke.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "cluster.x-k8s.io/v1beta1"
kindsRegexp: "."
- apiVersion: "v1"
kindsRegexp: "^secrets$"
resourceNameRegexp: "machine-plan$|rke-state$|machine-state$|machine-driver-secret$|machine-provision$|^harvesterconfig"
namespaces:
- "fleet-default"
- apiVersion: "v1"
kindsRegexp: "^configmaps$"
resourceNames:
- "provisioning-log"
namespaceRegexp: "^c-m-"


@ -0,0 +1,28 @@
- apiVersion: "rancher.cattle.io/v1"
kindsRegexp: "."
- apiVersion: "apps/v1"
kindsRegexp: "^deployments$"
resourceNames:
- "rancher-operator"
namespaces:
- "rancher-operator-system"
- apiVersion: "v1"
kindsRegexp: "^serviceaccounts$"
namespaces:
- "rancher-operator-system"
excludeResourceNameRegexp: "^default$"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNames:
- "rancher-operator"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNames:
- "rancher-operator"
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "rancher.cattle.io$"
- apiVersion: "v1"
kindsRegexp: "^namespaces$"
resourceNames:
- "rancher-operator-system"


@ -0,0 +1,65 @@
- apiVersion: "v1"
kindsRegexp: "^namespaces$"
resourceNameRegexp: "^cattle-|^p-|^c-|^user-|^u-"
resourceNames:
- "local"
- apiVersion: "v1"
kindsRegexp: "^secrets$"
namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
labelSelectors:
matchExpressions:
- key: "owner"
operator: "NotIn"
values: ["helm"]
excludeResourceNameRegexp: "^bootstrap-secret$|^rancher-csp-adapter|^csp-adapter-cache$"
- apiVersion: "v1"
kindsRegexp: "^serviceaccounts$"
namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
excludeResourceNameRegexp: "^default$|^rancher-csp-adapter$"
- apiVersion: "v1"
kindsRegexp: "^configmaps$"
namespaces:
- "cattle-system"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^roles$|^rolebindings$"
namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-"
excludeResourceNameRegexp: "^rancher-csp-adapter"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterrolebindings$"
resourceNameRegexp: "^cattle-|^clusterrolebinding-|^globaladmin-user-|^grb-u-|^crb-"
- apiVersion: "rbac.authorization.k8s.io/v1"
kindsRegexp: "^clusterroles$"
resourceNameRegexp: "^cattle-|^p-|^c-|^local-|^user-|^u-|^project-|^create-ns$"
excludeResourceNameRegexp: "^rancher-csp-adapter-"
- apiVersion: "scheduling.k8s.io/v1"
kindsRegexp: "^priorityclasses$"
resourceNameRegexp: "^rancher-critical$"
- apiVersion: "apiextensions.k8s.io/v1"
kindsRegexp: "."
resourceNameRegexp: "management.cattle.io$|project.cattle.io$|catalog.cattle.io$|resources.cattle.io$"
- apiVersion: "management.cattle.io/v3"
kindsRegexp: "."
excludeKinds:
- "tokens"
- "rancherusernotifications"
- apiVersion: "management.cattle.io/v3"
kindsRegexp: "^tokens$"
labelSelectors:
matchExpressions:
- key: "authn.management.cattle.io/kind"
operator: "NotIn"
values: [ "provisioning" ]
- apiVersion: "project.cattle.io/v3"
kindsRegexp: "."
- apiVersion: "catalog.cattle.io/v1"
kindsRegexp: "^clusterrepos$"
- apiVersion: "resources.cattle.io/v1"
kindsRegexp: "^ResourceSet$"
- apiVersion: "v1"
kindsRegexp: "^secrets$"
namespaceRegexp: "^.*$"
labelSelectors:
matchExpressions:
- key: "resources.cattle.io/backup"
operator: "In"
values: ["true"]


@ -0,0 +1,87 @@
{{- define "system_default_registry" -}}
{{- if .Values.global.cattle.systemDefaultRegistry -}}
{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
{{/*
Windows clusters add a default taint to Linux nodes;
add the Linux tolerations below so that workloads can be scheduled onto those Linux nodes.
*/}}
{{- define "linux-node-tolerations" -}}
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
{{- end -}}
{{- define "linux-node-selector" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
beta.kubernetes.io/os: linux
{{- else -}}
kubernetes.io/os: linux
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "backupRestore.fullname" -}}
{{- .Chart.Name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "backupRestore.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "backupRestore.labels" -}}
helm.sh/chart: {{ include "backupRestore.chart" . }}
{{ include "backupRestore.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "backupRestore.selectorLabels" -}}
app.kubernetes.io/name: {{ include "backupRestore.fullname" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
resources.cattle.io/operator: backup-restore
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "backupRestore.serviceAccountName" -}}
{{ include "backupRestore.fullname" . }}
{{- end }}
{{- define "backupRestore.s3SecretName" -}}
{{- printf "%s-%s" .Chart.Name "s3" | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create PVC name using release and revision number, unless a volumeName is given.
*/}}
{{- define "backupRestore.pvcName" -}}
{{- if .Values.persistence.volumeName }}
{{- printf "%s" .Values.persistence.volumeName }}
{{- else -}}
{{- printf "%s-%d" .Release.Name .Release.Revision }}
{{- end }}
{{- end }}


@ -0,0 +1,14 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "backupRestore.fullname" . }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "backupRestore.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io


@ -0,0 +1,79 @@
{{- if and .Values.s3.enabled .Values.persistence.enabled }}
{{- fail "\n\nCannot configure both s3 and PV for storing backups" }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "backupRestore.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "backupRestore.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "backupRestore.selectorLabels" . | nindent 8 }}
annotations:
checksum/s3: {{ include (print $.Template.BasePath "/s3-secret.yaml") . | sha256sum }}
checksum/pvc: {{ include (print $.Template.BasePath "/pvc.yaml") . | sha256sum }}
spec:
serviceAccountName: {{ include "backupRestore.serviceAccountName" . }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 6 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ default "Always" .Values.imagePullPolicy }}
args:
{{- if .Values.debug }}
- "--debug"
{{- end }}
{{- if .Values.trace }}
- "--trace"
{{- end }}
env:
- name: CHART_NAMESPACE
value: {{ .Release.Namespace }}
{{- if .Values.s3.enabled }}
- name: DEFAULT_S3_BACKUP_STORAGE_LOCATION
value: {{ include "backupRestore.s3SecretName" . }}
{{- end }}
{{- if .Values.proxy }}
- name: HTTP_PROXY
value: {{ .Values.proxy }}
- name: HTTPS_PROXY
value: {{ .Values.proxy }}
- name: NO_PROXY
value: {{ .Values.noProxy }}
{{- end }}
{{- if .Values.persistence.enabled }}
- name: DEFAULT_PERSISTENCE_ENABLED
value: "persistence-enabled"
volumeMounts:
- mountPath: "/var/lib/backups"
name: pv-storage
volumes:
- name: pv-storage
persistentVolumeClaim:
claimName: {{ include "backupRestore.pvcName" . }}
{{- end }}
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}


@ -0,0 +1,124 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "backupRestore.fullname" . }}-patch-sa
namespace: {{ .Release.Namespace }}
labels: {{ include "backupRestore.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
backoffLimit: 1
template:
spec:
serviceAccountName: {{ include "backupRestore.fullname" . }}-patch-sa
securityContext:
runAsNonRoot: true
runAsUser: 1000
restartPolicy: Never
nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
{{- if .Values.nodeSelector }}
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
{{- if .Values.tolerations }}
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
containers:
- name: {{ include "backupRestore.fullname" . }}-patch-sa
image: {{ include "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}
imagePullPolicy: IfNotPresent
command: ["kubectl", "-n", {{ .Release.Namespace | quote }}, "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "backupRestore.fullname" . }}-patch-sa
namespace: {{ .Release.Namespace }}
labels: {{ include "backupRestore.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "backupRestore.fullname" . }}-patch-sa
labels: {{ include "backupRestore.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
rules:
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get", "patch"]
{{- if .Values.global.cattle.psp.enabled}}
- apiGroups: ["policy"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
resourceNames:
- {{ include "backupRestore.fullname" . }}-patch-sa
{{- end}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "backupRestore.fullname" . }}-patch-sa
labels: {{ include "backupRestore.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "backupRestore.fullname" . }}-patch-sa
subjects:
- kind: ServiceAccount
name: {{ include "backupRestore.fullname" . }}-patch-sa
namespace: {{ .Release.Namespace }}
---
{{- if .Values.global.cattle.psp.enabled}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "backupRestore.fullname" . }}-patch-sa
labels: {{ include "backupRestore.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install, post-upgrade
"helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation
spec:
privileged: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'secret'
{{- end}}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ include "backupRestore.fullname" . }}-default-allow-all
namespace: {{ .Release.Namespace }}
spec:
podSelector: {}
egress:
- {}
policyTypes:
- Ingress
- Egress


@ -0,0 +1,31 @@
{{- if .Values.global.cattle.psp.enabled -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "backupRestore.fullname" . }}-psp
labels: {{ include "backupRestore.labels" . | nindent 4 }}
spec:
privileged: false
allowPrivilegeEscalation: false
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
volumes:
- 'persistentVolumeClaim'
- 'secret'
{{- end -}}


@ -0,0 +1,27 @@
{{- if and .Values.persistence.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "backupRestore.pvcName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
spec:
accessModes:
- ReadWriteOnce
resources:
{{- with .Values.persistence }}
requests:
storage: {{ .size | quote }}
{{- if .storageClass }}
{{- if (eq "-" .storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: {{ .storageClass | quote }}
{{- end }}
{{- end }}
{{- if .volumeName }}
volumeName: {{ .volumeName | quote }}
{{- end }}
{{- end }}
{{- end }}


@ -0,0 +1,13 @@
apiVersion: resources.cattle.io/v1
kind: ResourceSet
metadata:
name: rancher-resource-set
controllerReferences:
- apiVersion: "apps/v1"
resource: "deployments"
name: "rancher"
namespace: "cattle-system"
resourceSelectors:
{{- range $path, $_ := .Files.Glob "files/default-resourceset-contents/*.yaml" -}}
{{- $.Files.Get $path | nindent 2 -}}
{{- end -}}


@ -0,0 +1,31 @@
{{- if .Values.s3.enabled -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "backupRestore.s3SecretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
type: Opaque
stringData:
{{- with .Values.s3 }}
{{- if .credentialSecretName }}
credentialSecretName: {{ .credentialSecretName }}
credentialSecretNamespace: {{ required "When providing a Secret containing S3 credentials, a valid .Values.s3.credentialSecretNamespace must be provided" .credentialSecretNamespace }}
{{- end }}
{{- if .region }}
region: {{ .region | quote }}
{{- end }}
bucketName: {{ required "A valid .Values.s3.bucketName is required for configuring S3 compatible storage as the default backup storage location" .bucketName | quote }}
{{- if .folder }}
folder: {{ .folder | quote }}
{{- end }}
endpoint: {{ required "A valid .Values.s3.endpoint is required for configuring S3 compatible storage as the default backup storage location" .endpoint | quote }}
{{- if .endpointCA }}
endpointCA: {{ .endpointCA }}
{{- end }}
{{- if .insecureTLSSkipVerify }}
insecureTLSSkipVerify: {{ .insecureTLSSkipVerify | quote }}
{{- end }}
{{- end }}
{{ end }}


@ -0,0 +1,11 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "backupRestore.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "backupRestore.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}
annotations:
{{- toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }}


@ -0,0 +1,16 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
# {{- $found := dict -}}
# {{- set $found "resources.cattle.io/v1/Backup" false -}}
# {{- set $found "resources.cattle.io/v1/ResourceSet" false -}}
# {{- set $found "resources.cattle.io/v1/Restore" false -}}
# {{- range .Capabilities.APIVersions -}}
# {{- if hasKey $found (toString .) -}}
# {{- set $found (toString .) true -}}
# {{- end -}}
# {{- end -}}
# {{- range $_, $exists := $found -}}
# {{- if (eq $exists false) -}}
# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}}
# {{- end -}}
# {{- end -}}
#{{- end -}}


@ -0,0 +1,7 @@
#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}}
#{{- if .Values.global.cattle.psp.enabled }}
#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}}
#{{- end }}
#{{- end }}
#{{- end }}


@ -0,0 +1,216 @@
suite: Test Deployment
templates:
- deployment.yaml
- s3-secret.yaml
- pvc.yaml
- _helpers.tpl
tests:
- it: should set name
template: deployment.yaml
asserts:
- equal:
path: metadata.name
value: "rancher-backup"
- it: should set namespace
template: deployment.yaml
asserts:
- equal:
path: metadata.namespace
value: "NAMESPACE"
- it: should set priorityClassName
set:
priorityClassName: "testClass"
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.priorityClassName
value: "testClass"
- it: should set default imagePullPolicy
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: "Always"
- it: should set imagePullPolicy
set:
imagePullPolicy: "IfNotPresent"
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: "IfNotPresent"
- it: should set debug loglevel
set:
debug: true
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--debug"
- it: should set trace loglevel
set:
trace: true
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--trace"
- it: should set proxy environment variables
set:
proxy: "https://127.0.0.1:3128"
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: HTTP_PROXY
value: "https://127.0.0.1:3128"
- contains:
path: spec.template.spec.containers[0].env
content:
name: HTTPS_PROXY
value: "https://127.0.0.1:3128"
- contains:
path: spec.template.spec.containers[0].env
content:
name: NO_PROXY
value: "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local"
- it: should set proxy environment variables with modified noproxy
set:
proxy: "https://127.0.0.1:3128"
noProxy: "192.168.0.0/24"
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: NO_PROXY
value: "192.168.0.0/24"
- it: should set persistence variables
set:
persistence.enabled: true
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: DEFAULT_PERSISTENCE_ENABLED
value: "persistence-enabled"
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
mountPath: "/var/lib/backups"
name: "pv-storage"
- equal:
path: spec.template.spec.volumes[0].name
value: "pv-storage"
- equal:
path: spec.template.spec.volumes[0].persistentVolumeClaim
value:
claimName: RELEASE-NAME-0
- it: should set claim from custom static volumeName
set:
persistence.enabled: true
persistence.volumeName: "PREDEFINED-VOLUME"
persistence.storageClass: "PREDEFINED-STORAGECLASS"
persistence.size: "PREDIFINED-SAMEAS-PVSIZE"
template: deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: DEFAULT_PERSISTENCE_ENABLED
value: "persistence-enabled"
- equal:
path: spec.template.spec.volumes[0].persistentVolumeClaim
value:
claimName: PREDEFINED-VOLUME
- it: should set private registry
template: deployment.yaml
set:
global.cattle.systemDefaultRegistry: "my.registry.local:3000"
asserts:
- matchRegex:
path: spec.template.spec.containers[0].image
pattern: ^my.registry.local:3000/rancher/backup-restore-operator:.*$
- it: should set nodeselector
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.nodeSelector
value:
kubernetes.io/os: linux
- it: should not set default affinity
template: deployment.yaml
asserts:
- isNull:
path: spec.template.spec.affinity
- it: should set custom affinity
template: deployment.yaml
set:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: disktype
operator: In
values:
- ssd
asserts:
- equal:
path: spec.template.spec.affinity
value:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: disktype
operator: In
values:
- ssd
- it: should set tolerations
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.tolerations[0]
value:
key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
- it: should set custom tolerations
template: deployment.yaml
set:
tolerations:
- key: "example-key"
operator: "Exists"
effect: "NoSchedule"
asserts:
- equal:
path: spec.template.spec.tolerations[0]
value:
key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
- equal:
path: spec.template.spec.tolerations[1]
value:
key: "example-key"
operator: "Exists"
effect: "NoSchedule"
- it: should not set default imagePullSecrets
template: deployment.yaml
asserts:
- isNull:
path: spec.template.spec.imagePullSecrets
- it: should set imagePullSecrets
set:
imagePullSecrets:
- name: "pull-secret"
template: deployment.yaml
asserts:
- equal:
path: spec.template.spec.imagePullSecrets[0].name
value: "pull-secret"


@ -0,0 +1,102 @@
suite: Test PVC
templates:
- pvc.yaml
- _helpers.tpl
tests:
- it: should set name
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- equal:
path: metadata.name
value: "RELEASE-NAME-0"
- it: should set namespace
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- equal:
path: metadata.namespace
value: "NAMESPACE"
- it: should set accessModes
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- equal:
path: spec.accessModes[0]
value: "ReadWriteOnce"
- it: should set size
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- equal:
path: spec.resources.requests.storage
value: "2Gi"
- it: should set size
template: pvc.yaml
set:
persistence:
enabled: true
size: "10Gi"
asserts:
- equal:
path: spec.resources.requests.storage
value: "10Gi"
- it: should not set volumeName
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- isNull:
path: spec.volumeName
- it: should set default storageClass
template: pvc.yaml
set:
persistence:
enabled: true
asserts:
- equal:
path: spec.storageClassName
value: ""
- it: should set custom storageClass
template: pvc.yaml
set:
persistence:
enabled: true
storageClass: "storage-class"
asserts:
- equal:
path: spec.storageClassName
value: "storage-class"
- it: should set custom volumeName
template: pvc.yaml
set:
persistence:
enabled: true
volumeName: "volume-name"
asserts:
- equal:
path: spec.volumeName
value: "volume-name"
- it: should set claim from custom static volumeName
set:
persistence.enabled: true
persistence.volumeName: "PREDEFINED-VOLUME"
persistence.storageClass: "PREDEFINED-STORAGECLASS"
persistence.size: "PREDEFINED-SAMEAS-PVSIZE"
template: pvc.yaml
asserts:
- equal:
path: spec.resources.requests.storage
value: "PREDEFINED-SAMEAS-PVSIZE"
- equal:
path: spec.storageClassName
value: "PREDEFINED-STORAGECLASS"


@ -0,0 +1,141 @@
suite: Test S3 Secret
templates:
- s3-secret.yaml
- _helpers.tpl
tests:
- it: should set name
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- equal:
path: metadata.name
value: "rancher-backup-s3"
- it: should set namespace
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- equal:
path: metadata.namespace
value: "NAMESPACE"
- it: should not set credentialSecretName
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- isNull:
path: stringData.credentialSecretName
- it: should set credentialSecretName
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
credentialSecretName: "credential-secret-name"
credentialSecretNamespace: "credential-secret-namespace"
asserts:
- equal:
path: stringData.credentialSecretName
value: "credential-secret-name"
- equal:
path: stringData.credentialSecretNamespace
value: "credential-secret-namespace"
- it: should not set folder
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- isNull:
path: stringData.folder
- it: should set folder
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
folder: "myfolder"
asserts:
- equal:
path: stringData.folder
value: "myfolder"
- it: should not set region
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- isNull:
path: stringData.region
- it: should set region
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
region: "us-west-1"
asserts:
- equal:
path: stringData.region
value: "us-west-1"
- it: should not set endpointCA
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- isNull:
path: stringData.endpointCA
- it: should set endpointCA
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
endpointCA: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURHakNDQWdLZ0F3SUJBZ0lKQUtpWFZpNEpBb0J5TUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NakF3T0RNd01UZ3lOVFE1V2hjTk1qQXhNREk1TVRneU5UUTVXakFTTVJBdwpEZ1lEVlFRRERBZDBaWE4wTFdOaE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCjA4dnV3Q2Y0SEhtR2Q2azVNTmozRW5NOG00T2RpS3czSGszd1NlOUlXQkwyVzY5WDZxenBhN2I2M3U2L05mMnkKSnZWNDVqeXplRFB6bFJycjlpbEpWaVZ1NFNqWlFjdG9jWmFCaVNsL0xDbEFDdkFaUlYvKzN0TFVTZSs1ZDY0QQpWcUhDQlZObU5xM3E3aVY0TE1aSVpRc3N6K0FxaU1Sd0pOMVVKQTZ6V0tUc2Yzc3ByQ0J2dWxJWmZsVXVETVAyCnRCTCt6cXZEc0pDdWlhNEEvU2JNT29tVmM2WnNtTGkwMjdub3dGRld3MnRpSkM5d0xMRE14NnJoVHQ4a3VvVHYKQXJpUjB4WktiRU45L1Uzb011eUVKbHZyck9YS2ZuUDUwbk8ycGNaQnZCb3pUTStYZnRvQ1d5UnhKUmI5cFNTRApKQjlmUEFtLzNZcFpMMGRKY2sxR1h3SURBUUFCbzNNd2NUQWRCZ05WSFE0RUZnUVU5NHU4WXlMdmE2MTJnT1pyCm44QnlFQ2NucVFjd1FnWURWUjBqQkRzd09ZQVU5NHU4WXlMdmE2MTJnT1pybjhCeUVDY25xUWVoRnFRVU1CSXgKRURBT0JnTlZCQU1NQjNSbGMzUXRZMkdDQ1FDb2wxWXVDUUtBY2pBTUJnTlZIUk1FQlRBREFRSC9NQTBHQ1NxRwpTSWIzRFFFQkN3VUFBNElCQVFER1JRZ1RtdzdVNXRQRHA5Q2psOXlLRW9Vd2pYWWM2UlAwdm1GSHpubXJ3dUVLCjFrTkVJNzhBTUw1MEpuS29CY0ljVDNEeGQ3TGdIbTNCRE5mVVh2anArNnZqaXhJYXR2UWhsSFNVaWIyZjJsSTkKVEMxNzVyNCtROFkzelc1RlFXSDdLK08vY3pJTGh5ei93aHRDUlFkQ29lS1dXZkFiby8wd0VSejZzNkhkVFJzNwpHcWlGNWZtWGp6S0lOcTBjMHRyZ0xtalNKd1hwSnU0ZnNGOEcyZUh4b2pOKzdJQ1FuSkg5cGRIRVpUQUtOL2ppCnIvem04RlZtd1kvdTBndEZneWVQY1ZWbXBqRm03Y0ZOSkc4Y2ZYd0QzcEFwVjhVOGNocTZGeFBHTkVvWFZnclMKY1VRMklaU0RJd1FFY3FvSzFKSGdCUWw2RXBaUVpWMW1DRklrdFBwSQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t"
asserts:
- equal:
path: stringData.endpointCA
value: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURHakNDQWdLZ0F3SUJBZ0lKQUtpWFZpNEpBb0J5TUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NakF3T0RNd01UZ3lOVFE1V2hjTk1qQXhNREk1TVRneU5UUTVXakFTTVJBdwpEZ1lEVlFRRERBZDBaWE4wTFdOaE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCjA4dnV3Q2Y0SEhtR2Q2azVNTmozRW5NOG00T2RpS3czSGszd1NlOUlXQkwyVzY5WDZxenBhN2I2M3U2L05mMnkKSnZWNDVqeXplRFB6bFJycjlpbEpWaVZ1NFNqWlFjdG9jWmFCaVNsL0xDbEFDdkFaUlYvKzN0TFVTZSs1ZDY0QQpWcUhDQlZObU5xM3E3aVY0TE1aSVpRc3N6K0FxaU1Sd0pOMVVKQTZ6V0tUc2Yzc3ByQ0J2dWxJWmZsVXVETVAyCnRCTCt6cXZEc0pDdWlhNEEvU2JNT29tVmM2WnNtTGkwMjdub3dGRld3MnRpSkM5d0xMRE14NnJoVHQ4a3VvVHYKQXJpUjB4WktiRU45L1Uzb011eUVKbHZyck9YS2ZuUDUwbk8ycGNaQnZCb3pUTStYZnRvQ1d5UnhKUmI5cFNTRApKQjlmUEFtLzNZcFpMMGRKY2sxR1h3SURBUUFCbzNNd2NUQWRCZ05WSFE0RUZnUVU5NHU4WXlMdmE2MTJnT1pyCm44QnlFQ2NucVFjd1FnWURWUjBqQkRzd09ZQVU5NHU4WXlMdmE2MTJnT1pybjhCeUVDY25xUWVoRnFRVU1CSXgKRURBT0JnTlZCQU1NQjNSbGMzUXRZMkdDQ1FDb2wxWXVDUUtBY2pBTUJnTlZIUk1FQlRBREFRSC9NQTBHQ1NxRwpTSWIzRFFFQkN3VUFBNElCQVFER1JRZ1RtdzdVNXRQRHA5Q2psOXlLRW9Vd2pYWWM2UlAwdm1GSHpubXJ3dUVLCjFrTkVJNzhBTUw1MEpuS29CY0ljVDNEeGQ3TGdIbTNCRE5mVVh2anArNnZqaXhJYXR2UWhsSFNVaWIyZjJsSTkKVEMxNzVyNCtROFkzelc1RlFXSDdLK08vY3pJTGh5ei93aHRDUlFkQ29lS1dXZkFiby8wd0VSejZzNkhkVFJzNwpHcWlGNWZtWGp6S0lOcTBjMHRyZ0xtalNKd1hwSnU0ZnNGOEcyZUh4b2pOKzdJQ1FuSkg5cGRIRVpUQUtOL2ppCnIvem04RlZtd1kvdTBndEZneWVQY1ZWbXBqRm03Y0ZOSkc4Y2ZYd0QzcEFwVjhVOGNocTZGeFBHTkVvWFZnclMKY1VRMklaU0RJd1FFY3FvSzFKSGdCUWw2RXBaUVpWMW1DRklrdFBwSQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t"
- it: should not set insecureTLSSkipVerify
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
asserts:
- isNull:
path: stringData.insecureTLSSkipVerify
- it: should set insecureTLSSkipVerify
template: s3-secret.yaml
set:
s3:
enabled: true
bucketName: "yourbucket"
endpoint: "https://s3.amazonaws.com"
insecureTLSSkipVerify: "true"
asserts:
- equal:
path: stringData.insecureTLSSkipVerify
value: "true"


@ -0,0 +1,81 @@
image:
repository: rancher/backup-restore-operator
tag: v4.0.0
## Default s3 bucket for storing all backup files created by the backup-restore-operator
s3:
enabled: false
## credentialSecretName, if set, should be the name of the Secret containing AWS credentials.
## To use IAM Role, don't set this field
credentialSecretName: ""
credentialSecretNamespace: ""
region: ""
bucketName: ""
folder: ""
endpoint: ""
endpointCA: ""
insecureTLSSkipVerify: false
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
## If persistence is enabled, the operator will create a PVC with mountPath /var/lib/backups
persistence:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack).
## Refer https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
##
storageClass: "-"
## If you want to disable dynamic provisioning by setting storageClass to "-" above,
## and want to target a particular PV, provide the name of the target volume
volumeName: ""
## Only certain StorageClasses allow resizing PVs; Refer https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/
size: 2Gi
# Add log level flags to backup-restore
debug: false
trace: false
# http[s] proxy server passed to the backup client
# proxy: http://<username>:<password>@<host>:<port>
# comma-separated list of domains or IP addresses that will not use the proxy
noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
global:
cattle:
systemDefaultRegistry: ""
psp:
enabled: false # PSP enablement should default to false
kubectl:
repository: rancher/kubectl
tag: v1.21.9
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
affinity: {}
serviceAccount:
annotations: {}
priorityClassName: ""
# Override imagePullPolicy for image
# options: Always, Never, IfNotPresent
# Defaults to Always
imagePullPolicy: "Always"
## Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []


@ -6527,6 +6527,36 @@ entries:
- assets/rancher-alerting-drivers/rancher-alerting-drivers-1.0.100.tgz
version: 1.0.100
rancher-backup:
- annotations:
catalog.cattle.io/auto-install: rancher-backup-crd=match
catalog.cattle.io/certified: rancher
catalog.cattle.io/display-name: Rancher Backups
catalog.cattle.io/kube-version: '>= 1.23.0-0 < 1.28.0-0'
catalog.cattle.io/namespace: cattle-resources-system
catalog.cattle.io/os: linux
catalog.cattle.io/permits-os: linux,windows
catalog.cattle.io/provides-gvr: resources.cattle.io.resourceset/v1
catalog.cattle.io/rancher-version: '>= 2.8.0-0 < 2.9.0-0'
catalog.cattle.io/release-name: rancher-backup
catalog.cattle.io/scope: management
catalog.cattle.io/type: cluster-tool
catalog.cattle.io/ui-component: rancher-backup
catalog.cattle.io/upstream-version: 2.1.1
apiVersion: v2
appVersion: 4.0.0
created: "2023-10-30T12:40:59.106857-07:00"
description: Provides ability to back up and restore the Rancher application running
on any Kubernetes cluster
digest: 5b211ec8cb04c65d642eec33ce5d6e2e9df8d3c0a8f25372f33ea7983a47ead3
icon: https://charts.rancher.io/assets/logos/backup-restore.svg
keywords:
- applications
- infrastructure
kubeVersion: '>= 1.23.0-0'
name: rancher-backup
urls:
- assets/rancher-backup/rancher-backup-103.0.0+up4.0.0.tgz
version: 103.0.0+up4.0.0
- annotations:
catalog.cattle.io/auto-install: rancher-backup-crd=match
catalog.cattle.io/certified: rancher
@ -7055,6 +7085,21 @@ entries:
- assets/rancher-backup/rancher-backup-1.0.200.tgz
version: 1.0.200
rancher-backup-crd:
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"
catalog.cattle.io/namespace: cattle-resources-system
catalog.cattle.io/release-name: rancher-backup-crd
apiVersion: v2
appVersion: 4.0.0
created: "2023-10-30T12:41:00.790812-07:00"
description: Installs the CRDs for rancher-backup.
digest: d3363fb031d2756acbaf716133c8bdddb2177e906347f17fe4cf6e5ef662dd4b
name: rancher-backup-crd
type: application
urls:
- assets/rancher-backup-crd/rancher-backup-crd-103.0.0+up4.0.0.tgz
version: 103.0.0+up4.0.0
- annotations:
catalog.cattle.io/certified: rancher
catalog.cattle.io/hidden: "true"