diff --git a/assets/argo/argo-cd-5.51.0.tgz b/assets/argo/argo-cd-5.51.0.tgz index 1b450c95d..b93f7c00c 100644 Binary files a/assets/argo/argo-cd-5.51.0.tgz and b/assets/argo/argo-cd-5.51.0.tgz differ diff --git a/assets/argo/argo-cd-5.51.1.tgz b/assets/argo/argo-cd-5.51.1.tgz new file mode 100644 index 000000000..8799d106e Binary files /dev/null and b/assets/argo/argo-cd-5.51.1.tgz differ diff --git a/assets/avesha/kubeslice-controller-1.3.4.tgz b/assets/avesha/kubeslice-controller-1.3.4.tgz new file mode 100644 index 000000000..14ab982de Binary files /dev/null and b/assets/avesha/kubeslice-controller-1.3.4.tgz differ diff --git a/assets/avesha/kubeslice-worker-1.3.4.tgz b/assets/avesha/kubeslice-worker-1.3.4.tgz new file mode 100644 index 000000000..7c17d714d Binary files /dev/null and b/assets/avesha/kubeslice-worker-1.3.4.tgz differ diff --git a/assets/bitnami/kafka-26.4.0.tgz b/assets/bitnami/kafka-26.4.0.tgz new file mode 100644 index 000000000..5a5a29bb3 Binary files /dev/null and b/assets/bitnami/kafka-26.4.0.tgz differ diff --git a/assets/bitnami/postgresql-13.2.7.tgz b/assets/bitnami/postgresql-13.2.7.tgz new file mode 100644 index 000000000..79ca6e15f Binary files /dev/null and b/assets/bitnami/postgresql-13.2.7.tgz differ diff --git a/assets/bitnami/redis-18.3.2.tgz b/assets/bitnami/redis-18.3.2.tgz new file mode 100644 index 000000000..b97fa31fb Binary files /dev/null and b/assets/bitnami/redis-18.3.2.tgz differ diff --git a/assets/bitnami/wordpress-18.1.11.tgz b/assets/bitnami/wordpress-18.1.11.tgz new file mode 100644 index 000000000..3c57d0eec Binary files /dev/null and b/assets/bitnami/wordpress-18.1.11.tgz differ diff --git a/assets/clastix/kamaji-0.12.9.tgz b/assets/clastix/kamaji-0.12.9.tgz new file mode 100644 index 000000000..74449c68d Binary files /dev/null and b/assets/clastix/kamaji-0.12.9.tgz differ diff --git a/assets/confluent/confluent-for-kubernetes-0.824.33.tgz b/assets/confluent/confluent-for-kubernetes-0.824.33.tgz new file mode 100644 index 000000000..3d3dc8946 Binary files /dev/null and b/assets/confluent/confluent-for-kubernetes-0.824.33.tgz differ diff --git a/assets/datadog/datadog-3.45.0.tgz b/assets/datadog/datadog-3.45.0.tgz new file mode 100644 index 000000000..d53901de8 Binary files /dev/null and b/assets/datadog/datadog-3.45.0.tgz differ diff --git a/assets/datadog/datadog-operator-1.2.2.tgz b/assets/datadog/datadog-operator-1.2.2.tgz new file mode 100644 index 000000000..8063060a0 Binary files /dev/null and b/assets/datadog/datadog-operator-1.2.2.tgz differ diff --git a/assets/nats/nats-1.1.5.tgz b/assets/nats/nats-1.1.5.tgz new file mode 100644 index 000000000..8453bfd6c Binary files /dev/null and b/assets/nats/nats-1.1.5.tgz differ diff --git a/assets/speedscale/speedscale-operator-1.4.3.tgz b/assets/speedscale/speedscale-operator-1.4.3.tgz new file mode 100644 index 000000000..39b4e6309 Binary files /dev/null and b/assets/speedscale/speedscale-operator-1.4.3.tgz differ diff --git a/charts/argo/argo-cd/Chart.yaml b/charts/argo/argo-cd/Chart.yaml index 22993096b..7bd5e8448 100644 --- a/charts/argo/argo-cd/Chart.yaml +++ b/charts/argo/argo-cd/Chart.yaml @@ -1,7 +1,7 @@ annotations: artifacthub.io/changes: | - - kind: changed - description: Upgrade Argo CD to v2.9.0 + - kind: fixed + description: Add configurations for Applications in any namespace artifacthub.io/signKey: | fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252 url: https://argoproj.github.io/argo-helm/pgp_keys.asc @@ -33,4 +33,4 @@ name: argo-cd sources: - 
https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd - https://github.com/argoproj/argo-cd -version: 5.51.0 +version: 5.51.1 diff --git a/charts/argo/argo-cd/README.md b/charts/argo/argo-cd/README.md index e0e250256..7b79080ec 100644 --- a/charts/argo/argo-cd/README.md +++ b/charts/argo/argo-cd/README.md @@ -449,6 +449,7 @@ NAME: my-release | configs.credentialTemplatesAnnotations | object | `{}` | Annotations to be added to `configs.credentialTemplates` Secret | | configs.gpg.annotations | object | `{}` | Annotations to be added to argocd-gpg-keys-cm configmap | | configs.gpg.keys | object | `{}` (See [values.yaml]) | [GnuPG] public keys to add to the keyring | +| configs.params."application.namespaces" | string | `""` | Enables [Applications in any namespace] | | configs.params."applicationsetcontroller.enable.progressive.syncs" | bool | `false` | Enables use of the Progressive Syncs capability | | configs.params."applicationsetcontroller.policy" | string | `"sync"` | Modify how application is synced between the generator and the cluster. One of: `sync`, `create-only`, `create-update`, `create-delete` | | configs.params."controller.operation.processors" | int | `10` | Number of application operation processors | @@ -1253,3 +1254,4 @@ Autogenerated from chart metadata using [helm-docs](https://github.com/norwoodj/ [tini]: https://github.com/argoproj/argo-cd/pull/12707 [EKS EoL]: https://endoflife.date/amazon-eks [Kubernetes Compatibility Matrix]: https://argo-cd.readthedocs.io/en/stable/operator-manual/installation/#supported-versions +[Applications in any namespace]: https://argo-cd.readthedocs.io/en/stable/operator-manual/app-any-namespace/#applications-in-any-namespace diff --git a/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml b/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml index 44d50515c..bd10316b4 100644 --- a/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml +++ b/charts/argo/argo-cd/templates/argocd-server/clusterrole.yaml @@ -21,6 +21,9 @@ rules: - events verbs: - list + {{- if (index .Values.configs.params "application.namespaces") }} + - create + {{- end }} - apiGroups: - "" resources: diff --git a/charts/argo/argo-cd/values.yaml b/charts/argo/argo-cd/values.yaml index 4ec932224..49dd4e70b 100644 --- a/charts/argo/argo-cd/values.yaml +++ b/charts/argo/argo-cd/values.yaml @@ -263,6 +263,12 @@ configs: # -- Enables use of the Progressive Syncs capability applicationsetcontroller.enable.progressive.syncs: false + # -- Enables [Applications in any namespace] + ## List of additional namespaces where applications may be created in and reconciled from. + ## The namespace where Argo CD is installed to will always be allowed. + ## Set comma-separated list. (e.g. 
app-team-one, app-team-two) + application.namespaces: "" + # Argo CD RBAC policy configuration ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md rbac: diff --git a/charts/avesha/kubeslice-controller/Chart.yaml b/charts/avesha/kubeslice-controller/Chart.yaml index ad7a98cd6..4558fed0f 100644 --- a/charts/avesha/kubeslice-controller/Chart.yaml +++ b/charts/avesha/kubeslice-controller/Chart.yaml @@ -5,7 +5,7 @@ annotations: catalog.cattle.io/namespace: kubeslice-controller catalog.cattle.io/release-name: kubeslice-controller apiVersion: v2 -appVersion: 1.1.1 +appVersion: 1.3.4 description: Multi cloud networking (MCN), multi cluster, hybrid cloud networking tool for efficient, secure, policy-enforced connectivity and true multi-tenancy capabilities. KubeSlice enables enterprise platform teams to reduce infrastructure @@ -39,4 +39,4 @@ maintainers: name: Avesha name: kubeslice-controller type: application -version: 1.1.1 +version: 1.3.4 diff --git a/charts/avesha/kubeslice-controller/Readme.MD b/charts/avesha/kubeslice-controller/Readme.MD index 64f26bef4..ba485a3fb 100644 --- a/charts/avesha/kubeslice-controller/Readme.MD +++ b/charts/avesha/kubeslice-controller/Readme.MD @@ -1,13 +1,13 @@ # Kubeslice Enterprise Controller Helm Charts ## Prerequisites -๐Ÿ“– Follow the overview and registration [documentation](https://docs.avesha.io/documentation/enterprise/1.1.1/deployment-partners/deploying-kubeslice-on-rancher/) +๐Ÿ“– Follow the overview and registration [documentation](https://docs.avesha.io/documentation/enterprise/1.3.0/get-started/prerequisites/prerequisites-kubeslice-registration). -- Create and configure the controller cluster following instructions in the prerequisites section [documentation](https://docs.avesha.io/documentation/enterprise/1.1.1/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher) +- Create and configure the controller cluster following instructions in the prerequisites [documentation](https://docs.avesha.io/documentation/enterprise/1.3.0/get-started/prerequisites/prerequisites-rancher-deployments). 
- Copy the chart version from the upper right hand section of this page [VERSION parameter need during install and upgrade] - Click on the download chart link from the upper right hand section of this page, save it to location available from command prompt - Untar the chart to get the values.yaml file, update values.yaml with the follwing information - - cluster end point [documentation](https://docs.avesha.io/documentation/enterprise/1.1.1/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher#getting-the-controller-cluster-endpoint) + - cluster end point [documentation](https://docs.avesha.io/documentation/enterprise/1.3.0/get-started/prerequisites/prerequisites-rancher-deployments#get-the-controller-cluster-endpoint) - helm repository username, password and email [From registration] @@ -19,20 +19,20 @@ Add the repo as follows: helm repo add kubeslice-rancher https://kubeslice.github.io/rancher-avesha-charts/ ``` -### Install KubeSlice Controller +### Install the KubeSlice Controller ```console export KUBECONFIG= helm install --namespace=kubeslice-controller --create-namespace kubeslice-controller kubeslice-rancher/kubeslice-controller --timeout=10m0s --values= --version= --wait=true ``` -### Upgrading KubeSlice Controller +### Upgrading the KubeSlice Controller ```console helm upgrade --history-max=5 --namespace=kubeslice-controller kubeslice-controller kubeslice-rancher/kubeslice-controller --timeout=10m0s --values= --version= --wait=true ``` -### Uninstall KubeSlice Controller -- Follow instructions [documentation](https://docs.avesha.io/documentation/enterprise/1.1.1/getting-started-with-cloud-clusters/uninstalling-kubeslice/uninstalling-the-kubeslice-controller/) +### Uninstall the KubeSlice Controller +- Follow instructions [documentation](https://docs.avesha.io/documentation/enterprise/1.3.0/uninstall-kubeslice/) ```console export KUBECONFIG= diff --git a/charts/avesha/kubeslice-controller/questions.yml b/charts/avesha/kubeslice-controller/questions.yml index d53a79841..e3bfc0635 100644 --- a/charts/avesha/kubeslice-controller/questions.yml +++ b/charts/avesha/kubeslice-controller/questions.yml @@ -2,7 +2,7 @@ questions: - default: "" - description: "https://docs.avesha.io/documentation/enterprise/1.1.1/deployment-partners/deploying-kubeslice-on-rancher/#registering-to-access-the-enterprise-helm-chart" + description: "https://docs.avesha.io/documentation/enterprise/1.3.0/get-started/prerequisites/prerequisites-kubeslice-registration/" group: "Global Settings" label: "Registered Username" required: true @@ -18,7 +18,7 @@ questions: variable: imagePullSecrets.password - default: "" - description: "https://docs.avesha.io/documentation/enterprise/1.1.1/deployment-partners/deploying-kubeslice-on-rancher/installing-the-kubeslice-controller-on-rancher/#getting-the-controller-cluster-endpoint" + description: "https://docs.avesha.io/documentation/enterprise/1.3.0/get-started/prerequisites/prerequisites-rancher-deployments/" group: "Controller Settings" label: "Controller Endpoint" required: true @@ -50,7 +50,7 @@ questions: variable: kubeslice.uiproxy.service.type - default: "" - description: "https://docs.avesha.io/documentation/enterprise/1.1.1/reference/configuration-parameters/#license-parameters" + description: "https://docs.avesha.io/documentation/enterprise/1.3.0/license/trial-license" group: "Controller Settings" label: "Customer Name for generating Trial License" required: false diff --git 
a/charts/avesha/kubeslice-controller/templates/_helpers.tpl b/charts/avesha/kubeslice-controller/templates/_helpers.tpl index 6e2be538c..7ca4d66e0 100644 --- a/charts/avesha/kubeslice-controller/templates/_helpers.tpl +++ b/charts/avesha/kubeslice-controller/templates/_helpers.tpl @@ -126,4 +126,133 @@ Create the name of the service account to use {{- else }} {{- default "default" .Values.serviceAccount.name }} {{- end }} +{{- end }} +*************************kubeslice-ui********************************* + +{{/* +Expand the name of the chart. +*/}} +{{- define "kubeslice-ui.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kubeslice-ui.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kubeslice-ui.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kubeslice-ui.labels" -}} +helm.sh/chart: {{ include "kubeslice-ui.chart" . }} +{{ include "kubeslice-ui.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kubeslice-ui.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kubeslice-ui.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kubeslice-ui.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "kubeslice-ui.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +*************************KUBERNETES-DASHBOARD********************************* + +{{/* +Expand the name of the chart. +*/}} +{{- define "kubernetes-dashboard.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kubernetes-dashboard.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "kubernetes-dashboard.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kubernetes-dashboard.labels" -}} +helm.sh/chart: {{ include "kubernetes-dashboard.chart" . }} +{{ include "kubernetes-dashboard.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kubernetes-dashboard.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kubernetes-dashboard.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kubernetes-dashboard.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "kubernetes-dashboard.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} {{- end }} \ No newline at end of file diff --git a/charts/avesha/kubeslice-controller/templates/admission-webhook.yaml b/charts/avesha/kubeslice-controller/templates/admission-webhook.yaml index a27b8c1de..a640122b6 100644 --- a/charts/avesha/kubeslice-controller/templates/admission-webhook.yaml +++ b/charts/avesha/kubeslice-controller/templates/admission-webhook.yaml @@ -11,7 +11,6 @@ metadata: namespace: {{ .Release.Namespace }} type: Opaque data: - ca.crt: {{ $ca.Cert | b64enc }} tls.key: {{ $cert.Key | b64enc }} tls.crt: {{ $cert.Cert | b64enc }} @@ -261,6 +260,28 @@ webhooks: resources: - workerslicegateways sideEffects: None + - admissionReviewVersions: + - v1 + clientConfig: + caBundle: {{ $ca.Cert | b64enc }} + service: + name: kubeslice-controller-webhook-service + namespace: kubeslice-controller + path: /validate-controller-kubeslice-io-v1alpha1-vpnkeyrotation + failurePolicy: Fail + name: vvpnkeyrotation.kb.io + rules: + - apiGroups: + - controller.kubeslice.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - vpnkeyrotations + sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration @@ -455,4 +476,4 @@ webhooks: - UPDATE resources: - workerslicegateways - sideEffects: None + sideEffects: None \ No newline at end of file diff --git a/charts/avesha/kubeslice-controller/templates/controller-deployment.yaml b/charts/avesha/kubeslice-controller/templates/controller-deployment.yaml index baafb518f..758e3fc2a 100644 --- a/charts/avesha/kubeslice-controller/templates/controller-deployment.yaml +++ b/charts/avesha/kubeslice-controller/templates/controller-deployment.yaml @@ -169,6 +169,7 @@ data: - WorkerSliceResourceQuotaDeletedForcefully - WorkerSliceResourceQuotaRecreationFailed - WorkerSliceResourceQuotaRecreated + - WorkerSliceResourceQuotaListFailed - OffBoardedNamespaceUtilizationMetricsReset - ResourceQuotaMetricsPopulated - ClusterCPULimitViolated @@ -198,90 +199,126 @@ data: - WorkerSliceResourceQuotaUpdated - WorkerSliceResourceQuotaDeletionFailed - WorkerSliceResourceQuotaDeleted - - DetachClusterInititated + - DetachClusterInitiated - DetachClusterSucceeded - DetachClusterFailed - - OffboardNamesapceInitiated - - OffboardNamesapceSucceeded - - OffboardNamesapceFailed - - InactiveServiceAccountDeletionFailed - - WorkerSliceGatewayCreated - - ServiceExportConfigDeletionFailed - - ReadWriteRoleCreated - - DefaultRoleBindingCreated - - 
DefaultRoleBindingDeleted - - WorkerSliceGatewayRecreated + - OffboardNamespaceInitiated + - OffboardNamespaceSucceeded + - OffboardNamespaceFailed + - SliceDeleteInitiated + - SliceDeleteFailed + - SlicePropagationInitiated + - SlicePropagationSucceeded + - SlicePropagationTimedOut + - LicenseSecretNotFound + - MachineFileNotFound + - MachineFileInvalid + - LicenseKeyInvalid + - LicenseExpired + - LicenseExpiredGracePeriodOn + - MachineFingerPrintErr + - GotMachineFingerPrint + - ConfigMapErr + - GotConfigMap + - LicenseProxyUnreachable + - ClusterMetadataCollectionFailed + - ClusterMetadataCollectionSuccess + - LicenseDataFetchError + - LicenseDataFetchSuccess + - LicenseSecretCreationFailed + - LicenseSecretCreationSuccess + - LicenseVCPUViolated + - LicenseVCPUWarning + - UnableToDetectEnterpriseLicense + - StaleLicensevCPUData + - StaleLicensevCPUDataSince72Hours - ClusterDeregistered - - ReadOnlyRoleCreationFailed - - ReadOnlyRoleUpdated - - WorkerClusterRoleCreationFailed - - DefaultRoleBindingCreationFailed - - DefaultRoleBindingUpdated - - InactiveRoleBindingDeleted - - ServiceAccountDeleted - - ProjectDeletionFailed - - ClusterDeletionFailed - - WorkerClusterRoleCreated - - WorkerServiceImportRecreationFailed - - WorkerSliceConfigCreationFailed - - SliceGatewayJobCreated + - SliceConfigDeletionFailed + - ServiceExportConfigDeleted + - SecretDeletionFailed + - ServiceAccountSecretCreationFailed - WorkerServiceImportUpdateFailed - - ServiceAccountCreationFailed - - InactiveServiceAccountDeleted - - WorkerServiceImportRecreated - - ServiceAccountDeletionFailed - - NamespaceCreated - - ServiceAccountSecretCreated - - DefaultRoleBindingUpdateFailed + - ProjectDeleted + - ClusterDeletionFailed + - WorkerSliceGatewayDeletedForcefully + - NamespaceCreationFailed + - ReadOnlyRoleCreationFailed + - ReadWriteRoleUpdateFailed - WorkerServiceImportDeletedForcefully + - WorkerSliceConfigCreated + - WorkerSliceConfigDeleted + - DefaultSliceQoSConfigCreated + - SecretDeleted + - ReadWriteRoleUpdated + - DefaultRoleBindingUpdateFailed + - CertificatesRenewNow + - ClusterInstallationInProgress + - ReadWriteRoleCreated + - ServiceAccountDeleted + - WorkerServiceImportCreationFailed + - WorkerSliceConfigRecreationFailed + - CertificateJobCreationFailed + - ClusterInstallationPending + - DefaultRoleBindingDeleted + - DefaultRoleBindingDeletionFailed + - InactiveServiceAccountDeletionFailed - WorkerServiceImportCreated - - SliceQoSConfigDeleted + - WorkerSliceGatewayDeleted + - WorkerSliceGatewayCreationFailed + - ReadOnlyRoleUpdated + - ReadOnlyRoleUpdateFailed + - WorkerSliceGatewayCreated + - ClusterDeregisterFailed + - WorkerSliceConfigDeletedForcefully + - WorkerServiceImportRecreated + - WorkerServiceImportDeleted + - VPNKeyRotationConfigCreationFailed + - NamespaceDeletionFailed + - InactiveServiceAccountDeleted + - WorkerClusterRoleCreationFailed + - WorkerSliceGatewayRecreationFailed + - NamespaceDeleted + - WorkerServiceImportDeletionFailed + - WorkerClusterRoleUpdateFailed - ReadWriteRoleCreationFailed - - InactiveRoleBindingDeletionFailed - - WorkerClusterRoleUpdated + - ServiceAccountCreated + - ServiceAccountCreationFailed + - DefaultRoleBindingUpdated + - WorkerSliceConfigRecreated + - SliceQoSConfigDeletionFailed + - NamespaceCreated - WorkerSliceConfigUpdateFailed - WorkerSliceGatewayDeletionFailed - - ClusterDeleted - - ServiceExportConfigDeleted - - SecretDeleted - - ReadOnlyRoleUpdateFailed - - WorkerServiceImportCreationFailed - - WorkerSliceGatewayCreationFailed - - 
SliceConfigDeletionFailed - - WorkerSliceConfigDeletedForcefully - - WorkerSliceConfigDeletionFailed - - WorkerSliceGatewayDeleted - - NamespaceDeleted - - WorkerClusterRoleUpdateFailed - - WorkerServiceImportDeletionFailed - - ClusterInstallationFailed - - WorkerSliceConfigUpdated - - ClusterInstallationInProgress - - ClusterDeregistrationInProgress - - WorkerServiceImportDeleted - - SliceConfigDeleted - - SliceQoSConfigDeletionFailed - - NamespaceDeletionFailed - - WorkerSliceConfigRecreated + - InactiveRoleBindingDeleted - SliceGatewayJobCreationFailed - - ClusterDeregisterFailed - - SecretDeletionFailed - - ReadWriteRoleUpdateFailed - - WorkerSliceConfigRecreationFailed - - ClusterInstallationPending - - NamespaceCreationFailed - - WorkerServiceImportUpdated - - ReadWriteRoleUpdated - - ServiceAccountCreated - - ServiceAccountSecretCreationFailed - - DefaultRoleBindingDeletionFailed - - WorkerSliceConfigCreated - - ProjectDeleted - - ClusterDeregisterTimeout - ReadOnlyRoleCreated - - WorkerSliceConfigDeleted - - WorkerSliceGatewayDeletedForcefully - - WorkerSliceGatewayRecreationFailed + - DefaultRoleBindingCreationFailed + - WorkerClusterRoleCreated + - ClusterInstallationFailed + - ClusterDeregistrationInProgress + - IllegalVPNKeyRotationConfigDelete + - WorkerSliceConfigDeletionFailed + - SliceGatewayJobCreated + - ServiceAccountSecretCreated + - WorkerSliceConfigUpdated + - WorkerSliceGatewayRecreated + - VPNKeyRotationConfigCreated + - ClusterDeleted + - SliceConfigDeleted + - DefaultRoleBindingCreated + - ServiceAccountDeletionFailed + - WorkerServiceImportUpdated + - VPNKeyRotationStart + - VPNKeyRotationConfigUpdated + - ProjectDeletionFailed + - ServiceExportConfigDeletionFailed + - WorkerClusterRoleUpdated + - InactiveRoleBindingDeletionFailed + - WorkerServiceImportRecreationFailed + - WorkerSliceConfigCreationFailed + - CertificateJobFailed + - ClusterDeregisterTimeout + - SliceQoSConfigDeleted {{ else }} controller.yaml: |- disabledEvents: diff --git a/charts/avesha/kubeslice-controller/templates/kubeslice-api-gw.yaml b/charts/avesha/kubeslice-controller/templates/kubeslice-api-gw.yaml index 50cec16ac..13343dcd9 100644 --- a/charts/avesha/kubeslice-controller/templates/kubeslice-api-gw.yaml +++ b/charts/avesha/kubeslice-controller/templates/kubeslice-api-gw.yaml @@ -83,4 +83,4 @@ data: repository: # [Optional] Required for for private docker repo username: # [Optional] Required for for private docker repo password: # [Optional]Required for for private docker repo - email: # [Optional] Required for for private docker repo \ No newline at end of file + email: # [Optional] Required for for private docker repo diff --git a/charts/avesha/kubeslice-controller/templates/kubeslice-controller.yaml b/charts/avesha/kubeslice-controller/templates/kubeslice-controller.yaml index fae615da5..6faa40f3f 100644 --- a/charts/avesha/kubeslice-controller/templates/kubeslice-controller.yaml +++ b/charts/avesha/kubeslice-controller/templates/kubeslice-controller.yaml @@ -187,6 +187,20 @@ spec: secretName: description: SecretName is the name of the secret for the worker cluster. 
type: string + vCPURestriction: + description: VCPURestriction is the restriction on the cluster disabling + the creation of new pods + properties: + enforceRestrictions: + description: EnforceRestrictions is the flag to check if the cluster + is restricted + type: boolean + lastUpdatedTimestamp: + description: LastUpdatedTimestamp is the timestamp when the enforcement + was updated + format: date-time + type: string + type: object type: object type: object served: true @@ -370,8 +384,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.11.1 name: sliceconfigs.controller.kubeslice.io spec: group: controller.kubeslice.io @@ -388,10 +401,14 @@ spec: description: SliceConfig is the Schema for the sliceconfig API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -404,7 +421,8 @@ spec: type: array externalGatewayConfig: items: - description: ExternalGatewayConfig is the configuration for external gateways like 'istio', etc/ + description: ExternalGatewayConfig is the configuration for external + gateways like 'istio', etc/ properties: clusters: items: @@ -505,8 +523,18 @@ spec: - queueType - tcType type: object + renewBefore: + description: RenewBefore is used for renew now! 
+ format: date-time + type: string + rotationInterval: + default: 30 + maximum: 90 + minimum: 30 + type: integer sliceGatewayProvider: - description: WorkerSliceGatewayProvider defines the configuration for slicegateway + description: WorkerSliceGatewayProvider defines the configuration + for slicegateway properties: sliceCaType: default: Local @@ -528,6 +556,19 @@ spec: type: string standardQosProfileName: type: string + vpnConfig: + description: VPNConfiguration defines the additional (optional) VPN + Configuration to customise + properties: + cipher: + default: AES-256-CBC + enum: + - AES-256-CBC + - AES-128-CBC + type: string + required: + - cipher + type: object required: - sliceGatewayProvider type: object @@ -579,12 +620,112 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: vpnkeyrotations.controller.kubeslice.io +spec: + group: controller.kubeslice.io + names: + kind: VpnKeyRotation + listKind: VpnKeyRotationList + plural: vpnkeyrotations + singular: vpnkeyrotation + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: VpnKeyRotation is the Schema for the vpnkeyrotations API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VpnKeyRotationSpec defines the desired state of VpnKeyRotation + properties: + certificateCreationTime: + description: CertificateCreationTime is a time when certificate for + all the gateway pairs is created/updated + format: date-time + type: string + certificateExpiryTime: + description: CertificateExpiryTime is a time when certificate for + all the gateway pairs will expire + format: date-time + type: string + clusterGatewayMapping: + additionalProperties: + items: + type: string + type: array + description: ClusterGatewayMapping represents a map where key is cluster + name and value is array of gateways present on that cluster. This + is used to avoid unnecessary reconciliation in worker-operator. + type: object + clusters: + description: clusters contains the list of clusters attached to this + slice + items: + type: string + type: array + rotationCount: + description: RotationCount represent the number of times rotation + has been already performed. 
+ type: integer + rotationInterval: + type: integer + sliceName: + type: string + type: object + status: + description: VpnKeyRotationStatus defines the observed state of VpnKeyRotation + properties: + currentRotationState: + additionalProperties: + description: StatusOfKeyRotation represent per gateway status + properties: + lastUpdatedTimestamp: + format: date-time + type: string + status: + type: string + type: object + description: This is map of gateway name to the current rotation state + type: object + statusHistory: + additionalProperties: + items: + description: StatusOfKeyRotation represent per gateway status + properties: + lastUpdatedTimestamp: + format: date-time + type: string + status: + type: string + type: object + type: array + description: This is circular array of last n number of rotation status. + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -1793,7 +1934,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.11.1 creationTimestamp: null name: workerslicegwrecyclers.worker.kubeslice.io spec: @@ -1826,6 +1967,8 @@ spec: spec: description: WorkerSliceGwRecyclerSpec defines the desired state of WorkerSliceGwRecycler properties: + clientRedundancyNumber: + type: integer gwPair: properties: clientId: @@ -1835,6 +1978,8 @@ spec: type: object request: type: string + serverRedundancyNumber: + type: integer sliceGwClient: type: string sliceGwServer: @@ -1869,12 +2014,6 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -2790,6 +2929,7 @@ rules: - sliceresourcequotaconfigs - slicerolebindings - sliceroletemplates + - vpnkeyrotations verbs: - create - delete @@ -2810,6 +2950,7 @@ rules: - sliceresourcequotaconfigs/finalizers - slicerolebindings/finalizers - sliceroletemplates/finalizers + - vpnkeyrotations/finalizers verbs: - update - apiGroups: @@ -2824,6 +2965,7 @@ rules: - sliceresourcequotaconfigs/status - slicerolebindings/status - sliceroletemplates/status + - vpnkeyrotations/status verbs: - get - list @@ -3052,6 +3194,7 @@ rules: - update - patch - create + - delete - apiGroups: - worker.kubeslice.io resources: diff --git a/charts/avesha/kubeslice-controller/templates/license.yaml b/charts/avesha/kubeslice-controller/templates/license.yaml index d2909d54c..7e4cd11a5 100644 --- a/charts/avesha/kubeslice-controller/templates/license.yaml +++ b/charts/avesha/kubeslice-controller/templates/license.yaml @@ -26,6 +26,8 @@ data: apiURL: LZtbEDBzFinn2HBQgc89vK8h2chsdurscRqbcvgzstvJ2zUR7cXL0d21Ik73br6vfE8aqZrROC41Zbf1Zj485W7OXHI= apiKey: szl3olNL5Sn0GrS3jbuLxZjTMw7ja1tmRXiyQtZMyFJL8kgC3tTBNNWaLyK7utqN63bStzvpgXM= publicKey: OSITIrMziTso5NF-JW7t1y1HSLs0t0CwQTEIR4SKgNOIIxbP-ZlKrkD7fDq-8XG4uw-R7KkmqLKaxUFGqAAL8KI6IBnFiO968PTTTXyrCqk= + maxSamples: Y9hz2m-_gUS3hMFpDTmhSO5A + sampleInterval: 6jEhxl11PVq7GnELEgjUYtq2 binaryData: {} diff --git a/charts/avesha/kubeslice-controller/templates/openshift.yaml b/charts/avesha/kubeslice-controller/templates/openshift.yaml new file mode 100644 index 000000000..516d016da --- /dev/null +++ b/charts/avesha/kubeslice-controller/templates/openshift.yaml @@ -0,0 +1,38 @@ +{{ if .Values.global.profile.openshift}} + +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: "{{ .Release.Name }}-anyuid-scc" + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-15" +subjects: + - kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:serviceaccounts:kubeslice-controller + - kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:serviceaccounts:kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:openshift:scc:anyuid +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: "{{ .Release.Name }}-privileged-scc" + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-15" +subjects: + - kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:serviceaccounts:kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:openshift:scc:privileged + +{{end}} \ No newline at end of file diff --git a/charts/avesha/kubeslice-controller/values.schema.json b/charts/avesha/kubeslice-controller/values.schema.json index 80cfebf6e..5671ee06e 100644 --- a/charts/avesha/kubeslice-controller/values.schema.json +++ b/charts/avesha/kubeslice-controller/values.schema.json @@ -1,5 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Values schema for Kubeslice Helm chart", "type": "object", "properties": { "kubeslice": { @@ -175,6 +176,18 @@ "password": {"type": [ "string", "null" ]}, "email": {"type": [ "string", "null" ]} } + },"global": { + "type": "object", + "properties":{ + "profile": { + "type": "object", + "properties": { + "openshift": { + "type": "boolean" + } + } + } + } } }, "required": ["kubeslice", "imagePullSecrets"] diff --git a/charts/avesha/kubeslice-controller/values.yaml b/charts/avesha/kubeslice-controller/values.yaml index 83ce5d665..d199fbae7 100644 --- a/charts/avesha/kubeslice-controller/values.yaml +++ b/charts/avesha/kubeslice-controller/values.yaml @@ -1,4 +1,11 @@ -# Kubeslice CONTROLLER settings +# Default values for k-native. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# if you're installing in openshift cluster make this variable true +global: + profile: + openshift: false kubeslice: rbacproxy: image: gcr.io/kubebuilder/kube-rbac-proxy @@ -9,11 +16,11 @@ kubeslice: projectnsPrefix: kubeslice endpoint: image: aveshasystems/kubeslice-controller-ent - tag: 1.1.1 + tag: 1.3.4 pullPolicy: IfNotPresent ovpnJob: image: aveshasystems/gateway-certs-generator - tag: 0.1.10 + tag: 0.2.0 prometheus: enabled: true url: http://kubeslice-controller-prometheus-service:9090 @@ -31,11 +38,11 @@ kubeslice: # Kubeslice UI settings ui: image: aveshasystems/kubeslice-ui-ent - tag: 1.1.1 + tag: 1.3.4 pullPolicy: IfNotPresent uiv2: image: aveshasystems/kubeslice-ui-v2-ent - tag: 1.1.1 + tag: 1.3.3 pullPolicy: IfNotPresent dashboard: image: aveshasystems/kubeslice-kubernetes-dashboard @@ -43,7 +50,7 @@ kubeslice: pullPolicy: IfNotPresent uiproxy: image: aveshasystems/kubeslice-ui-proxy - tag: 1.3.0 + tag: 1.4.0 pullPolicy: IfNotPresent service: ## For kind, set this to NodePort, elsewhere use LoadBalancer or NodePort @@ -54,12 +61,12 @@ kubeslice: # nodePort: apigw: image: aveshasystems/kubeslice-api-gw-ent - tag: 1.9.0 + tag: 1.10.1 pullPolicy: IfNotPresent workerinstaller: image: aveshasystems/worker-installer - tag: 1.1.9 + tag: 1.1.10 pullPolicy: Always # username & password & email values for imagePullSecrets has to provided to create a secret imagePullSecrets: diff --git a/charts/avesha/kubeslice-worker/Chart.yaml b/charts/avesha/kubeslice-worker/Chart.yaml index d9bfa9313..0ada01888 100644 --- a/charts/avesha/kubeslice-worker/Chart.yaml +++ b/charts/avesha/kubeslice-worker/Chart.yaml @@ -5,7 +5,7 @@ annotations: catalog.cattle.io/namespace: kubeslice-system catalog.cattle.io/release-name: kubeslice-worker apiVersion: v2 -appVersion: 1.1.1 +appVersion: 1.3.4 description: Multi cloud networking (MCN), multi cluster, hybrid cloud networking tool for efficient, secure, policy-enforced connectivity and true multi-tenancy capabilities. 
KubeSlice enables enterprise platform teams to reduce infrastructure @@ -39,4 +39,4 @@ maintainers: name: Avesha name: kubeslice-worker type: application -version: 1.1.1 +version: 1.3.4 diff --git a/charts/avesha/kubeslice-worker/Readme.MD b/charts/avesha/kubeslice-worker/Readme.MD index 30d6189af..09d668823 100644 --- a/charts/avesha/kubeslice-worker/Readme.MD +++ b/charts/avesha/kubeslice-worker/Readme.MD @@ -2,13 +2,13 @@ ## Prerequisites - KubeSlice Controller needs to be installed -- Create and configure the worker cluster following instructions in prerequisites and "registering the worker cluster" sections [documentation](https://docs.avesha.io/documentation/enterprise/1.1.1/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher) -- Copy the chart version from the upper right hand section of this page [VERSION parameter need during install and upgrade] -- Click on the download link from the upper right hand section of this page, save it to location available from command prompt +- Create and configure the worker cluster following instructions in the prerequisites and "registering the worker cluster" sections [documentation](https://docs.avesha.io/documentation/enterprise/1.3.0/install-kubeslice/rancher/rancher-install-kubeslice#kubeslice-worker-clusters) +- Copy the chart version from the upper right hand section of this page [VERSION parameter needed during install and upgrade] +- Click on the download link from the upper right hand section of this page, and save it to the location available from the command prompt - Untar the chart to get the values.yaml file and edit the following fields - controllerSecret: namespace, endpoint, ca.crt, token - cluster: name, nodeIp, endpoint - - imagePullSecrets: username, passowrd and email [use the same info from KubeSlice Controller install] + - imagePullSecrets: username, password, and email [use the same info from KubeSlice Controller install] # Installation @@ -34,7 +34,7 @@ helm upgrade --history-max=5 --namespace=kubeslice-system kubeslice-worker kubes ``` ### Uninstall Kubeslice Worker -- Follow instructions [documentation](https://docs.avesha.io/documentation/enterprise/1.1.1/getting-started-with-cloud-clusters/uninstalling-kubeslice/deregistering-the-worker-cluster) +- Refer to the [documentation](https://docs.avesha.io/documentation/enterprise/1.3.0/uninstall-kubeslice/) for instructions. 
```console export KUBECONFIG= diff --git a/charts/avesha/kubeslice-worker/charts/nsm/Chart.yaml b/charts/avesha/kubeslice-worker/charts/nsm/Chart.yaml index 1bd4e5100..63348f477 100644 --- a/charts/avesha/kubeslice-worker/charts/nsm/Chart.yaml +++ b/charts/avesha/kubeslice-worker/charts/nsm/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: 0.6.3 +appVersion: 0.6.4 description: Basic Network Service Mesh Infrastructure name: nsm -version: 0.6.3 +version: 0.6.4 diff --git a/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/Chart.yaml b/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/Chart.yaml index bb201a2e7..e2f5b9a5d 100644 --- a/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/Chart.yaml +++ b/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: 0.3.1 +appVersion: 0.3.2 description: A Helm chart for Kubernetes name: admission-webhook -version: 0.3.1 +version: 0.3.2 diff --git a/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/templates/admission-webhook.yaml b/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/templates/admission-webhook.yaml index 70432eaa6..6809cd3da 100644 --- a/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/templates/admission-webhook.yaml +++ b/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/templates/admission-webhook.yaml @@ -48,3 +48,5 @@ spec: value: spiffe.io/spiffe-id:true - name: NSM_ENVS value: NSM_CONNECT_TO=tcp://nsmgr.kubeslice-system.svc.cluster.local:5001,NSM_LOG_LEVEL=TRACE,NSM_LIVENESSCHECKENABLED=false + - name: PROFILE_OPENSHIFT + value: "{{ .Values.global.profile.openshift | default false }}" diff --git a/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/values.yaml b/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/values.yaml index 64a76bc0a..b21d4d73c 100644 --- a/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/values.yaml +++ b/charts/avesha/kubeslice-worker/charts/nsm/charts/admission-webhook/values.yaml @@ -4,10 +4,10 @@ # Declare variables to be passed into your templates. 
webhookImageRegistry: docker.io/aveshasystems/cmd-admission-webhook-k8s -webhookImageTag: 1.6.2 +webhookImageTag: 1.6.4 nsmInjectContainerImageRegistry: docker.io/aveshasystems/cmd-nsc -nsmInjectContainerImageTag: 1.5.6 +nsmInjectContainerImageTag: 1.5.7 nsmInjectInitContainerImageRegistry: docker.io/aveshasystems/cmd-nsc-init nsmInjectInitContainerImageTag: 1.5.5 diff --git a/charts/avesha/kubeslice-worker/charts/nsm/charts/spire/charts/spiffe-csi-driver/templates/daemonset.yaml b/charts/avesha/kubeslice-worker/charts/nsm/charts/spire/charts/spiffe-csi-driver/templates/daemonset.yaml index bd7f7202b..d176b17fd 100644 --- a/charts/avesha/kubeslice-worker/charts/nsm/charts/spire/charts/spiffe-csi-driver/templates/daemonset.yaml +++ b/charts/avesha/kubeslice-worker/charts/nsm/charts/spire/charts/spiffe-csi-driver/templates/daemonset.yaml @@ -55,6 +55,12 @@ spec: - mountPath: {{ .Values.kubeletPath }}/pods mountPropagation: Bidirectional name: mountpoint-dir + {{- if .Values.global.profile.openshift }} + # The volume containing mount points for containers in openshift clusters + - name: openshift-mountpoint-dir + mountPath: /var/data/kubelet/pods + mountPropagation: Bidirectional + {{- end }} securityContext: readOnlyRootFilesystem: true capabilities: @@ -93,6 +99,13 @@ spec: resources: {{- toYaml .Values.nodeDriverRegistrar.resources | nindent 12 }} volumes: + {{- if .Values.global.profile.openshift }} + # where SPIFFE CSI driver mounts volume in openshift clusters + - name: openshift-mountpoint-dir + hostPath: + path: /var/data/kubelet/pods + type: Directory + {{- end }} - name: spire-agent-socket-dir hostPath: path: {{ include "spiffe-csi-driver.agent-socket-path" . | dir }} diff --git a/charts/avesha/kubeslice-worker/charts/nsm/templates/nsmgr.yaml b/charts/avesha/kubeslice-worker/charts/nsm/templates/nsmgr.yaml index ed5a5228b..3e0a2d101 100644 --- a/charts/avesha/kubeslice-worker/charts/nsm/templates/nsmgr.yaml +++ b/charts/avesha/kubeslice-worker/charts/nsm/templates/nsmgr.yaml @@ -85,6 +85,10 @@ spec: containers: - image: {{ .Values.nsmgr.imageRegistry }}:{{ .Values.nsmgr.imageTag }} imagePullPolicy: IfNotPresent + {{- if .Values.global.profile.openshift }} + securityContext: + privileged: true + {{- end }} name: nsmgr ports: - containerPort: 5001 @@ -163,6 +167,10 @@ spec: periodSeconds: 5 - image: {{ .Values.nsmgr.excludePrefixesImageRegistry }}:{{ .Values.nsmgr.excludePrefixesImageTag }} imagePullPolicy: IfNotPresent + {{- if .Values.global.profile.openshift }} + securityContext: + privileged: true + {{- end }} name: exclude-prefixes env: - name: NSM_LOG_LEVEL diff --git a/charts/avesha/kubeslice-worker/charts/nsm/templates/registry-k8s.yaml b/charts/avesha/kubeslice-worker/charts/nsm/templates/registry-k8s.yaml index 5f619b25f..2f8f03f76 100644 --- a/charts/avesha/kubeslice-worker/charts/nsm/templates/registry-k8s.yaml +++ b/charts/avesha/kubeslice-worker/charts/nsm/templates/registry-k8s.yaml @@ -33,6 +33,10 @@ spec: - name: REGISTRY_K8S_PROXY_REGISTRY_URL value: nsmgr-proxy:5004 imagePullPolicy: IfNotPresent + {{- if .Values.global.profile.openshift }} + securityContext: + privileged: true + {{- end }} name: registry ports: - containerPort: 5002 diff --git a/charts/avesha/kubeslice-worker/charts/nsm/values.yaml b/charts/avesha/kubeslice-worker/charts/nsm/values.yaml index e27ac2d24..972b10859 100644 --- a/charts/avesha/kubeslice-worker/charts/nsm/values.yaml +++ b/charts/avesha/kubeslice-worker/charts/nsm/values.yaml @@ -10,7 +10,7 @@ global: forwardingPlane: 
kernelImageRegistry: docker.io/aveshasystems/cmd-forwarder-kernel - kernelImageTag: 1.0.2 + kernelImageTag: 1.0.3 nsmgr: imageRegistry: docker.io/aveshasystems/cmd-nsmgr diff --git a/charts/avesha/kubeslice-worker/crds/networking.kubeslice.io_slicegateways.yaml b/charts/avesha/kubeslice-worker/crds/networking.kubeslice.io_slicegateways.yaml index 691059a69..0086c7b35 100644 --- a/charts/avesha/kubeslice-worker/crds/networking.kubeslice.io_slicegateways.yaml +++ b/charts/avesha/kubeslice-worker/crds/networking.kubeslice.io_slicegateways.yaml @@ -75,6 +75,11 @@ spec: sliceGatewayLocalVpnIp: description: Local VPN IP type: string + sliceGatewayIntermediateDeployments: + description: Intermediate Slice Gw Deployments + items: + type: string + type: array sliceGatewayName: description: Slice Gateway Name type: string diff --git a/charts/avesha/kubeslice-worker/questions.yaml b/charts/avesha/kubeslice-worker/questions.yaml index 857211bd9..8cf11a17b 100644 --- a/charts/avesha/kubeslice-worker/questions.yaml +++ b/charts/avesha/kubeslice-worker/questions.yaml @@ -17,7 +17,7 @@ questions: variable: imagePullSecrets.password - default: "" - description: "https://docs.avesha.io/documentation/enterprise/1.1.1/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster" + description: "https://docs.avesha.io/documentation/enterprise/1.3.0/install-kubeslice/rancher/rancher-install-kubeslice/#kubeslice-worker-clusters" group: "Worker Secrets from Controller" label: "Controller Namespace" required: true @@ -25,7 +25,7 @@ questions: variable: controllerSecret.namespace - default: "" - description: "https://docs.avesha.io/documentation/enterprise/1.1.1/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster" + description: "https://docs.avesha.io/documentation/enterprise/1.3.0/install-kubeslice/rancher/rancher-install-kubeslice/#kubeslice-worker-clusters" group: "Worker Secrets from Controller" label: "Controller Endpoint" required: true @@ -33,7 +33,7 @@ questions: variable: controllerSecret.endpoint - default: "" - description: "https://docs.avesha.io/documentation/enterprise/1.1.1/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster" + description: "https://docs.avesha.io/documentation/enterprise/1.3.0/install-kubeslice/rancher/rancher-install-kubeslice/#kubeslice-worker-clusters" group: "Worker Secrets from Controller" label: "Controller CA Cert" required: true @@ -41,7 +41,7 @@ questions: variable: controllerSecret.'ca.crt' - default: "" - description: "https://docs.avesha.io/documentation/enterprise/1.1.1/deployment-partners/deploying-kubeslice-on-rancher/installing-the-worker-operator-on-rancher#getting-the-secrets-of-the-registered-cluster" + description: "https://docs.avesha.io/documentation/enterprise/1.3.0/install-kubeslice/rancher/rancher-install-kubeslice/#kubeslice-worker-clusters" group: "Worker Secrets from Controller" label: "Controller Token" required: true @@ -57,7 +57,7 @@ questions: variable: cluster.name - default: "" - description: "Worker Cluster Endpoint,use 'kubectl cluster-info on worker cluster' or for details please follow https://docs.avesha.io/documentation/enterprise/1.1.1/" + description: "Worker Cluster Endpoint,use 'kubectl cluster-info on worker cluster' or for details please follow 
https://docs.avesha.io/documentation/enterprise/1.3.0/" group: "Worker Cluster Details" label: "Cluster Endpoint" required: true diff --git a/charts/avesha/kubeslice-worker/templates/openshift.yaml b/charts/avesha/kubeslice-worker/templates/openshift.yaml new file mode 100644 index 000000000..2a3683c5e --- /dev/null +++ b/charts/avesha/kubeslice-worker/templates/openshift.yaml @@ -0,0 +1,41 @@ +{{ if .Values.global.profile.openshift}} + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: "{{ .Release.Name }}-anyuid-scc" + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-15" +subjects: + - kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:serviceaccounts:spire + - kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:serviceaccounts:kubeslice-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:openshift:scc:anyuid +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: "{{ .Release.Name }}-privileged-scc" + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-15" +subjects: + - kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:serviceaccounts:spire + - kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:serviceaccounts:kubeslice-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:openshift:scc:privileged + +{{end}} \ No newline at end of file diff --git a/charts/avesha/kubeslice-worker/templates/preinstall-rbac.yaml b/charts/avesha/kubeslice-worker/templates/preinstall-rbac.yaml index 65a05e930..8c22a959f 100644 --- a/charts/avesha/kubeslice-worker/templates/preinstall-rbac.yaml +++ b/charts/avesha/kubeslice-worker/templates/preinstall-rbac.yaml @@ -76,7 +76,6 @@ rules: - get - create - update - - patch - apiGroups: - apps resources: diff --git a/charts/avesha/kubeslice-worker/templates/upgrade-crds.yaml b/charts/avesha/kubeslice-worker/templates/upgrade-crds.yaml index 878b25759..e9c412e05 100644 --- a/charts/avesha/kubeslice-worker/templates/upgrade-crds.yaml +++ b/charts/avesha/kubeslice-worker/templates/upgrade-crds.yaml @@ -1050,6 +1050,11 @@ data: sliceGatewayId: description: UUID of the slice gateway. 
type: string + sliceGatewayIntermediateDeployments: + description: Intermediate Slice Gw Deployments + items: + type: string + type: array sliceGatewayLocalVpnIp: description: Local VPN IP type: string diff --git a/charts/avesha/kubeslice-worker/templates/webhook.yaml b/charts/avesha/kubeslice-worker/templates/webhook.yaml index f51a31d99..5084aa572 100644 --- a/charts/avesha/kubeslice-worker/templates/webhook.yaml +++ b/charts/avesha/kubeslice-worker/templates/webhook.yaml @@ -74,3 +74,50 @@ webhooks: - spire - {{ .Release.Namespace | quote }} - {{ .Values.controllerNamespace | quote }} + +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: kubeslice-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: {{ $ca.Cert | b64enc }} + service: + name: kubeslice-webhook-service + namespace: {{ .Release.Namespace }} + path: /validating-webhook + failurePolicy: Fail + name: vwebhook.kubeslice.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods + sideEffects: NoneOnDryRun + namespaceSelector: + matchExpressions: + - key: kubeslice.io/slice + operator: Exists + - key: name + operator: NotIn + values: + - kube-system + - spire + - kubeslice-system + - kubeslice-controller + - key: kubernetes.io/metadata.name + operator: NotIn + values: + - kube-system + - spire + - kubeslice-system + - kubeslice-controller diff --git a/charts/avesha/kubeslice-worker/values.schema.json b/charts/avesha/kubeslice-worker/values.schema.json new file mode 100644 index 000000000..b9947b831 --- /dev/null +++ b/charts/avesha/kubeslice-worker/values.schema.json @@ -0,0 +1,20 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Values schema for Kubeslice Helm chart", + "type": "object", + "properties": { + "global": { + "type": "object", + "properties":{ + "profile": { + "type": "object", + "properties": { + "openshift": { + "type": "boolean" + } + } + } + } + } + } +} diff --git a/charts/avesha/kubeslice-worker/values.yaml b/charts/avesha/kubeslice-worker/values.yaml index 4e12e9473..638c8ef60 100644 --- a/charts/avesha/kubeslice-worker/values.yaml +++ b/charts/avesha/kubeslice-worker/values.yaml @@ -1,6 +1,6 @@ operator: image: docker.io/aveshasystems/worker-operator-ent - tag: 1.1.1 + tag: 1.3.4 pullPolicy: IfNotPresent logLevel: INFO @@ -23,7 +23,7 @@ router: routerSidecar: image: docker.io/aveshasystems/kubeslice-router-sidecar - tag: 1.4.2 + tag: 1.4.3 pullPolicy: IfNotPresent netop: @@ -72,3 +72,8 @@ imagePullSecrets: email: controllerNamespace: kubeslice-controller + +# if you're installing in openshift cluster make this variable true +global: + profile: + openshift: false diff --git a/charts/bitnami/kafka/Chart.yaml b/charts/bitnami/kafka/Chart.yaml index 6931bbfe1..46fa30dae 100644 --- a/charts/bitnami/kafka/Chart.yaml +++ b/charts/bitnami/kafka/Chart.yaml @@ -45,4 +45,4 @@ maintainers: name: kafka sources: - https://github.com/bitnami/charts/tree/main/bitnami/kafka -version: 26.3.2 +version: 26.4.0 diff --git a/charts/bitnami/kafka/README.md b/charts/bitnami/kafka/README.md index dabaf8c4c..fd40da94e 100644 --- a/charts/bitnami/kafka/README.md +++ b/charts/bitnami/kafka/README.md @@ -226,6 +226,8 @@ The command removes all the Kubernetes components associated with the chart and | `controller.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | | 
`controller.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | | `controller.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `controller.initContainerResources.limits` | The resources limits for the init container | `{}` | +| `controller.initContainerResources.requests` | The requested resources for the init container | `{}` | | `controller.resources.limits` | The resources limits for the container | `{}` | | `controller.resources.requests` | The requested resources for the container | `{}` | | `controller.podSecurityContext.enabled` | Enable security context for the pods | `true` | @@ -325,6 +327,8 @@ The command removes all the Kubernetes components associated with the chart and | `broker.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | | `broker.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | | `broker.lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `broker.initContainerResources.limits` | The resources limits for the container | `{}` | +| `broker.initContainerResources.requests` | The requested resources for the container | `{}` | | `broker.resources.limits` | The resources limits for the container | `{}` | | `broker.resources.requests` | The requested resources for the container | `{}` | | `broker.podSecurityContext.enabled` | Enable security context for the pods | `true` | diff --git a/charts/bitnami/kafka/templates/_helpers.tpl b/charts/bitnami/kafka/templates/_helpers.tpl index 86af975a5..c2c3c1b43 100644 --- a/charts/bitnami/kafka/templates/_helpers.tpl +++ b/charts/bitnami/kafka/templates/_helpers.tpl @@ -742,6 +742,9 @@ Init container definition for Kafka initialization {{- if $roleSettings.containerSecurityContext.enabled }} securityContext: {{- omit $roleSettings.containerSecurityContext "enabled" | toYaml | nindent 4 }} {{- end }} + {{- if $roleSettings.initContainerResources }} + resources: {{- toYaml $roleSettings.initContainerResources | nindent 4 }} + {{- end }} command: - /bin/bash args: diff --git a/charts/bitnami/kafka/values.yaml b/charts/bitnami/kafka/values.yaml index a297abfdb..3362ba84d 100644 --- a/charts/bitnami/kafka/values.yaml +++ b/charts/bitnami/kafka/values.yaml @@ -561,6 +561,14 @@ controller: ## @param controller.lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup ## lifecycleHooks: {} + ## Kafka init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param controller.initContainerResources.limits The resources limits for the init container + ## @param controller.initContainerResources.requests The requested resources for the init container + ## + initContainerResources: + limits: {} + requests: {} ## Kafka resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param controller.resources.limits The resources limits for the container @@ -942,6 +950,14 @@ broker: ## @param broker.lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup ## lifecycleHooks: {} + ## Kafka init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param broker.initContainerResources.limits The resources limits for the container + ## @param broker.initContainerResources.requests The 
requested resources for the container + ## + initContainerResources: + limits: {} + requests: {} ## Kafka resource requests and limits ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ ## @param broker.resources.limits The resources limits for the container diff --git a/charts/bitnami/postgresql/Chart.yaml b/charts/bitnami/postgresql/Chart.yaml index bbbb2c122..66f228963 100644 --- a/charts/bitnami/postgresql/Chart.yaml +++ b/charts/bitnami/postgresql/Chart.yaml @@ -6,14 +6,14 @@ annotations: category: Database images: | - name: os-shell - image: docker.io/bitnami/os-shell:11-debian-11-r90 + image: docker.io/bitnami/os-shell:11-debian-11-r91 - name: postgres-exporter - image: docker.io/bitnami/postgres-exporter:0.15.0-debian-11-r0 + image: docker.io/bitnami/postgres-exporter:0.15.0-debian-11-r2 - name: postgresql - image: docker.io/bitnami/postgresql:16.0.0-debian-11-r15 + image: docker.io/bitnami/postgresql:16.1.0-debian-11-r2 licenses: Apache-2.0 apiVersion: v2 -appVersion: 16.0.0 +appVersion: 16.1.0 dependencies: - name: common repository: file://./charts/common @@ -38,4 +38,4 @@ maintainers: name: postgresql sources: - https://github.com/bitnami/charts/tree/main/bitnami/postgresql -version: 13.2.3 +version: 13.2.7 diff --git a/charts/bitnami/postgresql/values.yaml b/charts/bitnami/postgresql/values.yaml index 4eed740f3..c327da2a3 100644 --- a/charts/bitnami/postgresql/values.yaml +++ b/charts/bitnami/postgresql/values.yaml @@ -98,7 +98,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/postgresql - tag: 16.0.0-debian-11-r15 + tag: 16.1.0-debian-11-r2 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1300,7 +1300,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 11-debian-11-r90 + tag: 11-debian-11-r91 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -1401,7 +1401,7 @@ metrics: image: registry: docker.io repository: bitnami/postgres-exporter - tag: 0.15.0-debian-11-r0 + tag: 0.15.0-debian-11-r2 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
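The Kafka chart changes above add `controller.initContainerResources` and `broker.initContainerResources`, which the `_helpers.tpl` hunk passes through `toYaml` into the init container's `resources:` block. A minimal values override might look like the following sketch; the resource quantities are illustrative, not chart defaults:

```yaml
# values-kafka.yaml -- illustrative override for the new init container
# resources; the quantities are examples, not chart defaults.
controller:
  initContainerResources:
    requests:
      cpu: 50m
      memory: 64Mi
    limits:
      cpu: 250m
      memory: 256Mi
broker:
  initContainerResources:
    requests:
      cpu: 50m
      memory: 64Mi
    limits:
      cpu: 250m
      memory: 256Mi
```

Anything set here applies only to the Kafka initialization init container; the existing `controller.resources` and `broker.resources` values continue to govern the main Kafka containers.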
diff --git a/charts/bitnami/redis/Chart.yaml b/charts/bitnami/redis/Chart.yaml index db9a95487..b678498ba 100644 --- a/charts/bitnami/redis/Chart.yaml +++ b/charts/bitnami/redis/Chart.yaml @@ -6,13 +6,13 @@ annotations: category: Database images: | - name: os-shell - image: docker.io/bitnami/os-shell:11-debian-11-r90 + image: docker.io/bitnami/os-shell:11-debian-11-r91 - name: redis-exporter - image: docker.io/bitnami/redis-exporter:1.55.0-debian-11-r0 + image: docker.io/bitnami/redis-exporter:1.55.0-debian-11-r2 - name: redis-sentinel - image: docker.io/bitnami/redis-sentinel:7.2.3-debian-11-r0 + image: docker.io/bitnami/redis-sentinel:7.2.3-debian-11-r1 - name: redis - image: docker.io/bitnami/redis:7.2.3-debian-11-r0 + image: docker.io/bitnami/redis:7.2.3-debian-11-r1 licenses: Apache-2.0 apiVersion: v2 appVersion: 7.2.3 @@ -37,4 +37,4 @@ maintainers: name: redis sources: - https://github.com/bitnami/charts/tree/main/bitnami/redis -version: 18.3.0 +version: 18.3.2 diff --git a/charts/bitnami/redis/templates/_helpers.tpl b/charts/bitnami/redis/templates/_helpers.tpl index 7ca7e0430..a554418b6 100644 --- a/charts/bitnami/redis/templates/_helpers.tpl +++ b/charts/bitnami/redis/templates/_helpers.tpl @@ -44,7 +44,7 @@ Return sysctl image Return the proper Docker Image Registry Secret Names */}} {{- define "redis.imagePullSecrets" -}} -{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.sentinel.image .Values.metrics.image .Values.volumePermissions.image .Values.sysctl.image) "global" .Values.global) -}} +{{- include "common.images.renderPullSecrets" (dict "images" (list .Values.image .Values.sentinel.image .Values.metrics.image .Values.volumePermissions.image .Values.sysctl.image) "context" $) -}} {{- end -}} {{/* diff --git a/charts/bitnami/redis/values.yaml b/charts/bitnami/redis/values.yaml index 63a8490b3..50fc77697 100644 --- a/charts/bitnami/redis/values.yaml +++ b/charts/bitnami/redis/values.yaml @@ -91,7 +91,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/redis - tag: 7.2.3-debian-11-r0 + tag: 7.2.3-debian-11-r1 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1060,7 +1060,7 @@ sentinel: image: registry: docker.io repository: bitnami/redis-sentinel - tag: 7.2.3-debian-11-r0 + tag: 7.2.3-debian-11-r1 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1539,7 +1539,7 @@ metrics: image: registry: docker.io repository: bitnami/redis-exporter - tag: 1.55.0-debian-11-r0 + tag: 1.55.0-debian-11-r2 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -1851,7 +1851,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 11-debian-11-r90 + tag: 11-debian-11-r91 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -1899,7 +1899,7 @@ sysctl: image: registry: docker.io repository: bitnami/os-shell - tag: 11-debian-11-r90 + tag: 11-debian-11-r91 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
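The Redis helper change above swaps `common.images.pullSecrets` for `common.images.renderPullSecrets` with the full chart context. In the bundled bitnami/common library that variant runs each secret name through the template engine before emitting it, so, assuming that behaviour, pull secret names in values may themselves contain template expressions:

```yaml
# values-redis.yaml -- illustrative: with renderPullSecrets, pull secret
# names may be templated expressions rendered at install time.
global:
  imagePullSecrets:
    - "{{ .Release.Name }}-registry-credentials"
image:
  pullSecrets:
    - my-static-pull-secret
```

Plain, non-templated names continue to render exactly as before.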
diff --git a/charts/bitnami/wordpress/Chart.lock b/charts/bitnami/wordpress/Chart.lock index ea8d9d426..5064a9e32 100644 --- a/charts/bitnami/wordpress/Chart.lock +++ b/charts/bitnami/wordpress/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: memcached repository: oci://registry-1.docker.io/bitnamicharts - version: 6.6.8 + version: 6.7.0 - name: mariadb repository: oci://registry-1.docker.io/bitnamicharts version: 14.1.1 - name: common repository: oci://registry-1.docker.io/bitnamicharts version: 2.13.3 -digest: sha256:d9432795c5a778a65c7b377525672b0ca14177c32740c53ca54cc5bef1324cb5 -generated: "2023-11-09T06:37:36.895996181Z" +digest: sha256:ea092288db95be52ecf0389162c09783f7fba6d724d0e33aeb7f178b2f19a3c7 +generated: "2023-11-09T10:46:31.269080696Z" diff --git a/charts/bitnami/wordpress/Chart.yaml b/charts/bitnami/wordpress/Chart.yaml index 8e8ebd276..eb4cef6e4 100644 --- a/charts/bitnami/wordpress/Chart.yaml +++ b/charts/bitnami/wordpress/Chart.yaml @@ -8,9 +8,9 @@ annotations: - name: apache-exporter image: docker.io/bitnami/apache-exporter:1.0.3-debian-11-r1 - name: os-shell - image: docker.io/bitnami/os-shell:11-debian-11-r90 + image: docker.io/bitnami/os-shell:11-debian-11-r91 - name: wordpress - image: docker.io/bitnami/wordpress:6.4.1-debian-11-r0 + image: docker.io/bitnami/wordpress:6.4.1-debian-11-r1 licenses: Apache-2.0 apiVersion: v2 appVersion: 6.4.1 @@ -47,4 +47,4 @@ maintainers: name: wordpress sources: - https://github.com/bitnami/charts/tree/main/bitnami/wordpress -version: 18.1.9 +version: 18.1.11 diff --git a/charts/bitnami/wordpress/charts/memcached/Chart.yaml b/charts/bitnami/wordpress/charts/memcached/Chart.yaml index 798873dc5..0a02faef0 100644 --- a/charts/bitnami/wordpress/charts/memcached/Chart.yaml +++ b/charts/bitnami/wordpress/charts/memcached/Chart.yaml @@ -30,4 +30,4 @@ maintainers: name: memcached sources: - https://github.com/bitnami/charts/tree/main/bitnami/memcached -version: 6.6.8 +version: 6.7.0 diff --git a/charts/bitnami/wordpress/charts/memcached/README.md b/charts/bitnami/wordpress/charts/memcached/README.md index f3373b87f..695837a72 100644 --- a/charts/bitnami/wordpress/charts/memcached/README.md +++ b/charts/bitnami/wordpress/charts/memcached/README.md @@ -101,70 +101,75 @@ The command removes all the Kubernetes components associated with the chart and ### Deployment/Statefulset parameters -| Name | Description | Value | -| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | -| `replicaCount` | Number of Memcached nodes | `1` | -| `containerPorts.memcached` | Memcached container port | `11211` | -| `livenessProbe.enabled` | Enable livenessProbe on Memcached containers | `true` | -| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | -| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `readinessProbe.enabled` | Enable readinessProbe on Memcached containers | `true` | -| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | -| 
`readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | -| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `startupProbe.enabled` | Enable startupProbe on Memcached containers | `false` | -| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | -| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `lifecycleHooks` | for the Memcached container(s) to automate configuration before or after startup | `{}` | -| `resources.limits` | The resources limits for the Memcached containers | `{}` | -| `resources.requests.memory` | The requested memory for the Memcached containers | `256Mi` | -| `resources.requests.cpu` | The requested cpu for the Memcached containers | `250m` | -| `podSecurityContext.enabled` | Enabled Memcached pods' Security Context | `true` | -| `podSecurityContext.fsGroup` | Set Memcached pod's Security Context fsGroup | `1001` | -| `containerSecurityContext.enabled` | Enabled Memcached containers' Security Context | `true` | -| `containerSecurityContext.runAsUser` | Set Memcached containers' Security Context runAsUser | `1001` | -| `containerSecurityContext.runAsNonRoot` | Set Memcached containers' Security Context runAsNonRoot | `true` | -| `hostAliases` | Add deployment host aliases | `[]` | -| `podLabels` | Extra labels for Memcached pods | `{}` | -| `podAnnotations` | Annotations for Memcached pods | `{}` | -| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | -| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | -| `affinity` | Affinity for pod assignment | `{}` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Tolerations for pod assignment | `[]` | -| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` | -| `priorityClassName` | Name of the existing priority class to be used by Memcached pods, priority class needs to be created beforehand | `""` | -| `schedulerName` | Kubernetes pod scheduler registry | `""` | -| `terminationGracePeriodSeconds` | In seconds, time the given to the memcached pod needs to terminate gracefully | `""` | -| `updateStrategy.type` | Memcached statefulset strategy type | `RollingUpdate` | -| `updateStrategy.rollingUpdate` | Memcached statefulset rolling update configuration parameters | `{}` | -| `extraVolumes` | Optionally specify extra list of additional volumes for the Memcached pod(s) | `[]` | -| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Memcached container(s) | `[]` | -| `sidecars` | Add additional sidecar containers to the Memcached pod(s) | `[]` | -| `initContainers` | Add additional init containers to the Memcached pod(s) | `[]` | -| `autoscaling.enabled` | Enable memcached statefulset autoscaling (requires architecture: "high-availability") | `false` | -| `autoscaling.minReplicas` | memcached statefulset autoscaling minimum number of replicas | `3` | -| `autoscaling.maxReplicas` | memcached statefulset autoscaling maximum number of replicas | `6` | -| `autoscaling.targetCPU` | memcached statefulset autoscaling target CPU percentage | `50` | -| `autoscaling.targetMemory` | memcached statefulset autoscaling target CPU memory | `50` | -| `pdb.create` | Deploy a pdb object for the Memcached pod | `false` | -| `pdb.minAvailable` | Minimum available Memcached replicas | `""` | -| `pdb.maxUnavailable` | Maximum unavailable Memcached replicas | `1` | +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| `replicaCount` | Number of Memcached nodes | `1` | +| `containerPorts.memcached` | Memcached container port | `11211` | +| `livenessProbe.enabled` | Enable livenessProbe on Memcached containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe on Memcached containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe on Memcached containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for 
startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | for the Memcached container(s) to automate configuration before or after startup | `{}` | +| `resources.limits` | The resources limits for the Memcached containers | `{}` | +| `resources.requests.memory` | The requested memory for the Memcached containers | `256Mi` | +| `resources.requests.cpu` | The requested cpu for the Memcached containers | `250m` | +| `podSecurityContext.enabled` | Enabled Memcached pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set Memcached pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `hostAliases` | Add deployment host aliases | `[]` | +| `podLabels` | Extra labels for Memcached pods | `{}` | +| `podAnnotations` | Annotations for Memcached pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by Memcached pods, priority class needs to be created beforehand | `""` | +| `schedulerName` | Kubernetes pod scheduler registry | `""` | +| `terminationGracePeriodSeconds` | In seconds, time the given to the memcached pod needs to terminate gracefully | `""` | +| `updateStrategy.type` | Memcached statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | Memcached statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the Memcached pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Memcached container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the Memcached pod(s) | `[]` | +| `initContainers` | Add additional init containers to the Memcached pod(s) | `[]` | +| `autoscaling.enabled` | Enable memcached statefulset autoscaling (requires architecture: "high-availability") | `false` | +| `autoscaling.minReplicas` | memcached statefulset autoscaling minimum number of replicas | `3` | +| `autoscaling.maxReplicas` | memcached statefulset autoscaling maximum number of replicas | `6` | +| `autoscaling.targetCPU` | memcached statefulset autoscaling target CPU percentage | `50` | +| `autoscaling.targetMemory` | memcached statefulset autoscaling target CPU memory | `50` | +| `pdb.create` | Deploy a pdb object for the Memcached pod | `false` | +| `pdb.minAvailable` | Minimum available Memcached replicas | `""` | +| `pdb.maxUnavailable` | Maximum unavailable Memcached replicas | `1` | ### Traffic Exposure parameters @@ -205,65 +210,70 @@ The command removes all the Kubernetes components associated with the chart and ### Volume Permissions parameters -| Name | Description | Value | -| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ | -| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | -| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | -| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | -| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | -| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | -| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | -| `metrics.enabled` | Start a side-car prometheus exporter | `false` | -| `metrics.image.registry` | Memcached exporter image registry | `REGISTRY_NAME` | -| `metrics.image.repository` | Memcached exporter image repository | `REPOSITORY_NAME/memcached-exporter` | -| `metrics.image.digest` | Memcached exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `metrics.containerPorts.metrics` | Memcached Prometheus Exporter container port | `9150` | -| `metrics.resources.limits` | Init container volume-permissions resource limits | `{}` | -| `metrics.resources.requests` | Init container volume-permissions resource requests | `{}` | -| `metrics.containerSecurityContext.enabled` | Enabled Metrics containers' Security Context | `true` | -| `metrics.containerSecurityContext.runAsUser` | Set Metrics containers' Security Context runAsUser | `1001` | -| `metrics.containerSecurityContext.runAsNonRoot` | Set Metrics containers' Security Context runAsNonRoot | `true` | -| `metrics.livenessProbe.enabled` | Enable livenessProbe on Memcached Prometheus exporter containers | `true` | -| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` | -| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | -| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `metrics.readinessProbe.enabled` | Enable readinessProbe on Memcached Prometheus exporter containers | `true` | -| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | -| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | -| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `metrics.startupProbe.enabled` | Enable startupProbe on Memcached Prometheus exporter containers | `false` | -| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | -| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | -| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one 
| `{}` | -| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `metrics.podAnnotations` | Memcached Prometheus exporter pod Annotation and Labels | `{}` | -| `metrics.service.ports.metrics` | Prometheus metrics service port | `9150` | -| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `metrics.service.annotations` | Annotations for the Prometheus metrics service | `{}` | -| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | -| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | -| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | -| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | -| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | -| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | -| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| Name | Description | Value | +| ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Memcached exporter image registry | `REGISTRY_NAME` | +| `metrics.image.repository` | Memcached exporter image repository | `REPOSITORY_NAME/memcached-exporter` | +| `metrics.image.digest` | Memcached exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.containerPorts.metrics` | Memcached Prometheus Exporter container port | `9150` | +| `metrics.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `metrics.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `metrics.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `metrics.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `metrics.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` | +| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `metrics.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `metrics.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe on Memcached Prometheus exporter containers | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe on Memcached Prometheus exporter containers | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `3` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable startupProbe on Memcached Prometheus exporter containers | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `metrics.podAnnotations` | Memcached Prometheus exporter pod Annotation and Labels | `{}` | +| `metrics.service.ports.metrics` | Prometheus metrics 
service port | `9150` | +| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.service.annotations` | Annotations for the Prometheus metrics service | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | The above parameters map to the environment variables defined in the [bitnami/memcached](https://github.com/bitnami/containers/tree/main/bitnami/memcached) container image. For more information please refer to the [bitnami/memcached](https://github.com/bitnami/containers/tree/main/bitnami/memcached) container image documentation. diff --git a/charts/bitnami/wordpress/charts/memcached/values.yaml b/charts/bitnami/wordpress/charts/memcached/values.yaml index b6383c869..fcf5b5f18 100644 --- a/charts/bitnami/wordpress/charts/memcached/values.yaml +++ b/charts/bitnami/wordpress/charts/memcached/values.yaml @@ -222,14 +222,26 @@ podSecurityContext: fsGroup: 1001 ## Configure Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## @param containerSecurityContext.enabled Enabled Memcached containers' Security Context -## @param containerSecurityContext.runAsUser Set Memcached containers' Security Context runAsUser -## @param containerSecurityContext.runAsNonRoot Set Memcached containers' Security Context runAsNonRoot +## @param containerSecurityContext.enabled Enabled containers' Security Context +## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot +## @param containerSecurityContext.privileged Set container's Security Context privileged +## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem +## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation +## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped +## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + 
seccompProfile: + type: "RuntimeDefault" ## @param hostAliases Add deployment host aliases ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ ## @@ -585,14 +597,26 @@ metrics: requests: {} ## Configure Metrics Container Security Context ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param metrics.containerSecurityContext.enabled Enabled Metrics containers' Security Context - ## @param metrics.containerSecurityContext.runAsUser Set Metrics containers' Security Context runAsUser - ## @param metrics.containerSecurityContext.runAsNonRoot Set Metrics containers' Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.enabled Enabled containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.privileged Set container's Security Context privileged + ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile ## containerSecurityContext: enabled: true runAsUser: 1001 runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" ## Configure extra options for Memcached Prometheus exporter containers' liveness, readiness and startup probes ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes ## @param metrics.livenessProbe.enabled Enable livenessProbe on Memcached Prometheus exporter containers diff --git a/charts/bitnami/wordpress/values.yaml b/charts/bitnami/wordpress/values.yaml index 5a786e6d2..fc729f372 100644 --- a/charts/bitnami/wordpress/values.yaml +++ b/charts/bitnami/wordpress/values.yaml @@ -76,7 +76,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/wordpress - tag: 6.4.1-debian-11-r0 + tag: 6.4.1-debian-11-r1 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -768,7 +768,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 11-debian-11-r90 + tag: 11-debian-11-r91 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
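The memcached subchart bundled with WordPress now defaults to a tighter container security context for both the memcached and metrics containers (no privilege escalation, all capabilities dropped, `RuntimeDefault` seccomp). These are plain values, so a release can tighten or relax them; below is an illustrative override from the parent WordPress chart, assuming the dependency is configured under the usual `memcached` key:

```yaml
# values-wordpress.yaml -- illustrative: adjusting the memcached subchart's
# new container security context defaults from the parent chart.
memcached:
  enabled: true
  containerSecurityContext:
    enabled: true
    runAsUser: 1001
    runAsNonRoot: true
    privileged: false
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true   # stricter than the chart default of false
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: RuntimeDefault
```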
diff --git a/charts/clastix/kamaji/Chart.yaml b/charts/clastix/kamaji/Chart.yaml index 6bb3486c7..f345e98fa 100644 --- a/charts/clastix/kamaji/Chart.yaml +++ b/charts/clastix/kamaji/Chart.yaml @@ -20,4 +20,4 @@ name: kamaji sources: - https://github.com/clastix/kamaji type: application -version: 0.12.8 +version: 0.12.9 diff --git a/charts/clastix/kamaji/README.md b/charts/clastix/kamaji/README.md index 6fb50c005..909d6a170 100644 --- a/charts/clastix/kamaji/README.md +++ b/charts/clastix/kamaji/README.md @@ -1,6 +1,6 @@ # kamaji -![Version: 0.12.8](https://img.shields.io/badge/Version-0.12.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.3.5](https://img.shields.io/badge/AppVersion-v0.3.5-informational?style=flat-square) +![Version: 0.12.9](https://img.shields.io/badge/Version-0.12.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.3.5](https://img.shields.io/badge/AppVersion-v0.3.5-informational?style=flat-square) Kamaji is a Kubernetes Control Plane Manager. @@ -73,8 +73,9 @@ Here the values you can override: | datastore.basicAuth.usernameSecret.name | string | `nil` | The name of the Secret containing the username used to connect to the relational database. | | datastore.basicAuth.usernameSecret.namespace | string | `nil` | The namespace of the Secret containing the username used to connect to the relational database. | | datastore.driver | string | `"etcd"` | (string) The Kamaji Datastore driver, supported: etcd, MySQL, PostgreSQL (defaults=etcd). | +| datastore.enabled | bool | `true` | (bool) Enable the Kamaji Datastore creation (default=true) | | datastore.endpoints | list | `[]` | (array) List of endpoints of the selected Datastore. When letting the Chart install the etcd datastore, this field is populated automatically. | -| datastore.nameOverride | string | `nil` | The Datastore name override, if empty defaults to `default` | +| datastore.nameOverride | string | `nil` | The Datastore name override, if empty and enabled=true defaults to `default`, if enabled=false, this is the name of the Datastore to connect to. | | datastore.tlsConfig.certificateAuthority.certificate.keyPath | string | `nil` | Key of the Secret which contains the content of the certificate. | | datastore.tlsConfig.certificateAuthority.certificate.name | string | `nil` | Name of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore. | | datastore.tlsConfig.certificateAuthority.certificate.namespace | string | `nil` | Namespace of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore. | diff --git a/charts/clastix/kamaji/templates/_helpers_datastore.tpl b/charts/clastix/kamaji/templates/_helpers_datastore.tpl index b7b4698b4..3ed2c16e7 100644 --- a/charts/clastix/kamaji/templates/_helpers_datastore.tpl +++ b/charts/clastix/kamaji/templates/_helpers_datastore.tpl @@ -2,7 +2,11 @@ Create a default fully qualified datastore name. */}} {{- define "datastore.fullname" -}} +{{- if .Values.datastore.enabled }} {{- default "default" .Values.datastore.nameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- required "A valid .Values.datastore.nameOverride required!" 
.Values.datastore.nameOverride }} +{{- end }} {{- end }} {{/* diff --git a/charts/clastix/kamaji/templates/datastore.yaml b/charts/clastix/kamaji/templates/datastore.yaml index 60a84ffe3..b54ef99a0 100644 --- a/charts/clastix/kamaji/templates/datastore.yaml +++ b/charts/clastix/kamaji/templates/datastore.yaml @@ -1,3 +1,4 @@ +{{- if .Values.datastore.enabled}} apiVersion: kamaji.clastix.io/v1alpha1 kind: DataStore metadata: @@ -24,3 +25,4 @@ spec: {{- include "datastore.certificateAuthority" . | indent 6 }} clientCertificate: {{- include "datastore.clientCertificate" . | indent 6 }} +{{- end}} diff --git a/charts/clastix/kamaji/values.yaml b/charts/clastix/kamaji/values.yaml index d8aec6b4b..092b3fdd4 100644 --- a/charts/clastix/kamaji/values.yaml +++ b/charts/clastix/kamaji/values.yaml @@ -157,7 +157,9 @@ loggingDevel: enable: false datastore: - # -- (string) The Datastore name override, if empty defaults to `default` + # -- (bool) Enable the Kamaji Datastore creation (default=true) + enabled: true + # -- (string) The Datastore name override, if empty and enabled=true defaults to `default`, if enabled=false, this is the name of the Datastore to connect to. nameOverride: # -- (string) The Kamaji Datastore driver, supported: etcd, MySQL, PostgreSQL (defaults=etcd). driver: etcd diff --git a/charts/confluent/confluent-for-kubernetes/Chart.yaml b/charts/confluent/confluent-for-kubernetes/Chart.yaml index 71451d1e9..4a276ac04 100644 --- a/charts/confluent/confluent-for-kubernetes/Chart.yaml +++ b/charts/confluent/confluent-for-kubernetes/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.15-0' catalog.cattle.io/release-name: confluent-for-kubernetes apiVersion: v1 -appVersion: 2.7.1 +appVersion: 2.7.2 description: A Helm chart to deploy Confluent for Kubernetes home: https://www.confluent.io/ icon: https://cdn.confluent.io/wp-content/uploads/seo-logo-meadow.png @@ -19,4 +19,4 @@ maintainers: name: confluent-for-kubernetes sources: - https://docs.confluent.io/current/index.html -version: 0.824.29 +version: 0.824.33 diff --git a/charts/confluent/confluent-for-kubernetes/values.yaml b/charts/confluent/confluent-for-kubernetes/values.yaml index edfeb7e37..cd1559224 100644 --- a/charts/confluent/confluent-for-kubernetes/values.yaml +++ b/charts/confluent/confluent-for-kubernetes/values.yaml @@ -81,7 +81,7 @@ image: registry: docker.io repository: confluentinc/confluent-operator pullPolicy: IfNotPresent - tag: "0.824.29" + tag: "0.824.33" ### ## Priority class for Confluent Operator pod diff --git a/charts/datadog/datadog-operator/CHANGELOG.md b/charts/datadog/datadog-operator/CHANGELOG.md index 07dbb818d..f3f38b0f7 100644 --- a/charts/datadog/datadog-operator/CHANGELOG.md +++ b/charts/datadog/datadog-operator/CHANGELOG.md @@ -1,5 +1,8 @@ # Changelog +## 1.2.2 +* Fix that an error occurs when specifying replicaCount using `--set` + ## 1.2.1 * Minor spelling corrections in the `datadog-operator` chart. 
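The Kamaji changes above introduce `datastore.enabled`: when set to `false`, the chart no longer renders the bundled `DataStore` resource and `datastore.nameOverride` becomes required, because the `datastore.fullname` helper now fails the render without it. An illustrative override for pointing the controller at a pre-existing datastore:

```yaml
# values-kamaji.yaml -- illustrative: skip creating the DataStore and reuse an
# existing one; nameOverride is mandatory when enabled is false.
datastore:
  enabled: false
  nameOverride: shared-etcd   # name of the existing DataStore to connect to
```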
diff --git a/charts/datadog/datadog-operator/Chart.yaml b/charts/datadog/datadog-operator/Chart.yaml index 7b50a9609..9e2f20bb3 100644 --- a/charts/datadog/datadog-operator/Chart.yaml +++ b/charts/datadog/datadog-operator/Chart.yaml @@ -26,4 +26,4 @@ name: datadog-operator sources: - https://app.datadoghq.com/account/settings#agent/kubernetes - https://github.com/DataDog/datadog-agent -version: 1.2.1 +version: 1.2.2 diff --git a/charts/datadog/datadog-operator/README.md b/charts/datadog/datadog-operator/README.md index 77181e99f..1b81b92f4 100644 --- a/charts/datadog/datadog-operator/README.md +++ b/charts/datadog/datadog-operator/README.md @@ -1,6 +1,6 @@ # Datadog Operator -![Version: 1.2.1](https://img.shields.io/badge/Version-1.2.1-informational?style=flat-square) ![AppVersion: 1.2.0](https://img.shields.io/badge/AppVersion-1.2.0-informational?style=flat-square) +![Version: 1.2.2](https://img.shields.io/badge/Version-1.2.2-informational?style=flat-square) ![AppVersion: 1.2.0](https://img.shields.io/badge/AppVersion-1.2.0-informational?style=flat-square) ## Values diff --git a/charts/datadog/datadog-operator/templates/pod_disruption_budget.yaml b/charts/datadog/datadog-operator/templates/pod_disruption_budget.yaml index a4417538a..3c9553e63 100644 --- a/charts/datadog/datadog-operator/templates/pod_disruption_budget.yaml +++ b/charts/datadog/datadog-operator/templates/pod_disruption_budget.yaml @@ -1,4 +1,4 @@ -{{- if gt .Values.replicaCount 1.0 -}} +{{- if gt (int .Values.replicaCount) 1 -}} apiVersion: {{ template "policy.poddisruptionbudget.apiVersion" . }} kind: PodDisruptionBudget metadata: diff --git a/charts/datadog/datadog/CHANGELOG.md b/charts/datadog/datadog/CHANGELOG.md index 1297834fe..f548dc583 100644 --- a/charts/datadog/datadog/CHANGELOG.md +++ b/charts/datadog/datadog/CHANGELOG.md @@ -1,5 +1,10 @@ # Datadog changelog +## 3.45.0 + +* Separate values for `DD_CONTAINER_INCLUDE` and `DD_CONTAINER_EXCLUDE` in `Agent` and `Cluster-Agent` + Note: this requires agent/cluster agent version 7.50.0+ + ## 3.44.1 * Fix local agent Kubernetes service to include APM traceport diff --git a/charts/datadog/datadog/Chart.yaml b/charts/datadog/datadog/Chart.yaml index 57d7092b9..d49d31e0d 100644 --- a/charts/datadog/datadog/Chart.yaml +++ b/charts/datadog/datadog/Chart.yaml @@ -19,4 +19,4 @@ name: datadog sources: - https://app.datadoghq.com/account/settings#agent/kubernetes - https://github.com/DataDog/datadog-agent -version: 3.44.1 +version: 3.45.0 diff --git a/charts/datadog/datadog/README.md b/charts/datadog/datadog/README.md index 3831feb60..dd23e77de 100644 --- a/charts/datadog/datadog/README.md +++ b/charts/datadog/datadog/README.md @@ -1,6 +1,6 @@ # Datadog -![Version: 3.44.1](https://img.shields.io/badge/Version-3.44.1-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) +![Version: 3.45.0](https://img.shields.io/badge/Version-3.45.0-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) [Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). 
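The datadog-operator PodDisruptionBudget fix above casts `replicaCount` to `int` before comparing. Helm tends to hand templates different numeric types depending on how a value arrives (numbers from a values file and numbers from `--set` are decoded differently), so the old `gt .Values.replicaCount 1.0` comparison could fail with a type error when the replica count was supplied on the command line, which is what the 1.2.2 changelog entry refers to. After the fix, either of the following renders the PDB; the repository alias in the comment is an assumption:

```yaml
# values-datadog-operator.yaml -- illustrative: any replica count above 1 now
# renders the PodDisruptionBudget, however the value is supplied.
replicaCount: 2
# Equivalent command-line form (repository alias assumed):
#   helm upgrade --install datadog-operator datadog/datadog-operator --set replicaCount=2
```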
For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/). @@ -497,6 +497,8 @@ helm install \ | clusterAgent.affinity | object | `{}` | Allow the Cluster Agent Deployment to schedule using affinity rules | | clusterAgent.command | list | `[]` | Command to run in the Cluster Agent container as entrypoint | | clusterAgent.confd | object | `{}` | Provide additional cluster check configurations. Each key will become a file in /conf.d. | +| clusterAgent.containerExclude | string | `nil` | Exclude containers from the Cluster Agent Autodiscovery, as a space-separated list. (Requires Agent/Cluster Agent 7.50.0+) | +| clusterAgent.containerInclude | string | `nil` | Include containers in the Cluster Agent Autodiscovery, as a space-separated list. If a container matches an include rule, itโ€™s always included in the Autodiscovery. (Requires Agent/Cluster Agent 7.50.0+) | | clusterAgent.containers.clusterAgent.securityContext | object | `{"allowPrivilegeEscalation":false,"readOnlyRootFilesystem":true}` | Specify securityContext on the cluster-agent container. | | clusterAgent.containers.initContainers.securityContext | object | `{}` | | | clusterAgent.createPodDisruptionBudget | bool | `false` | Create pod disruption budget for Cluster Agent deployments | diff --git a/charts/datadog/datadog/templates/_components-common-env.yaml b/charts/datadog/datadog/templates/_components-common-env.yaml index 88ecbf7d5..8ddd83a99 100644 --- a/charts/datadog/datadog/templates/_components-common-env.yaml +++ b/charts/datadog/datadog/templates/_components-common-env.yaml @@ -48,14 +48,6 @@ - name: DD_DD_URL value: {{ .Values.datadog.dd_url | quote }} {{- end }} -{{- if .Values.datadog.containerInclude }} -- name: DD_CONTAINER_INCLUDE - value: {{ .Values.datadog.containerInclude | quote }} -{{- end }} -{{- if .Values.datadog.containerExclude }} -- name: DD_CONTAINER_EXCLUDE - value: {{ .Values.datadog.containerExclude | quote }} -{{- end }} {{- if not .Values.datadog.excludePauseContainer }} - name: DD_EXCLUDE_PAUSE_CONTAINER value: "false" diff --git a/charts/datadog/datadog/templates/_containers-common-env.yaml b/charts/datadog/datadog/templates/_containers-common-env.yaml index 4d9909820..7307f1e45 100644 --- a/charts/datadog/datadog/templates/_containers-common-env.yaml +++ b/charts/datadog/datadog/templates/_containers-common-env.yaml @@ -48,6 +48,14 @@ - name: DD_AC_EXCLUDE value: {{ .Values.datadog.acExclude | quote }} {{- end }} +{{- if .Values.datadog.containerInclude }} +- name: DD_CONTAINER_INCLUDE + value: {{ .Values.datadog.containerInclude | quote }} +{{- end }} +{{- if .Values.datadog.containerExclude }} +- name: DD_CONTAINER_EXCLUDE + value: {{ .Values.datadog.containerExclude | quote }} +{{- end }} {{- if .Values.datadog.containerIncludeMetrics }} - name: DD_CONTAINER_INCLUDE_METRICS value: {{ .Values.datadog.containerIncludeMetrics | quote }} diff --git a/charts/datadog/datadog/templates/cluster-agent-deployment.yaml b/charts/datadog/datadog/templates/cluster-agent-deployment.yaml index 8d266a2d3..b3eda489a 100644 --- a/charts/datadog/datadog/templates/cluster-agent-deployment.yaml +++ b/charts/datadog/datadog/templates/cluster-agent-deployment.yaml @@ -187,6 +187,14 @@ spec: - name: DD_EXTERNAL_METRICS_PROVIDER_ENDPOINT value: {{ .Values.clusterAgent.metricsProvider.endpoint | quote }} {{- end }} + {{- if .Values.clusterAgent.containerInclude }} + - name: DD_CONTAINER_INCLUDE + 
value: {{ .Values.clusterAgent.containerInclude | quote }} + {{- end }} + {{- if .Values.clusterAgent.containerExclude }} + - name: DD_CONTAINER_EXCLUDE + value: {{ .Values.clusterAgent.containerExclude | quote }} + {{- end }} - name: DD_EXTERNAL_METRICS_AGGREGATOR value: {{ .Values.clusterAgent.metricsProvider.aggregator | quote }} {{- end }} diff --git a/charts/datadog/datadog/values.yaml b/charts/datadog/datadog/values.yaml index 4df28c43e..bd1d438f8 100644 --- a/charts/datadog/datadog/values.yaml +++ b/charts/datadog/datadog/values.yaml @@ -1177,6 +1177,19 @@ clusterAgent: additionalLabels: {} # key: "value" + # clusterAgent.containerExclude -- Exclude containers from the Cluster Agent + # Autodiscovery, as a space-separated list. (Requires Agent/Cluster Agent 7.50.0+) + + ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#exclude-containers + containerExclude: # "image:datadog/agent" + + # clusterAgent.containerInclude -- Include containers in the Cluster Agent Autodiscovery, + # as a space-separated list. If a container matches an include rule, itโ€™s + # always included in the Autodiscovery. (Requires Agent/Cluster Agent 7.50.0+) + + ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#include-containers + containerInclude: + ## This section lets you configure the agents deployed by this chart to connect to a Cluster Agent ## deployed independently existingClusterAgent: diff --git a/charts/nats/nats/Chart.yaml b/charts/nats/nats/Chart.yaml index acf50861a..c1c65f936 100644 --- a/charts/nats/nats/Chart.yaml +++ b/charts/nats/nats/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.16-0' catalog.cattle.io/release-name: nats apiVersion: v2 -appVersion: 2.10.4 +appVersion: 2.10.5 description: A Helm chart for the NATS.io High Speed Cloud Native Distributed Communications Technology. home: http://github.com/nats-io/k8s @@ -18,4 +18,4 @@ maintainers: name: The NATS Authors url: https://github.com/nats-io name: nats -version: 1.1.4 +version: 1.1.5 diff --git a/charts/nats/nats/values.yaml b/charts/nats/nats/values.yaml index 7687dfbf9..cb45b08f6 100644 --- a/charts/nats/nats/values.yaml +++ b/charts/nats/nats/values.yaml @@ -312,7 +312,7 @@ config: container: image: repository: nats - tag: 2.10.4-alpine + tag: 2.10.5-alpine pullPolicy: registry: diff --git a/charts/speedscale/speedscale-operator/Chart.yaml b/charts/speedscale/speedscale-operator/Chart.yaml index 1522199d5..16524e426 100644 --- a/charts/speedscale/speedscale-operator/Chart.yaml +++ b/charts/speedscale/speedscale-operator/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>= 1.17.0-0' catalog.cattle.io/release-name: speedscale-operator apiVersion: v1 -appVersion: 1.4.22 +appVersion: 1.4.32 description: Stress test your APIs with real world scenarios. Collect and replay traffic without scripting. 
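Returning to the Datadog chart changes above: container include/exclude filtering is now split per component. `datadog.containerInclude` and `datadog.containerExclude` move from the shared components env template into the agent containers' env template, while the new `clusterAgent.containerInclude` and `clusterAgent.containerExclude` set `DD_CONTAINER_INCLUDE`/`DD_CONTAINER_EXCLUDE` on the Cluster Agent deployment; both require Agent/Cluster Agent 7.50.0 or later. An illustrative values fragment, with example filter patterns only:

```yaml
# values-datadog.yaml -- illustrative: independent container filtering for the
# node Agent and the Cluster Agent (requires Agent/Cluster Agent 7.50.0+).
datadog:
  containerExclude: "image:.*-sidecar$"        # node Agent only
clusterAgent:
  containerInclude: "kube_namespace:prod-.*"   # Cluster Agent only
  containerExclude: "name:istio-proxy"
```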
home: https://speedscale.com @@ -24,4 +24,4 @@ maintainers: - email: support@speedscale.com name: Speedscale Support name: speedscale-operator -version: 1.4.1 +version: 1.4.3 diff --git a/charts/speedscale/speedscale-operator/README.md b/charts/speedscale/speedscale-operator/README.md index 810d82c41..8d727d518 100644 --- a/charts/speedscale/speedscale-operator/README.md +++ b/charts/speedscale/speedscale-operator/README.md @@ -101,10 +101,10 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. -### Upgrade to 1.4.1 +### Upgrade to 1.4.3 ```bash -kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.4.1/templates/crds/trafficreplays.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.4.3/templates/crds/trafficreplays.yaml ``` ### Upgrade to 1.1.0 diff --git a/charts/speedscale/speedscale-operator/app-readme.md b/charts/speedscale/speedscale-operator/app-readme.md index 810d82c41..8d727d518 100644 --- a/charts/speedscale/speedscale-operator/app-readme.md +++ b/charts/speedscale/speedscale-operator/app-readme.md @@ -101,10 +101,10 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. -### Upgrade to 1.4.1 +### Upgrade to 1.4.3 ```bash -kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.4.1/templates/crds/trafficreplays.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/speedscale/operator-helm/main/1.4.3/templates/crds/trafficreplays.yaml ``` ### Upgrade to 1.1.0 diff --git a/charts/speedscale/speedscale-operator/values.yaml b/charts/speedscale/speedscale-operator/values.yaml index e9905de61..747a05ef5 100644 --- a/charts/speedscale/speedscale-operator/values.yaml +++ b/charts/speedscale/speedscale-operator/values.yaml @@ -20,7 +20,7 @@ clusterName: "my-cluster" # Speedscale components image settings. image: registry: gcr.io/speedscale - tag: v1.4.22 + tag: v1.4.32 pullPolicy: Always # Log level for Speedscale components. diff --git a/index.yaml b/index.yaml index 001c9f622..5cc570e0c 100644 --- a/index.yaml +++ b/index.yaml @@ -2348,8 +2348,8 @@ entries: argo-cd: - annotations: artifacthub.io/changes: | - - kind: changed - description: Upgrade Argo CD to v2.9.0 + - kind: fixed + description: Add configurations for Applications in any namespace artifacthub.io/signKey: | fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252 url: https://argoproj.github.io/argo-helm/pgp_keys.asc @@ -2360,7 +2360,7 @@ entries: catalog.cattle.io/release-name: argo-cd apiVersion: v2 appVersion: v2.9.0 - created: "2023-11-06T14:43:12.698967005Z" + created: "2023-11-13T12:57:27.718901299Z" dependencies: - condition: redis-ha.enabled name: redis-ha @@ -2368,7 +2368,46 @@ entries: version: 4.23.0 description: A Helm chart for Argo CD, a declarative, GitOps continuous delivery tool for Kubernetes. 
- digest: 98e3940e05a80f96977e8dbe9fbddca8bc081579d4f0fa16e61e2cdda901c4bd + digest: fbfcdd88baba19c51ec08c0fe5a353affbc65ba00edb266a8c902cc476a0b8fc + home: https://github.com/argoproj/argo-helm + icon: https://argo-cd.readthedocs.io/en/stable/assets/logo.png + keywords: + - argoproj + - argocd + - gitops + kubeVersion: '>=1.23.0-0' + maintainers: + - name: argoproj + url: https://argoproj.github.io/ + name: argo-cd + sources: + - https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd + - https://github.com/argoproj/argo-cd + urls: + - assets/argo/argo-cd-5.51.1.tgz + version: 5.51.1 + - annotations: + artifacthub.io/changes: | + - kind: changed + description: Upgrade Argo CD to v2.9.0 + artifacthub.io/signKey: | + fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252 + url: https://argoproj.github.io/argo-helm/pgp_keys.asc + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Argo CD + catalog.cattle.io/kube-version: '>=1.23.0-0' + catalog.cattle.io/release-name: argo-cd + apiVersion: v2 + appVersion: v2.9.0 + created: "2023-11-13T12:57:07.17390649Z" + dependencies: + - condition: redis-ha.enabled + name: redis-ha + repository: file://./charts/redis-ha + version: 4.23.0 + description: A Helm chart for Argo CD, a declarative, GitOps continuous delivery + tool for Kubernetes. + digest: 9933964023bb3ce32839caabe6a25c4b0ad7a6bf163eb71cf67d7528c3565993 home: https://github.com/argoproj/argo-helm icon: https://argo-cd.readthedocs.io/en/stable/assets/logo.png keywords: @@ -14736,6 +14775,32 @@ entries: - assets/mongodb/community-operator-0.7.6.tgz version: 0.7.6 confluent-for-kubernetes: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Confluent For Kubernetes + catalog.cattle.io/kube-version: '>=1.15-0' + catalog.cattle.io/release-name: confluent-for-kubernetes + apiVersion: v1 + appVersion: 2.7.2 + created: "2023-11-13T12:57:32.920211475Z" + description: A Helm chart to deploy Confluent for Kubernetes + digest: 4fd9b22b2454a9d8cf4a2cf00d3da34f90ed1151d7fbc3c05dcc630d1c2206e7 + home: https://www.confluent.io/ + icon: https://cdn.confluent.io/wp-content/uploads/seo-logo-meadow.png + keywords: + - Confluent + - Confluent Operator + - Confluent Platform + - CFK + maintainers: + - email: operator@confluent.io + name: Confluent Operator + name: confluent-for-kubernetes + sources: + - https://docs.confluent.io/current/index.html + urls: + - assets/confluent/confluent-for-kubernetes-0.824.33.tgz + version: 0.824.33 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Confluent For Kubernetes @@ -17945,6 +18010,43 @@ entries: - assets/weka/csi-wekafsplugin-0.6.400.tgz version: 0.6.400 datadog: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Datadog + catalog.cattle.io/kube-version: '>=1.10-0' + catalog.cattle.io/release-name: datadog + apiVersion: v1 + appVersion: "7" + created: "2023-11-13T12:57:33.612624463Z" + dependencies: + - condition: clusterAgent.metricsProvider.useDatadogMetrics + name: datadog-crds + repository: https://helm.datadoghq.com + tags: + - install-crds + version: 1.0.1 + - condition: datadog.kubeStateMetricsEnabled + name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 2.13.2 + description: Datadog Agent + digest: 0a3cde3e5da8d095952fffbda5a4abd9e44811b85cfa6000e4ed18c466896669 + home: https://www.datadoghq.com + icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png + keywords: + - 
monitoring + - alerting + - metric + maintainers: + - email: support@datadoghq.com + name: Datadog + name: datadog + sources: + - https://app.datadoghq.com/account/settings#agent/kubernetes + - https://github.com/DataDog/datadog-agent + urls: + - assets/datadog/datadog-3.45.0.tgz + version: 3.45.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Datadog @@ -20741,6 +20843,39 @@ entries: - assets/datadog/datadog-2.4.200.tgz version: 2.4.200 datadog-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Datadog Operator + catalog.cattle.io/release-name: datadog-operator + apiVersion: v2 + appVersion: 1.2.0 + created: "2023-11-13T12:57:33.751504464Z" + dependencies: + - alias: datadogCRDs + condition: installCRDs + name: datadog-crds + repository: file://./charts/datadog-crds + tags: + - install-crds + version: =1.2.0 + description: Datadog Operator + digest: a12a9acae8798efb347305e2fbec18b7cc3868cef54a2fe7f6b5e7ea81a1c035 + home: https://www.datadoghq.com + icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png + keywords: + - monitoring + - alerting + - metric + maintainers: + - email: support@datadoghq.com + name: Datadog + name: datadog-operator + sources: + - https://app.datadoghq.com/account/settings#agent/kubernetes + - https://github.com/DataDog/datadog-agent + urls: + - assets/datadog/datadog-operator-1.2.2.tgz + version: 1.2.2 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Datadog Operator @@ -31884,6 +32019,58 @@ entries: - assets/kasten/k10-4.5.900.tgz version: 4.5.900 kafka: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Kafka + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: kafka + category: Infrastructure + images: | + - name: jmx-exporter + image: docker.io/bitnami/jmx-exporter:0.20.0-debian-11-r0 + - name: kafka-exporter + image: docker.io/bitnami/kafka-exporter:1.7.0-debian-11-r132 + - name: kafka + image: docker.io/bitnami/kafka:3.6.0-debian-11-r1 + - name: kubectl + image: docker.io/bitnami/kubectl:1.28.3-debian-11-r0 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 3.6.0 + created: "2023-11-13T12:57:30.092699102Z" + dependencies: + - condition: zookeeper.enabled + name: zookeeper + repository: file://./charts/zookeeper + version: 12.x.x + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Kafka is a distributed streaming platform designed to build + real-time pipelines and can be used as a message broker or as a replacement + for a log aggregation solution for big data applications. + digest: 35a3ee0a452a73f93659ef5670498071a3a6c5bf1a6392871fc93c60dc0716fa + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/kafka.svg + keywords: + - kafka + - zookeeper + - streaming + - producer + - consumer + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: kafka + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/kafka + urls: + - assets/bitnami/kafka-26.4.0.tgz + version: 26.4.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Kafka @@ -34657,6 +34844,33 @@ entries: - assets/bitnami/kafka-19.0.1.tgz version: 19.0.1 kamaji: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Kamaji + catalog.cattle.io/kube-version: '>=1.21.0-0' + catalog.cattle.io/release-name: kamaji + apiVersion: v2 + appVersion: v0.3.5 + created: "2023-11-13T12:57:32.685653985Z" + description: Kamaji is a Kubernetes Control Plane Manager. + digest: 4570f985ac6e4410ef3e40d0048c7db454dd6f2efa474c631e4894a4abf00229 + home: https://github.com/clastix/kamaji + icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png + kubeVersion: '>=1.21.0-0' + maintainers: + - email: dario@tranchitella.eu + name: Dario Tranchitella + - email: me@maxgio.it + name: Massimiliano Giovagnoli + - email: me@bsctl.io + name: Adriano Pezzuto + name: kamaji + sources: + - https://github.com/clastix/kamaji + type: application + urls: + - assets/clastix/kamaji-0.12.9.tgz + version: 0.12.9 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Kamaji @@ -36417,6 +36631,52 @@ entries: - assets/ngrok/kubernetes-ingress-controller-0.11.0.tgz version: 0.11.0 kubeslice-controller: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Avesha Kubeslice Controller + catalog.cattle.io/kube-version: '>= 1.19.0-0' + catalog.cattle.io/namespace: kubeslice-controller + catalog.cattle.io/release-name: kubeslice-controller + apiVersion: v2 + appVersion: 1.3.4 + created: "2023-11-13T12:57:28.579372599Z" + description: Multi cloud networking (MCN), multi cluster, hybrid cloud networking + tool for efficient, secure, policy-enforced connectivity and true multi-tenancy + capabilities. KubeSlice enables enterprise platform teams to reduce infrastructure + costs, cluster/namespace sprawl, avoid complex firewall and gateway configurations + and more. 
+ digest: 900ce96b29e781803ac7a9721006036d9f67fee10c5c9798e11fdf11fff71644 + icon: https://kubeslice.io/documentation/open-source/img/kubeslice-logo.svg + keywords: + - multicloud + - multi cloud + - multitenant + - multitenancy + - multi tenant + - multi tenancy + - federated mesh + - federated clusters + - federated k8s + - federated kubernetes + - cluster sprawl + - sprawl + - namespace sprawl + - network policy + - overlay network + - mesh network + - security + - networking + - infrastructure + - application + kubeVersion: '>= 1.19.0-0' + maintainers: + - email: support@avesha.io + name: Avesha + name: kubeslice-controller + type: application + urls: + - assets/avesha/kubeslice-controller-1.3.4.tgz + version: 1.3.4 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Avesha Kubeslice Controller @@ -36717,6 +36977,52 @@ entries: - assets/avesha/kubeslice-controller-0.4.2.tgz version: 0.4.2 kubeslice-worker: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Avesha Kubeslice Worker + catalog.cattle.io/kube-version: '>= 1.19.0-0' + catalog.cattle.io/namespace: kubeslice-system + catalog.cattle.io/release-name: kubeslice-worker + apiVersion: v2 + appVersion: 1.3.4 + created: "2023-11-13T12:57:28.599851446Z" + description: Multi cloud networking (MCN), multi cluster, hybrid cloud networking + tool for efficient, secure, policy-enforced connectivity and true multi-tenancy + capabilities. KubeSlice enables enterprise platform teams to reduce infrastructure + costs, cluster/namespace sprawl, avoid complex firewall and gateway configurations + and more. + digest: 5cbf03aa507af9db994d2071d70aac201267c3871640f72d2aadba0ba30e4ec0 + icon: https://kubeslice.io/documentation/open-source/img/kubeslice-logo.svg + keywords: + - multicloud + - multi cloud + - multitenant + - multitenancy + - multi tenant + - multi tenancy + - federated mesh + - federated clusters + - federated k8s + - federated kubernetes + - cluster sprawl + - sprawl + - namespace sprawl + - network policy + - overlay network + - mesh network + - security + - networking + - infrastructure + - application + kubeVersion: '>= 1.19.0-0' + maintainers: + - email: support@avesha.io + name: Avesha + name: kubeslice-worker + type: application + urls: + - assets/avesha/kubeslice-worker-1.3.4.tgz + version: 1.3.4 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Avesha Kubeslice Worker @@ -41871,6 +42177,31 @@ entries: - assets/bitnami/mysql-9.4.1.tgz version: 9.4.1 nats: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: NATS Server + catalog.cattle.io/kube-version: '>=1.16-0' + catalog.cattle.io/release-name: nats + apiVersion: v2 + appVersion: 2.10.5 + created: "2023-11-13T12:57:37.358013548Z" + description: A Helm chart for the NATS.io High Speed Cloud Native Distributed + Communications Technology. 
+ digest: aab67ff399975943df800f5d1febeae341692c1dff01b7ee4030e7009c713e59 + home: http://github.com/nats-io/k8s + icon: https://nats.io/img/nats-icon-color.png + keywords: + - nats + - messaging + - cncf + maintainers: + - email: info@nats.io + name: The NATS Authors + url: https://github.com/nats-io + name: nats + urls: + - assets/nats/nats-1.1.5.tgz + version: 1.1.5 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: NATS Server @@ -47873,6 +48204,51 @@ entries: - assets/portworx/portworx-essentials-2.9.100.tgz version: 2.9.100 postgresql: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: PostgreSQL + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: postgresql + category: Database + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r91 + - name: postgres-exporter + image: docker.io/bitnami/postgres-exporter:0.15.0-debian-11-r2 + - name: postgresql + image: docker.io/bitnami/postgresql:16.1.0-debian-11-r2 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 16.1.0 + created: "2023-11-13T12:57:30.76968738Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: PostgreSQL (Postgres) is an open source object-relational database + known for reliability and data integrity. ACID-compliant, it supports foreign + keys, joins, views, triggers and stored procedures. + digest: 8317f1cb7115c2e592e41a9b8d0b5146bb4e4267a28182f46791dbb0d58ef86c + home: https://bitnami.com + icon: https://wiki.postgresql.org/images/a/a4/PostgreSQL_logo.3colors.svg + keywords: + - postgresql + - postgres + - database + - sql + - replication + - cluster + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: postgresql + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/postgresql + urls: + - assets/bitnami/postgresql-13.2.7.tgz + version: 13.2.7 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: PostgreSQL @@ -51187,6 +51563,50 @@ entries: - assets/quobyte/quobyte-cluster-0.1.5.tgz version: 0.1.5 redis: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Redis + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: redis + category: Database + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r91 + - name: redis-exporter + image: docker.io/bitnami/redis-exporter:1.55.0-debian-11-r2 + - name: redis-sentinel + image: docker.io/bitnami/redis-sentinel:7.2.3-debian-11-r1 + - name: redis + image: docker.io/bitnami/redis:7.2.3-debian-11-r1 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 7.2.3 + created: "2023-11-13T12:57:31.15414715Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Redis(R) is an open source, advanced key-value store. It is often + referred to as a data structure server since keys can contain strings, hashes, + lists, sets and sorted sets. + digest: b1c218b483bb176b17da1410ce77dbd7d387db4a3d044abc9cfe9733edaa574d + home: https://bitnami.com + icon: https://redis.com/wp-content/uploads/2021/08/redis-logo.png + keywords: + - redis + - keyvalue + - database + maintainers: + - name: VMware, Inc. 
+ url: https://github.com/bitnami/charts + name: redis + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/redis + urls: + - assets/bitnami/redis-18.3.2.tgz + version: 18.3.2 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Redis @@ -59445,6 +59865,37 @@ entries: - assets/bitnami/spark-6.3.8.tgz version: 6.3.8 speedscale-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Speedscale Operator + catalog.cattle.io/kube-version: '>= 1.17.0-0' + catalog.cattle.io/release-name: speedscale-operator + apiVersion: v1 + appVersion: 1.4.32 + created: "2023-11-13T12:57:39.017657005Z" + description: Stress test your APIs with real world scenarios. Collect and replay + traffic without scripting. + digest: 5734dece6093117c71b1c988ad7d84eb76c530fb93dd63dfaf9e0c769fe9c029 + home: https://speedscale.com + icon: https://raw.githubusercontent.com/speedscale/assets/main/logo/gold_logo_only.png + keywords: + - speedscale + - test + - testing + - regression + - reliability + - load + - replay + - network + - traffic + kubeVersion: '>= 1.17.0-0' + maintainers: + - email: support@speedscale.com + name: Speedscale Support + name: speedscale-operator + urls: + - assets/speedscale/speedscale-operator-1.4.3.tgz + version: 1.4.3 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Speedscale Operator @@ -66700,6 +67151,60 @@ entries: - assets/hashicorp/vault-0.22.0.tgz version: 0.22.0 wordpress: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: WordPress + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: wordpress + category: CMS + images: | + - name: apache-exporter + image: docker.io/bitnami/apache-exporter:1.0.3-debian-11-r1 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r91 + - name: wordpress + image: docker.io/bitnami/wordpress:6.4.1-debian-11-r1 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 6.4.1 + created: "2023-11-13T12:57:32.381729295Z" + dependencies: + - condition: memcached.enabled + name: memcached + repository: file://./charts/memcached + version: 6.x.x + - condition: mariadb.enabled + name: mariadb + repository: file://./charts/mariadb + version: 14.x.x + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: WordPress is the world's most popular blogging and content management + platform. Powerful yet simple, everyone from students to global corporations + use it to build beautiful, functional websites. + digest: 51956a19a923c90604cab479e28bdde4522c9f3fb86ce8aafef5b549d325dc89 + home: https://bitnami.com + icon: https://s.w.org/style/images/about/WordPress-logotype-simplified.png + keywords: + - application + - blog + - cms + - http + - php + - web + - wordpress + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: wordpress + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/wordpress + urls: + - assets/bitnami/wordpress-18.1.11.tgz + version: 18.1.11 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: WordPress