# rancher-partner-charts/charts/artifactory-ha/values.yaml

# Default values for artifactory-ha.
# This is a YAML-formatted file.
# Beware when changing values here. You should know what you are doing!
# Access the values with {{ .Values.key.subkey }}
global:
# imageRegistry: docker.bintray.io
# imagePullSecrets:
# - myRegistryKeySecretName
## Chart.AppVersion can be overridden using global.versions.artifactory or .Values.artifactory.image.tag
## Note: Order of preference is 1) global.versions 2) .Values.artifactory.image.tag 3) Chart.AppVersion
## This applies also for nginx images (.Values.nginx.image.tag)
versions: {}
# artifactory:
# joinKey:
# masterKey:
# joinKeySecretName:
# masterKeySecretName:
# customInitContainersBegin: |
# customInitContainers: |
# customVolumes: |
# customVolumeMounts: |
# customSidecarContainers: |
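## Example: pin image versions globally via global.versions (a sketch; the version value is a placeholder):
# versions:
#   artifactory: <artifactory-version>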
initContainerImage: docker.bintray.io/alpine:3.12.1
installer:
type:
platform:
installerInfo: '{"productId": "Helm_artifactory-ha/{{ .Chart.Version }}", "features": [ { "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}"}]}'
# For supporting pulling from private registries
# imagePullSecrets:
# - myRegistryKeySecretName
## Artifactory systemYaml override
## This is for advanced use cases where users want to provide their own systemYaml for configuring Artifactory
## Refer: https://www.jfrog.com/confluence/display/JFROG/Artifactory+System+YAML
## Note: This will override existing (default) .Values.artifactory.systemYaml in values.yaml
## Alternatively, systemYaml can be overridden via customInitContainers using external sources like vaults, external repositories, etc. Please refer to the customInitContainers section below for an example.
## Note: Order of preference is 1) customInitContainers 2) systemYamlOverride existingSecret 3) default systemYaml in values.yaml
systemYamlOverride:
## You can use a pre-existing secret by specifying existingSecret
existingSecret:
## The dataKey should be the name of the secret data key created.
dataKey:
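## Example (a sketch; the secret name "my-system-yaml" is an assumption; create it in advance with:
## kubectl create secret generic my-system-yaml --from-file=system.yaml):
# existingSecret: my-system-yaml
# dataKey: system.yaml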
## Role Based Access Control
## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
rbac:
create: true
role:
## Rules to create. It follows the role specification
rules:
- apiGroups:
- ''
resources:
- services
- endpoints
- pods
verbs:
- get
- watch
- list
## Service Account
## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/
##
serviceAccount:
create: true
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
name:
annotations: {}
ingress:
enabled: false
defaultBackend:
enabled: true
# Used to create an Ingress record.
hosts: []
routerPath: /
artifactoryPath: /artifactory/
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# nginx.ingress.kubernetes.io/proxy-body-size: "0"
labels: {}
# traffic-type: external
# traffic-type: internal
tls: []
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - artifactory.domain.example
# Additional ingress rules
additionalRules: []
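## Example additional rule (a sketch; the host and backend service names are assumptions):
# - host: artifactory.domain.example
#   http:
#     paths:
#       - path: /
#         backend:
#           serviceName: artifactory-ha
#           servicePort: 8082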
## Allows adding a custom ingress
customIngress: |
networkpolicy:
# Allows all ingress and egress
- name: artifactory
podSelector:
matchLabels:
app: artifactory-ha
egress:
- {}
ingress:
- {}
# Uncomment to allow only artifactory pods to communicate with postgresql (if postgresql.enabled is true)
# - name: postgresql
# podSelector:
# matchLabels:
# app: postgresql
# ingress:
# - from:
# - podSelector:
# matchLabels:
# app: artifactory-ha
## Database configurations
## Use the wait-for-db init container. Set to false to skip
waitForDatabase: true
## Configuration values for the postgresql dependency
## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md
##
postgresql:
enabled: true
image:
registry: docker.bintray.io
repository: bitnami/postgresql
tag: 12.5.0-debian-10-r25
postgresqlUsername: artifactory
postgresqlPassword: ""
postgresqlDatabase: artifactory
postgresqlExtendedConf:
listenAddresses: "'*'"
maxConnections: "1500"
persistence:
enabled: true
size: 50Gi
service:
port: 5432
master:
nodeSelector: {}
affinity: {}
tolerations: []
slave:
nodeSelector: {}
affinity: {}
tolerations: []
resources: {}
# requests:
# memory: "512Mi"
# cpu: "100m"
# limits:
# memory: "1Gi"
# cpu: "500m"
## If NOT using the PostgreSQL in this chart (postgresql.enabled=false),
## you MUST specify custom database details here or Artifactory will NOT start
database:
type:
driver:
## If you set the url, leave host and port empty
url:
## If you would like this chart to create the secret containing the db
## password, use these values
user:
password:
## If you have existing Kubernetes secrets containing db credentials, use
## these values
secrets: {}
# user:
# name: "rds-artifactory"
# key: "db-user"
# password:
# name: "rds-artifactory"
# key: "db-password"
# url:
# name: "rds-artifactory"
# key: "db-url"
logger:
image:
registry: docker.bintray.io
repository: busybox
tag: 1.31.1
# Artifactory
artifactory:
name: artifactory-ha
# Note that by default we use appVersion to get image tag/version
image:
registry: docker.bintray.io
repository: jfrog/artifactory-pro
# tag:
pullPolicy: IfNotPresent
# Create a priority class for the Artifactory pods or use an existing one
# NOTE - The maximum allowed value of a user-defined priority is 1000000000
priorityClass:
create: false
value: 1000000000
## Override default name
# name:
## Use an existing priority class
# existingPriorityClass:
# Delete the db.properties file at ARTIFACTORY_HOME/etc/db.properties on startup
deleteDBPropertiesOnStartup: true
database:
maxOpenConnections: 80
tomcat:
connector:
maxThreads: 200
extraConfig: 'acceptCount="100"'
# Support for open metrics is only available for Artifactory 7.7.x (appVersions) and above.
# To enable set `.Values.artifactory.openMetrics.enabled` to `true`
# Refer to https://www.jfrog.com/confluence/display/JFROG/Open+Metrics
openMetrics:
enabled: false
# This directory is intended for use with an NFS 'eventual' configuration for HA
haDataDir:
enabled: false
path:
haBackupDir:
enabled: false
path:
# Files to copy to ARTIFACTORY_HOME/ on each Artifactory startup
copyOnEveryStartup:
# # Absolute path
# - source: /artifactory_bootstrap/binarystore.xml
# # Relative to ARTIFACTORY_HOME/
# target: etc/artifactory/
# # Absolute path
# - source: /artifactory_bootstrap/artifactory.lic
# # Relative to ARTIFACTORY_HOME/
# target: etc/artifactory/
# Sidecar containers for tailing Artifactory logs
loggers: []
# - access-audit.log
# - access-request.log
# - access-security-audit.log
# - access-service.log
# - artifactory-access.log
# - artifactory-event.log
# - artifactory-import-export.log
# - artifactory-request.log
# - artifactory-service.log
# - frontend-request.log
# - frontend-service.log
# - metadata-request.log
# - metadata-service.log
# - router-request.log
# - router-service.log
# - router-traefik.log
# - derby.log
# Loggers containers resources
loggersResources: {}
# requests:
# memory: "10Mi"
# cpu: "10m"
# limits:
# memory: "100Mi"
# cpu: "50m"
# Sidecar containers for tailing Tomcat (catalina) logs
catalinaLoggers: []
# - tomcat-catalina.log
# - tomcat-localhost.log
# Tomcat (catalina) loggers resources
catalinaLoggersResources: {}
# requests:
# memory: "10Mi"
# cpu: "10m"
# limits:
# memory: "100Mi"
# cpu: "50m"
# Migration support from 6.x to 7.x
migration:
enabled: true
timeoutSeconds: 3600
## Extra pre-start command in migration Init Container to install JDBC driver for MySQL/MariaDB/Oracle
# preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar"
## Add custom init containers execution before predefined init containers
customInitContainersBegin: |
# - name: "custom-setup"
# image: "{{ .Values.initContainerImage }}"
# imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}"
# command:
# - 'sh'
# - '-c'
# - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup'
# volumeMounts:
# - mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
# name: volume
## Add custom init containers execution after predefined init containers
customInitContainers: |
# - name: "custom-systemyaml-setup"
# image: "{{ .Values.initContainerImage }}"
# imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}"
# command:
# - 'sh'
# - '-c'
# - 'wget -O {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml https://<repo-url>/systemyaml'
# volumeMounts:
# - mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
# name: volume
## Add custom sidecar containers
# - The provided example uses a custom volume (customVolumes)
# - The provided example shows running the container as root (id 0)
customSidecarContainers: |
# - name: "sidecar-list-etc"
# image: "{{ .Values.initContainerImage }}"
# imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}"
# securityContext:
# allowPrivilegeEscalation: false
# command:
# - 'sh'
# - '-c'
# - 'sh /scripts/script.sh'
# volumeMounts:
# - mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
# name: volume
# - mountPath: "/scripts/script.sh"
# name: custom-script
# subPath: script.sh
# resources:
# requests:
# memory: "32Mi"
# cpu: "50m"
# limits:
# memory: "128Mi"
# cpu: "100m"
## Add custom volumes
customVolumes: |
# - name: custom-script
# configMap:
# name: custom-script
## Add custom volumeMounts
customVolumeMounts: |
# - name: custom-script
# mountPath: "/scripts/script.sh"
# subPath: script.sh
# - name: posthook-start
# mountPath: "/scripts/posthoook-start.sh"
# subPath: posthoook-start.sh
# - name: prehook-start
# mountPath: "/scripts/prehook-start.sh"
# subPath: prehook-start.sh
# Add custom persistent volume mounts - Available for the pod
# If skipPrepareContainer is set to true, the prepare-custom-persistent-volume init container will be skipped
customPersistentPodVolumeClaim: {}
# name:
# mountPath:
# accessModes:
# - "-"
# size:
# storageClassName:
# skipPrepareContainer: false
# Add custom persistent volume mounts - Available to the entire namespace
customPersistentVolumeClaim: {}
# name:
# mountPath:
# accessModes:
# - "-"
# size:
# storageClassName:
## Artifactory HA requires a unique master key. Each Artifactory node must have the same master key!
## You can generate one with the command: "openssl rand -hex 32"
## Pass it to helm with '--set artifactory.masterKey=${MASTER_KEY}'
## Alternatively, you can use a pre-existing secret with a key called master-key by specifying masterKeySecretName
## IMPORTANT: You should NOT use the example masterKey for a production deployment!
## IMPORTANT: This is mandatory for a fresh install of 7.x (app version)
# masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
# masterKeySecretName:
## Join Key for connecting other services to Artifactory.
## IMPORTANT: Setting this value overrides the existing joinKey
## IMPORTANT: You should NOT use the example joinKey for a production deployment!
# joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
## Alternatively, you can use a pre-existing secret with a key called join-key by specifying joinKeySecretName
# joinKeySecretName:
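## Example: pre-create both secrets with the expected data keys (a sketch; the secret names are assumptions):
## kubectl create secret generic my-masterkey-secret --from-literal=master-key=$(openssl rand -hex 32)
## kubectl create secret generic my-joinkey-secret --from-literal=join-key=$(openssl rand -hex 32)
# masterKeySecretName: my-masterkey-secret
# joinKeySecretName: my-joinkey-secret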
# Add custom secrets - secret per file
customSecrets:
# - name: custom-secret
# key: custom-secret.yaml
# data: >
# custom_secret_config:
# parameter1: value1
# parameter2: value2
# - name: custom-secret2
# key: custom-secret2.config
# data: |
# here the custom secret 2 config
## When true, the console logs of all services are redirected to a common console.log file
consoleLog: false
binarystore:
enabled: true
## admin allows setting the password for the default admin user.
## See: https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate
admin:
ip: "127.0.0.1"
username: "admin"
password:
secret:
dataKey:
## Artifactory license.
license:
## licenseKey is the license key in plain text. Use either this or the license.secret setting
licenseKey:
## If artifactory.license.secret is passed, it will be mounted as
## ARTIFACTORY_HOME/etc/artifactory.lic and loaded at run time.
secret:
## The dataKey should be the name of the secret data key created.
dataKey:
## Create a configMap containing artifactory.config.import.xml and security.import.xml, and pass the configMap name in the following parameter
configMapName:
# Add any list of configmaps to Artifactory
configMaps: |
# posthook-start.sh: |-
# echo "This is a post start script"
# posthook-end.sh: |-
# echo "This is a post end script"
## List of secrets for Artifactory user plugins.
## One Secret per plugin's files.
userPluginSecrets:
# - archive-old-artifacts
# - build-cleanup
# - webhook
# - '{{ template "my-chart.fullname" . }}'
## Extra pre-start command to install JDBC driver for MySQL/MariaDB/Oracle
# preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar"
## Extra post-start command to run extra commands after container starts
# postStartCommand:
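## Example post-start command (illustrative only; replace with whatever your setup needs):
# postStartCommand: "echo 'Artifactory container started'"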
## Extra environment variables that can be used to tune Artifactory to your needs.
## Uncomment and set value as needed
extraEnvironmentVariables:
# - name: SERVER_XML_ARTIFACTORY_PORT
# value: "8081"
# - name: SERVER_XML_ARTIFACTORY_MAX_THREADS
# value: "200"
# - name: SERVER_XML_ACCESS_MAX_THREADS
# value: "50"
# - name: SERVER_XML_ARTIFACTORY_EXTRA_CONFIG
# value: ""
# - name: SERVER_XML_ACCESS_EXTRA_CONFIG
# value: ""
# - name: SERVER_XML_EXTRA_CONNECTOR
# value: ""
# - name: DB_POOL_MAX_ACTIVE
# value: "100"
# - name: DB_POOL_MAX_IDLE
# value: "10"
# - name: MY_SECRET_ENV_VAR
# valueFrom:
# secretKeyRef:
# name: my-secret-name
# key: my-secret-key
# TODO: Fix javaOpts for member nodes (currently uses primary settings for all nodes)
systemYaml: |
shared:
logging:
consoleLog:
enabled: {{ .Values.artifactory.consoleLog }}
extraJavaOpts: >
-Dartifactory.access.client.max.connections={{ .Values.access.tomcat.connector.maxThreads }}
{{- with .Values.artifactory.primary.javaOpts }}
-Dartifactory.async.corePoolSize={{ .corePoolSize }}
{{- if .xms }}
-Xms{{ .xms }}
{{- end }}
{{- if .xmx }}
-Xmx{{ .xmx }}
{{- end }}
{{- if .jmx.enabled }}
-Dcom.sun.management.jmxremote
-Dcom.sun.management.jmxremote.port={{ .jmx.port }}
-Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }}
-Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }}
{{- if .jmx.host }}
-Djava.rmi.server.hostname={{ tpl .jmx.host $ }}
{{- else }}
-Djava.rmi.server.hostname={{ template "artifactory-ha.fullname" $ }}
{{- end }}
{{- if .jmx.authenticate }}
-Dcom.sun.management.jmxremote.authenticate=true
-Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }}
-Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }}
{{- else }}
-Dcom.sun.management.jmxremote.authenticate=false
{{- end }}
{{- end }}
{{- if .other }}
{{ .other }}
{{- end }}
{{- end }}
database:
{{- if .Values.postgresql.enabled }}
type: postgresql
url: "jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}"
host: ""
driver: org.postgresql.Driver
username: "{{ .Values.postgresql.postgresqlUsername }}"
{{ else }}
type: "{{ .Values.database.type }}"
driver: "{{ .Values.database.driver }}"
{{- end }}
artifactory:
{{- if .Values.artifactory.openMetrics }}
metrics:
enabled: {{ .Values.artifactory.openMetrics.enabled }}
{{- end }}
{{- if or .Values.artifactory.haDataDir.enabled .Values.artifactory.haBackupDir.enabled }}
node:
{{- if .Values.artifactory.haDataDir.path }}
haDataDir: {{ .Values.artifactory.haDataDir.path }}
{{- end }}
{{- if .Values.artifactory.haBackupDir.path }}
haBackupDir: {{ .Values.artifactory.haBackupDir.path }}
{{- end }}
{{- end }}
database:
maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }}
tomcat:
connector:
maxThreads: {{ .Values.artifactory.tomcat.connector.maxThreads }}
extraConfig: {{ .Values.artifactory.tomcat.connector.extraConfig }}
frontend:
session:
timeMinutes: {{ .Values.frontend.session.timeoutMinutes | quote }}
access:
database:
maxOpenConnections: {{ .Values.access.database.maxOpenConnections }}
tomcat:
connector:
maxThreads: {{ .Values.access.tomcat.connector.maxThreads }}
extraConfig: {{ .Values.access.tomcat.connector.extraConfig }}
{{- if .Values.access.database.enabled }}
type: "{{ .Values.access.database.type }}"
url: "{{ .Values.access.database.url }}"
driver: "{{ .Values.access.database.driver }}"
username: "{{ .Values.access.database.user }}"
password: "{{ .Values.access.database.password }}"
{{- end }}
metadata:
database:
maxOpenConnections: {{ .Values.metadata.database.maxOpenConnections }}
{{- if .Values.artifactory.replicator.enabled }}
replicator:
enabled: true
{{- end }}
## IMPORTANT: If overriding artifactory.internalPort:
## DO NOT use a port lower than 1024, as Artifactory runs as a non-root user and cannot bind to ports lower than 1024!
externalPort: 8082
internalPort: 8082
externalArtifactoryPort: 8081
internalArtifactoryPort: 8081
uid: 1030
gid: 1030
terminationGracePeriodSeconds: 30
## By default, the Artifactory StatefulSet is created with a securityContext that sets the `runAsUser` and the `fsGroup` to the `artifactory.uid` value.
## If you want to disable the securityContext for the Artifactory StatefulSet, set this tag to false
setSecurityContext: true
## The following settings are to configure the frequency of the liveness and readiness probes
livenessProbe:
enabled: true
path: /router/api/v1/system/health
initialDelaySeconds: 180
failureThreshold: 10
timeoutSeconds: 10
periodSeconds: 10
successThreshold: 1
readinessProbe:
enabled: true
path: /router/api/v1/system/health
initialDelaySeconds: 90
failureThreshold: 10
timeoutSeconds: 10
periodSeconds: 10
successThreshold: 1
persistence:
enabled: true
local: false
redundancy: 3
mountPath: "/var/opt/jfrog/artifactory"
accessMode: ReadWriteOnce
size: 200Gi
## Use a custom Secret to be mounted as your binarystore.xml
## NOTE: This will ignore all settings below that make up binarystore.xml
customBinarystoreXmlSecret:
maxCacheSize: 50000000000
cacheProviderDir: cache
eventual:
numberOfThreads: 10
## artifactory data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClassName: "-"
## Set the persistence storage type. This will apply the matching binarystore.xml to Artifactory config
## Supported types are:
## file-system (default)
## nfs
## google-storage
## aws-s3
## aws-s3-v3
## azure-blob
type: file-system
## Use binarystoreXml to provide a custom binarystore.xml
## This can be a template or hardcoded.
binarystoreXml: |
{{- if eq .Values.artifactory.persistence.type "file-system" }}
<!-- File system replication -->
{{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }}
<!-- File Storage - Dynamic for Artifactory files, pre-created for DATA and BACKUP -->
<config version="4">
<chain>
<provider id="cache-fs" type="cache-fs"> <!-- This is a cached filestore -->
<provider id="sharding" type="sharding"> <!-- This is a sharding provider -->
{{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}}
<sub-provider id="shard{{ $sharedClaimNumber }}" type="state-aware"/>
{{- end }}
</provider>
</provider>
</chain>
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<!-- Specify the read and write strategy and redundancy for the sharding binary provider -->
<provider id="sharding" type="sharding">
<readBehavior>roundRobin</readBehavior>
<writeBehavior>percentageFreeSpace</writeBehavior>
<redundancy>2</redundancy>
</provider>
{{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}}
<!-- For each sub-provider (mount), specify the filestore location -->
<provider id="shard{{ $sharedClaimNumber }}" type="state-aware">
<fileStoreDir>filestore{{ $sharedClaimNumber }}</fileStoreDir>
</provider>
{{- end }}
</config>
{{- else }}
<config version="2">
<chain>
<provider id="cache-fs" type="cache-fs">
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<lenientLimit>2</lenientLimit>
<minSpareUploaderExecutor>2</minSpareUploaderExecutor>
<sub-provider id="state-aware" type="state-aware"/>
<dynamic-provider id="remote" type="remote"/>
<property name="zones" value="local,remote"/>
</provider>
</provider>
</chain>
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<!-- Shards add local file-system provider configuration -->
<provider id="state-aware" type="state-aware">
<fileStoreDir>shard-fs-1</fileStoreDir>
<zone>local</zone>
</provider>
<!-- Shards dynamic remote provider configuration -->
<provider id="remote" type="remote">
<checkPeriod>30</checkPeriod>
<serviceId>tester-remote1</serviceId>
<timeout>10000</timeout>
<zone>remote</zone>
<property name="header.remote.block" value="true"/>
</provider>
</config>
{{- end }}
{{- end }}
{{- if eq .Values.artifactory.persistence.type "google-storage" }}
<!-- Google storage -->
<config version="2">
<chain>
<provider id="cache-fs" type="cache-fs">
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<minSpareUploaderExecutor>2</minSpareUploaderExecutor>
<sub-provider id="eventual-cluster" type="eventual-cluster">
<provider id="retry" type="retry">
<provider id="google-storage" type="google-storage"/>
</provider>
</sub-provider>
<dynamic-provider id="remote" type="remote"/>
<property name="zones" value="local,remote"/>
</provider>
</provider>
</chain>
<!-- Set max cache-fs size -->
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<provider id="eventual-cluster" type="eventual-cluster">
<zone>local</zone>
</provider>
<provider id="remote" type="remote">
<checkPeriod>30</checkPeriod>
<timeout>10000</timeout>
<zone>remote</zone>
</provider>
<provider id="file-system" type="file-system">
<fileStoreDir>{{ .Values.artifactory.persistence.mountPath }}/data/filestore</fileStoreDir>
<tempDir>/tmp</tempDir>
</provider>
<provider id="google-storage" type="google-storage">
<providerId>google-cloud-storage</providerId>
<endpoint>{{ .Values.artifactory.persistence.googleStorage.endpoint }}</endpoint>
<httpsOnly>{{ .Values.artifactory.persistence.googleStorage.httpsOnly }}</httpsOnly>
<bucketName>{{ .Values.artifactory.persistence.googleStorage.bucketName }}</bucketName>
<identity>{{ .Values.artifactory.persistence.googleStorage.identity }}</identity>
<credential>{{ .Values.artifactory.persistence.googleStorage.credential }}</credential>
<path>{{ .Values.artifactory.persistence.googleStorage.path }}</path>
<bucketExists>{{ .Values.artifactory.persistence.googleStorage.bucketExists }}</bucketExists>
</provider>
</config>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }}
<!-- AWS S3 V3 -->
<config version="2">
<chain> <!--template="cluster-s3-storage-v3"-->
<provider id="cache-fs-eventual-s3" type="cache-fs">
<provider id="sharding-cluster-eventual-s3" type="sharding-cluster">
<sub-provider id="eventual-cluster-s3" type="eventual-cluster">
<provider id="retry-s3" type="retry">
<provider id="s3-storage-v3" type="s3-storage-v3"/>
</provider>
</sub-provider>
<dynamic-provider id="remote-s3" type="remote"/>
</provider>
</provider>
</chain>
<provider id="sharding-cluster-eventual-s3" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<property name="zones" value="local,remote"/>
</provider>
<provider id="remote-s3" type="remote">
<zone>remote</zone>
</provider>
<provider id="eventual-cluster-s3" type="eventual-cluster">
<zone>local</zone>
</provider>
<!-- Set max cache-fs size -->
<provider id="cache-fs-eventual-s3" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
{{- with .Values.artifactory.persistence.awsS3V3 }}
<provider id="s3-storage-v3" type="s3-storage-v3">
<testConnection>{{ .testConnection }}</testConnection>
{{- if .identity }}
<identity>{{ .identity }}</identity>
{{- end }}
{{- if .credential }}
<credential>{{ .credential }}</credential>
{{- end }}
<region>{{ .region }}</region>
<bucketName>{{ .bucketName }}</bucketName>
<path>{{ .path }}</path>
<endpoint>{{ .endpoint }}</endpoint>
{{- with .maxConnections }}
<maxConnections>{{ . }}</maxConnections>
{{- end }}
{{- with .kmsServerSideEncryptionKeyId }}
<kmsServerSideEncryptionKeyId>{{ . }}</kmsServerSideEncryptionKeyId>
{{- end }}
{{- with .kmsKeyRegion }}
<kmsKeyRegion>{{ . }}</kmsKeyRegion>
{{- end }}
{{- with .kmsCryptoMode }}
<kmsCryptoMode>{{ . }}</kmsCryptoMode>
{{- end }}
{{- if .useInstanceCredentials }}
<useInstanceCredentials>true</useInstanceCredentials>
{{- else }}
<useInstanceCredentials>false</useInstanceCredentials>
{{- end }}
<usePresigning>{{ .usePresigning }}</usePresigning>
<signatureExpirySeconds>{{ .signatureExpirySeconds }}</signatureExpirySeconds>
{{- with .cloudFrontDomainName }}
<cloudFrontDomainName>{{ . }}</cloudFrontDomainName>
{{- end }}
{{- with .cloudFrontKeyPairId }}
<cloudFrontKeyPairId>{{ . }}</cloudFrontKeyPairId>
{{- end }}
{{- with .cloudFrontPrivateKey }}
<cloudFrontPrivateKey>{{ . }}</cloudFrontPrivateKey>
{{- end }}
{{- with .enableSignedUrlRedirect }}
<enableSignedUrlRedirect>{{ . }}</enableSignedUrlRedirect>
{{- end }}
{{- with .enablePathStyleAccess }}
<enablePathStyleAccess>{{ . }}</enablePathStyleAccess>
{{- end }}
</provider>
{{- end }}
</config>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "aws-s3" }}
<!-- AWS S3 -->
<config version="2">
<chain> <!--template="cluster-s3"-->
<provider id="cache-fs" type="cache-fs">
<provider id="sharding-cluster" type="sharding-cluster">
<sub-provider id="eventual-cluster" type="eventual-cluster">
<provider id="retry-s3" type="retry">
<provider id="s3" type="s3"/>
</provider>
</sub-provider>
<dynamic-provider id="remote" type="remote"/>
</provider>
</provider>
</chain>
<!-- Set max cache-fs size -->
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<provider id="eventual-cluster" type="eventual-cluster">
<zone>local</zone>
</provider>
<provider id="remote" type="remote">
<checkPeriod>30</checkPeriod>
<timeout>10000</timeout>
<zone>remote</zone>
</provider>
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>{{ .Values.artifactory.persistence.redundancy }}</redundancy>
<property name="zones" value="local,remote"/>
</provider>
<provider id="s3" type="s3">
<endpoint>{{ .Values.artifactory.persistence.awsS3.endpoint }}</endpoint>
{{- if .Values.artifactory.persistence.awsS3.roleName }}
<roleName>{{ .Values.artifactory.persistence.awsS3.roleName }}</roleName>
<refreshCredentials>true</refreshCredentials>
{{- else }}
<refreshCredentials>{{ .Values.artifactory.persistence.awsS3.refreshCredentials }}</refreshCredentials>
{{- end }}
<s3AwsVersion>{{ .Values.artifactory.persistence.awsS3.s3AwsVersion }}</s3AwsVersion>
<testConnection>{{ .Values.artifactory.persistence.awsS3.testConnection }}</testConnection>
<httpsOnly>{{ .Values.artifactory.persistence.awsS3.httpsOnly }}</httpsOnly>
<region>{{ .Values.artifactory.persistence.awsS3.region }}</region>
<bucketName>{{ .Values.artifactory.persistence.awsS3.bucketName }}</bucketName>
{{- if .Values.artifactory.persistence.awsS3.identity }}
<identity>{{ .Values.artifactory.persistence.awsS3.identity }}</identity>
{{- end }}
{{- if .Values.artifactory.persistence.awsS3.credential }}
<credential>{{ .Values.artifactory.persistence.awsS3.credential }}</credential>
{{- end }}
<path>{{ .Values.artifactory.persistence.awsS3.path }}</path>
{{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }}
<property name="{{ $key }}" value="{{ $value }}"/>
{{- end }}
</provider>
</config>
{{- end }}
{{- if eq .Values.artifactory.persistence.type "azure-blob" }}
<!-- Azure Blob Storage -->
<config version="2">
<chain> <!--template="cluster-azure-blob-storage"-->
<provider id="cache-fs" type="cache-fs">
<provider id="sharding-cluster" type="sharding-cluster">
<sub-provider id="eventual-cluster" type="eventual-cluster">
<provider id="retry-azure-blob-storage" type="retry">
<provider id="azure-blob-storage" type="azure-blob-storage"/>
</provider>
</sub-provider>
<dynamic-provider id="remote" type="remote"/>
</provider>
</provider>
</chain>
<!-- Set max cache-fs size -->
<provider id="cache-fs" type="cache-fs">
<maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
<cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
</provider>
<!-- cluster eventual Azure Blob Storage Service default chain -->
<provider id="sharding-cluster" type="sharding-cluster">
<readBehavior>crossNetworkStrategy</readBehavior>
<writeBehavior>crossNetworkStrategy</writeBehavior>
<redundancy>2</redundancy>
<lenientLimit>1</lenientLimit>
<property name="zones" value="local,remote"/>
</provider>
<provider id="remote" type="remote">
<zone>remote</zone>
</provider>
<provider id="eventual-cluster" type="eventual-cluster">
<zone>local</zone>
</provider>
<!--cluster eventual template-->
<provider id="azure-blob-storage" type="azure-blob-storage">
<accountName>{{ .Values.artifactory.persistence.azureBlob.accountName }}</accountName>
<accountKey>{{ .Values.artifactory.persistence.azureBlob.accountKey }}</accountKey>
<endpoint>{{ .Values.artifactory.persistence.azureBlob.endpoint }}</endpoint>
<containerName>{{ .Values.artifactory.persistence.azureBlob.containerName }}</containerName>
<testConnection>{{ .Values.artifactory.persistence.azureBlob.testConnection }}</testConnection>
</provider>
</config>
{{- end }}
## For artifactory.persistence.type file-system
fileSystem:
## You may also use existing shared claims for the data and backup storage. This allows storage (NAS, for example) to be used for the data and backup dirs, which are safe to share across multiple Artifactory nodes.
## You may specify numberOfExistingClaims to indicate how many of these existing shared claims to mount. (Default = 1)
## Create PVCs with ReadWriteMany that match the naming conventions:
## {{ template "artifactory-ha.fullname" . }}-data-pvc-<claim-ordinal>
## {{ template "artifactory-ha.fullname" . }}-backup-pvc
## Example (using numberOfExistingClaims: 2)
## myexample-data-pvc-0
## myexample-data-pvc-1
## myexample-backup-pvc
## Note: While you need two PVCs fronting two PVs, multiple PVs can be attached to the same storage in many cases, allowing you to share an underlying drive. See the example PVC sketch below.
## Need to have the following set
existingSharedClaim:
enabled: false
numberOfExistingClaims: 1
## Should be a child directory of {{ .Values.artifactory.persistence.mountPath }}
dataDir: "{{ .Values.artifactory.persistence.mountPath }}/artifactory-data"
backupDir: "/var/opt/jfrog/artifactory-backup"
## For artifactory.persistence.type nfs
## If using NFS as the shared storage, you must have a running NFS server that is accessible by your Kubernetes
## cluster nodes.
## Need to have the following set
nfs:
# Must pass the actual IP of the NFS server with '--set artifactory.persistence.nfs.ip=${NFS_IP}'
ip:
haDataMount: "/data"
haBackupMount: "/backup"
dataDir: "/var/opt/jfrog/artifactory-ha"
backupDir: "/var/opt/jfrog/artifactory-backup"
capacity: 200Gi
mountOptions: []
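## Example NFS settings (a sketch; the server IP and mount options are illustrative):
# ip: 10.0.0.10
# mountOptions: ['nfsvers=4.1']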
## For artifactory.persistence.type google-storage
googleStorage:
## When using GCP buckets as your binary store (Available with enterprise license only)
gcpServiceAccount:
enabled: false
## Use either an existing secret prepared in advance or put the config (replace the content) in the values
## ref: https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/README.md#google-storage
# customSecretName:
# config: |
# {
# "type": "service_account",
# "project_id": "<project_id>",
# "private_key_id": "?????",
# "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n",
# "client_email": "???@j<project_id>.iam.gserviceaccount.com",
# "client_id": "???????",
# "auth_uri": "https://accounts.google.com/o/oauth2/auth",
# "token_uri": "https://oauth2.googleapis.com/token",
# "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
# "client_x509_cert_url": "https://www.googleapis.com/robot/v1....."
# }
endpoint: commondatastorage.googleapis.com
httpsOnly: false
# Set a unique bucket name
bucketName: "artifactory-ha-gcp"
identity:
credential:
path: "artifactory-ha/filestore"
bucketExists: false
## For artifactory.persistence.type aws-s3-v3
awsS3V3:
testConnection: false
identity:
credential:
region:
bucketName: artifactory-aws
path: artifactory/filestore
endpoint:
maxConnections: 50
kmsServerSideEncryptionKeyId:
kmsKeyRegion:
kmsCryptoMode:
useInstanceCredentials: true
usePresigning: false
signatureExpirySeconds: 300
cloudFrontDomainName:
cloudFrontKeyPairId:
cloudFrontPrivateKey:
enableSignedUrlRedirect: false
enablePathStyleAccess: false
## For artifactory.persistence.type aws-s3
## IMPORTANT: Make sure S3 `endpoint` and `region` match! See https://docs.aws.amazon.com/general/latest/gr/rande.html
awsS3:
# Set a unique bucket name
bucketName: "artifactory-ha-aws"
endpoint:
region:
roleName:
identity:
credential:
path: "artifactory-ha/filestore"
refreshCredentials: true
httpsOnly: true
testConnection: false
s3AwsVersion: "AWS4-HMAC-SHA256"
## Additional properties to set on the s3 provider
properties: {}
# httpclient.max-connections: 100
## For artifactory.persistence.type azure-blob
azureBlob:
accountName:
accountKey:
endpoint:
containerName:
testConnection: false
service:
name: artifactory
type: ClusterIP
## For supporting IP whitelisting on the Artifactory service (useful if setting service.type=LoadBalancer)
## Set this to a list of IP CIDR ranges
## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32']
## or pass from helm command line
## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}'
loadBalancerSourceRanges: []
annotations: {}
## Which nodes in the cluster should be in the external load balancer pool (have external traffic routed to them)
## Supported pool values
## members
## all
pool: members
## The following Java options are passed to the java process running Artifactory.
## These will be passed to all cluster members (primary and member nodes).
javaOpts: {}
# other: ""
## The following settings configure a dedicated Ingress object for the Replicator service
replicator:
enabled: false
ingress:
name:
hosts: []
annotations: {}
# kubernetes.io/ingress.class: nginx
# nginx.ingress.kubernetes.io/proxy-buffering: "off"
# nginx.ingress.kubernetes.io/configuration-snippet: |
# chunked_transfer_encoding on;
tls: []
# Secrets must be manually created in the namespace.
# - hosts:
# - artifactory.domain.example
# secretName: chart-example-tls-secret
## When the replicator is enabled and you want to use the tracker feature, set the trackerIngress.enabled flag to true
## Please refer to https://www.jfrog.com/confluence/display/JFROG/JFrog+Peer-to-Peer+%28P2P%29+Downloads
trackerIngress:
enabled: false
name:
hosts: []
annotations: {}
# kubernetes.io/ingress.class: nginx
# nginx.ingress.kubernetes.io/proxy-buffering: "off"
# nginx.ingress.kubernetes.io/configuration-snippet: |
# chunked_transfer_encoding on;
tls: []
# Secrets must be manually created in the namespace.
# - hosts:
# - artifactory.domain.example
# secretName: chart-example-tls-secret
ssh:
enabled: false
internalPort: 1339
externalPort: 1339
annotations: {}
## Type-specific configurations.
## There is a difference between the primary and the member nodes.
## Customizing their resources and Java parameters is done here.
primary:
name: artifactory-ha-primary
# preStartCommand specific to the primary node, to be run after artifactory.preStartCommand
# preStartCommand:
labels: {}
persistence:
## Set existingClaim to true or false
## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-primary-0`
existingClaim: false
## IMPORTANT: This value should remain at 1!
replicaCount: 1
# minAvailable: 1
## Resources for the primary node
resources: {}
# requests:
# memory: "1Gi"
# cpu: "500m"
# limits:
# memory: "2Gi"
# cpu: "1"
## The following Java options are passed to the java process running Artifactory primary node.
## You should set them according to the resources set above
javaOpts:
# xms: "1g"
# xmx: "2g"
corePoolSize: 16
jmx:
enabled: false
port: 9010
host:
ssl: false
# When authenticate is true, accessFile and passwordFile are required
authenticate: false
accessFile:
passwordFile:
# other: ""
nodeSelector: {}
tolerations: []
affinity: {}
## Only used if "affinity" is empty
podAntiAffinity:
## Valid values are "soft" or "hard"; any other value indicates no anti-affinity
type: ""
topologyKey: "kubernetes.io/hostname"
node:
name: artifactory-ha-member
# preStartCommand specific to the member node, to be run after artifactory.preStartCommand
# preStartCommand:
labels: {}
persistence:
## Set existingClaim to true or false
## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-member-0`
existingClaim: false
replicaCount: 2
minAvailable: 1
## Resources for the member nodes
resources: {}
# requests:
# memory: "1Gi"
# cpu: "500m"
# limits:
# memory: "2Gi"
# cpu: "1"
## The following Java options are passed to the java process running Artifactory member nodes.
## You should set them according to the resources set above
javaOpts:
# xms: "1g"
# xmx: "2g"
corePoolSize: 16
jmx:
enabled: false
port: 9010
host:
ssl: false
# When authenticate is true, accessFile and passwordFile are required
authenticate: false
accessFile:
passwordFile:
# other: ""
# xms: "1g"
# xmx: "2g"
# other: ""
nodeSelector: {}
## Wait for Artifactory primary
waitForPrimaryStartup:
enabled: true
## Setting time overrides the built-in check; the member will simply wait the specified time
time:
tolerations: []
## Complete specification of the "affinity" of the member nodes; if this is non-empty,
## "podAntiAffinity" values are not used.
affinity: {}
## Only used if "affinity" is empty
podAntiAffinity:
## Valid values are "soft" or "hard"; any other value indicates no anti-affinity
type: ""
topologyKey: "kubernetes.io/hostname"
frontend:
## Session settings
session:
## Time in minutes after which the frontend token will need to be refreshed
timeoutMinutes: '30'
access:
## Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file.
## ref: https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates
## When security.tls is set to true, JFrog Access will act as the Certificate Authority (CA) and sign the TLS certificates used by all the different JFrog Platform nodes.
## This ensures that the node to node communication is done over TLS.
accessConfig:
security:
tls: false
## You can use a pre-existing secret by specifying customCertificatesSecretName
## Example: Create a TLS secret using `kubectl create secret tls <secret-name> --cert=ca.crt --key=ca.private.key`
# customCertificatesSecretName:
## When resetAccessCAKeys is true, Access will regenerate the CA certificate and matching private key
# resetAccessCAKeys: false
database:
maxOpenConnections: 80
tomcat:
connector:
maxThreads: 50
extraConfig: 'acceptCount="100"'
metadata:
database:
maxOpenConnections: 80
# Init containers
initContainers:
resources: {}
# requests:
# memory: "64Mi"
# cpu: "10m"
# limits:
# memory: "128Mi"
# cpu: "250m"
# Nginx
nginx:
enabled: true
kind: Deployment
name: nginx
labels: {}
replicaCount: 1
minAvailable: 0
uid: 104
gid: 107
# Note that by default we use appVersion to get image tag/version
image:
registry: docker.bintray.io
repository: jfrog/nginx-artifactory-pro
# tag:
pullPolicy: IfNotPresent
# Priority Class name to be used in deployment if provided
priorityClassName:
# Sidecar containers for tailing Nginx logs
loggers: []
# - access.log
# - error.log
# Loggers containers resources
loggersResources: {}
# requests:
# memory: "64Mi"
# cpu: "25m"
# limits:
# memory: "128Mi"
# cpu: "50m"
# Logs options
logs:
stderr: false
level: warn
mainConf: |
# Main Nginx configuration file
worker_processes 4;
{{ if .Values.nginx.logs.stderr }}
error_log stderr {{ .Values.nginx.logs.level }};
{{- else -}}
error_log {{ .Values.nginx.persistence.mountPath }}/logs/error.log {{ .Values.nginx.logs.level }};
{{- end }}
pid /tmp/nginx.pid;
{{- if .Values.artifactory.ssh.enabled }}
## SSH Server Configuration
stream {
server {
listen {{ .Values.nginx.ssh.internalPort }};
proxy_pass {{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.ssh.externalPort }};
}
}
{{- end }}
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
variables_hash_max_size 1024;
variables_hash_bucket_size 64;
server_names_hash_max_size 4096;
server_names_hash_bucket_size 128;
types_hash_max_size 2048;
types_hash_bucket_size 64;
proxy_read_timeout 2400s;
client_header_timeout 2400s;
client_body_timeout 2400s;
proxy_connect_timeout 75s;
proxy_send_timeout 2400s;
proxy_buffer_size 128k;
proxy_buffers 40 128k;
proxy_busy_buffers_size 128k;
proxy_temp_file_write_size 250m;
proxy_http_version 1.1;
client_body_buffer_size 128k;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
log_format timing 'ip = $remote_addr '
'user = \"$remote_user\" '
'local_time = \"$time_local\" '
'host = $host '
'request = \"$request\" '
'status = $status '
'bytes = $body_bytes_sent '
'upstream = \"$upstream_addr\" '
'upstream_time = $upstream_response_time '
'request_time = $request_time '
'referer = \"$http_referer\" '
'UA = \"$http_user_agent\"';
access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
artifactoryConf: |
{{- if .Values.nginx.https.enabled }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt;
ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key;
ssl_session_cache shared:SSL:1m;
ssl_prefer_server_ciphers on;
{{- end }}
## server configuration
server {
{{- if .Values.nginx.internalPortHttps }}
listen {{ .Values.nginx.internalPortHttps }} ssl;
{{- else -}}
{{- if .Values.nginx.https.enabled }}
listen {{ .Values.nginx.https.internalPort }} ssl;
{{- end }}
{{- end }}
{{- if .Values.nginx.internalPortHttp }}
listen {{ .Values.nginx.internalPortHttp }};
{{- else -}}
{{- if .Values.nginx.http.enabled }}
listen {{ .Values.nginx.http.internalPort }};
{{- end }}
{{- end }}
server_name ~(?<repo>.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }}
{{- range .Values.ingress.hosts -}}
{{- if contains "." . -}}
{{ "" | indent 0 }} ~(?<repo>.+)\.{{ . }}
{{- end -}}
{{- end -}};
if ($http_x_forwarded_proto = '') {
set $http_x_forwarded_proto $scheme;
}
## Application specific logs
## access_log /var/log/nginx/artifactory-access.log timing;
## error_log /var/log/nginx/artifactory-error.log;
rewrite ^/artifactory/?$ / redirect;
if ( $repo != "" ) {
rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
}
chunked_transfer_encoding on;
client_max_body_size 0;
location / {
proxy_read_timeout 900;
proxy_pass_header Server;
proxy_cookie_path ~*^/.* /;
proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/;
{{- if .Values.nginx.service.ssloffload }}
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host;
{{- else }}
proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
proxy_set_header X-Forwarded-Port $server_port;
{{- end }}
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location /artifactory/ {
if ( $request_uri ~ ^/artifactory/(.*)$ ) {
proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1;
}
proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/;
}
}
}
service:
## For minikube, set this to NodePort, elsewhere use LoadBalancer
type: LoadBalancer
ssloffload: false
## For supporting IP whitelisting on the Nginx LoadBalancer service
## Set this to a list of IP CIDR ranges
## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32']
## or pass from helm command line
## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}'
loadBalancerSourceRanges: []
## Provide a static IP address
loadBalancerIP:
## There are two available options: “Cluster” (default) and “Local”.
externalTrafficPolicy: Cluster
labels: {}
# label-key: label-value
http:
enabled: true
externalPort: 80
internalPort: 80
https:
enabled: true
externalPort: 443
internalPort: 443
# DEPRECATED: The following will be replaced by L1065-L1076 in a future release
# externalPortHttp: 80
# internalPortHttp: 80
# externalPortHttps: 443
# internalPortHttps: 443
ssh:
internalPort: 1339
externalPort: 1339
## The following settings are to configure the frequency of the liveness and readiness probes
livenessProbe:
enabled: true
path: /router/api/v1/system/health
initialDelaySeconds: 180
failureThreshold: 10
timeoutSeconds: 10
periodSeconds: 10
successThreshold: 1
readinessProbe:
enabled: true
path: /router/api/v1/system/health
initialDelaySeconds: 120
failureThreshold: 10
timeoutSeconds: 10
periodSeconds: 10
successThreshold: 1
## The SSL secret that will be used by the Nginx pod
# tlsSecretName: chart-example-tls
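## Example: create the TLS secret before installing the chart (certificate file names are assumptions):
## kubectl create secret tls chart-example-tls --cert=tls.crt --key=tls.key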
## Custom ConfigMap for nginx.conf
customConfigMap:
## Custom ConfigMap for artifactory.conf
customArtifactoryConfigMap:
persistence:
mountPath: "/var/opt/jfrog/nginx"
enabled: false
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
size: 5Gi
## nginx data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClassName: "-"
resources: {}
# requests:
# memory: "250Mi"
# cpu: "100m"
# limits:
# memory: "250Mi"
# cpu: "500m"
nodeSelector: {}
tolerations: []
affinity: {}
# Filebeat Sidecar container
## The provided Filebeat configuration is for Artifactory logs. It assumes you have a Logstash instance installed and configured properly.
filebeat:
enabled: false
name: artifactory-filebeat
image:
repository: "docker.elastic.co/beats/filebeat"
version: 7.9.2
logstashUrl: "logstash:5044"
terminationGracePeriod: 10
livenessProbe:
exec:
command:
- sh
- -c
- |
#!/usr/bin/env bash -e
curl --fail 127.0.0.1:5066
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
readinessProbe:
exec:
command:
- sh
- -c
- |
#!/usr/bin/env bash -e
filebeat test output
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
resources: {}
# requests:
# memory: "100Mi"
# cpu: "100m"
# limits:
# memory: "100Mi"
# cpu: "100m"
filebeatYml: |
logging.level: info
path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat
name: artifactory-filebeat
queue.spool: ~
filebeat.inputs:
- type: log
enabled: true
close_eof: ${CLOSE:false}
paths:
- {{ .Values.artifactory.persistence.mountPath }}/log/*.log
fields:
service: "jfrt"
log_type: "artifactory"
output:
logstash:
hosts: ["{{ .Values.filebeat.logstashUrl }}"]
## Allows adding additional Kubernetes resources
## Use --- as a separator between multiple resources
## For an example, refer to https://github.com/jfrog/log-analytics-prometheus/blob/master/artifactory-ha-values.yaml
additionalResources: |
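## Example additional resource (a sketch; the ConfigMap name and data are illustrative):
# apiVersion: v1
# kind: ConfigMap
# metadata:
#   name: {{ template "artifactory-ha.fullname" . }}-extra-config
# data:
#   extra.properties: |
#     key=value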