diff --git a/packages/rke2-cilium/generated-changes/overlay/templates/cilium-configmap.yaml.orig b/packages/rke2-cilium/generated-changes/overlay/templates/cilium-configmap.yaml.orig new file mode 100644 index 0000000..d9dd121 --- /dev/null +++ b/packages/rke2-cilium/generated-changes/overlay/templates/cilium-configmap.yaml.orig @@ -0,0 +1,819 @@ +{{- if and (.Values.agent) (not .Values.preflight.enabled) }} +{{- /* Default values with backwards compatibility */ -}} +{{- $defaultEnableCnpStatusUpdates := "true" -}} +{{- $defaultBpfMapDynamicSizeRatio := 0.0 -}} +{{- $defaultBpfMasquerade := "false" -}} +{{- $defaultBpfClockProbe := "false" -}} +{{- $defaultBpfTProxy := "false" -}} +{{- $defaultIPAM := "cluster-pool" -}} +{{- $defaultSessionAffinity := "false" -}} +{{- $defaultOperatorApiServeAddr := "localhost:9234" -}} +{{- $defaultBpfCtTcpMax := 524288 -}} +{{- $defaultBpfCtAnyMax := 262144 -}} +{{- $enableIdentityMark := "true" -}} +{{- $fragmentTracking := "true" -}} +{{- $crdWaitTimeout := "5m" -}} +{{- $defaultKubeProxyReplacement := "probe" -}} + +{{- /* Default values when 1.8 was initially deployed */ -}} +{{- if semverCompare ">=1.8" (default "1.8" .Values.upgradeCompatibility) -}} + {{- $defaultEnableCnpStatusUpdates = "false" -}} + {{- $defaultBpfMapDynamicSizeRatio = 0.0025 -}} + {{- $defaultBpfMasquerade = "true" -}} + {{- $defaultBpfClockProbe = "true" -}} + {{- $defaultIPAM = "cluster-pool" -}} + {{- $defaultSessionAffinity = "true" -}} + {{- if .Values.ipv4.enabled }} + {{- $defaultOperatorApiServeAddr = "127.0.0.1:9234" -}} + {{- else -}} + {{- $defaultOperatorApiServeAddr = "[::1]:9234" -}} + {{- end }} + {{- $defaultBpfCtTcpMax = 0 -}} + {{- $defaultBpfCtAnyMax = 0 -}} +{{- end -}} + +{{- /* Default values when 1.10 was initially deployed */ -}} +{{- if semverCompare ">=1.10" (default "1.10" .Values.upgradeCompatibility) -}} + {{- $defaultKubeProxyReplacement = "disabled" -}} + {{- /* Needs to be explicitly disabled because it was enabled on all versions >=v1.8 above. */ -}} + {{- $defaultBpfMasquerade = "false" -}} +{{- end -}} + +{{- $ipam := (coalesce .Values.ipam.mode $defaultIPAM) -}} +{{- $bpfCtTcpMax := (coalesce .Values.bpf.ctTcpMax $defaultBpfCtTcpMax) -}} +{{- $bpfCtAnyMax := (coalesce .Values.bpf.ctAnyMax $defaultBpfCtAnyMax) -}} +{{- $kubeProxyReplacement := (coalesce .Values.kubeProxyReplacement $defaultKubeProxyReplacement) -}} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cilium-config + namespace: {{ .Release.Namespace }} +data: +{{- if .Values.etcd.enabled }} + # The kvstore configuration is used to enable use of a kvstore for state + # storage. This can either be provided with an external kvstore or with the + # help of cilium-etcd-operator which operates an etcd cluster automatically. + kvstore: etcd + {{- if .Values.etcd.k8sService }} + kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config", "etcd.operator": "true"}' + {{- else }} + kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' + {{- end }} + + # This etcd-config contains the etcd endpoints of your cluster. If you use + # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config + etcd-config: |- + --- + endpoints: + {{- if .Values.etcd.managed }} + - https://cilium-etcd-client.{{ .Release.Namespace }}.svc:2379 + {{- else }} + {{- range .Values.etcd.endpoints }} + - {{ . 
}} + {{- end }} + {{- end }} + {{- if or .Values.etcd.ssl .Values.etcd.managed }} + trusted-ca-file: '/var/lib/etcd-secrets/etcd-client-ca.crt' + key-file: '/var/lib/etcd-secrets/etcd-client.key' + cert-file: '/var/lib/etcd-secrets/etcd-client.crt' + {{- end }} +{{- end }} + +{{- if hasKey .Values "conntrackGCInterval" }} + conntrack-gc-interval: {{ .Values.conntrackGCInterval | quote }} +{{- end }} + +{{- if hasKey .Values "disableEnvoyVersionCheck" }} + disable-envoy-version-check: {{ .Values.disableEnvoyVersionCheck | quote }} +{{- end }} + + # Identity allocation mode selects how identities are shared between cilium + # nodes by setting how they are stored. The options are "crd" or "kvstore". + # - "crd" stores identities in kubernetes as CRDs (custom resource definition). + # These can be queried with: + # kubectl get ciliumid + # - "kvstore" stores identities in an etcd kvstore, that is + # configured below. Cilium versions before 1.6 supported only the kvstore + # backend. Upgrades from these older cilium versions should continue using + # the kvstore by commenting out the identity-allocation-mode below, or + # setting it to "kvstore". + identity-allocation-mode: {{ .Values.identityAllocationMode }} +{{- if hasKey .Values "identityHeartbeatTimeout" }} + identity-heartbeat-timeout: "{{ .Values.identityHeartbeatTimeout }}" +{{- end }} +{{- if hasKey .Values "identityGCInterval" }} + identity-gc-interval: "{{ .Values.identityGCInterval }}" +{{- end }} +{{- if hasKey .Values.operator "endpointGCInterval" }} + cilium-endpoint-gc-interval: "{{ .Values.operator.endpointGCInterval }}" +{{- end }} + +{{- if hasKey .Values.operator "nodeGCInterval" }} + nodes-gc-interval: "{{ .Values.operator.nodeGCInterval | default "0s" }}" +{{- end }} + +{{- if hasKey .Values "disableEndpointCRD" }} + # Disable the usage of CiliumEndpoint CRD + disable-endpoint-crd: "{{ .Values.disableEndpointCRD }}" +{{- end }} + +{{- if hasKey .Values "identityChangeGracePeriod" }} + # identity-change-grace-period is the grace period that needs to pass + # before an endpoint that has changed its identity will start using + # that new identity. During the grace period, the new identity has + # already been allocated and other nodes in the cluster have a chance + # to whitelist the new upcoming identity of the endpoint. + identity-change-grace-period: {{ default "5s" .Values.identityChangeGracePeriod | quote }} +{{- end }} + +{{- if hasKey .Values "labels" }} + # To include or exclude matched resources from cilium identity evaluation + labels: {{ .Values.labels | quote }} +{{- end }} + + # If you want to run cilium in debug mode change this value to true + debug: {{ .Values.debug.enabled | quote }} + +{{- if hasKey .Values.debug "verbose" }} + debug-verbose: "{{ .Values.debug.verbose }}" +{{- end }} + +{{- if ne (int .Values.healthPort) 9876 }} + # Set the TCP port for the agent health status API. This is not the port used + # for cilium-health. + agent-health-port: "{{ .Values.healthPort }}" +{{- end }} + +{{- if hasKey .Values "clusterHealthPort" }} + # Set the TCP port for the agent health API. This port is used for cilium-health. + cluster-health-port: "{{ .Values.clusterHealthPort }}" +{{- end }} + +{{- if hasKey .Values "policyEnforcementMode" }} + # The agent can be put into the following three policy enforcement modes + # default, always and never. 
+ # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes + enable-policy: "{{ lower .Values.policyEnforcementMode }}" +{{- end }} + +{{- if .Values.prometheus.enabled }} + # If you want metrics enabled in all of your Cilium agents, set the port for + # which the Cilium agents will have their metrics exposed. + # This option deprecates the "prometheus-serve-addr" in the + # "cilium-metrics-config" ConfigMap + # NOTE that this will open the port on ALL nodes where Cilium pods are + # scheduled. + prometheus-serve-addr: ":{{ .Values.prometheus.port }}" + # Port to expose Envoy metrics (e.g. "9095"). Envoy metrics listener will be disabled if this + # field is not set. + {{- if .Values.proxy.prometheus.enabled }} + proxy-prometheus-port: "{{ .Values.proxy.prometheus.port }}" + {{- end }} + {{- if .Values.prometheus.metrics }} + # Metrics that should be enabled or disabled from the default metric + # list. (+metric_foo to enable metric_foo , -metric_bar to disable + # metric_bar). + metrics: {{- range .Values.prometheus.metrics }} + {{ . }} + {{- end }} + {{- end }} +{{- end }} + +{{- if .Values.operator.prometheus.enabled }} + # If you want metrics enabled in cilium-operator, set the port for + # which the Cilium Operator will have their metrics exposed. + # NOTE that this will open the port on the nodes where Cilium operator pod + # is scheduled. + operator-prometheus-serve-addr: ":{{ .Values.operator.prometheus.port }}" + enable-metrics: "true" +{{- end }} + +{{- if .Values.operator.skipCRDCreation }} + skip-crd-creation: "true" +{{- end }} + + # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 + # address. + enable-ipv4: {{ .Values.ipv4.enabled | quote }} + + # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 + # address. + enable-ipv6: {{ .Values.ipv6.enabled | quote }} + +{{- if .Values.cleanState }} + # If a serious issue occurs during Cilium startup, this + # invasive option may be set to true to remove all persistent + # state. Endpoints will not be restored using knowledge from a + # prior Cilium run, so they may receive new IP addresses upon + # restart. This also triggers clean-cilium-bpf-state. + clean-cilium-state: "true" +{{- end }} + +{{- if .Values.cleanBpfState }} + # If you want to clean cilium BPF state, set this to true; + # Removes all BPF maps from the filesystem. Upon restart, + # endpoints are restored with the same IP addresses, however + # any ongoing connections may be disrupted briefly. + # Loadbalancing decisions will be reset, so any ongoing + # connections via a service may be loadbalanced to a different + # backend after restart. + clean-cilium-bpf-state: "true" +{{- end }} + +{{- if hasKey .Values.cni "customConf" }} + # Users who wish to specify their own custom CNI configuration file must set + # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. + custom-cni-conf: "{{ .Values.cni.customConf }}" +{{- end }} + +{{- if hasKey .Values "bpfClockProbe" }} + enable-bpf-clock-probe: {{ .Values.bpfClockProbe | quote }} +{{- else if eq $defaultBpfClockProbe "true" }} + enable-bpf-clock-probe: {{ $defaultBpfClockProbe | quote }} +{{- end }} + +{{- if hasKey .Values.bpf "tproxy" }} + enable-bpf-tproxy: {{ .Values.bpf.tproxy | quote }} +{{- else if eq $defaultBpfTProxy "true" }} + enable-bpf-tproxy: {{ $defaultBpfTProxy | quote }} +{{- end }} + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". 
The higher the level, the less packets + # that will be seen in monitor output. + monitor-aggregation: {{ .Values.bpf.monitorAggregation }} + + # The monitor aggregation interval governs the typical time between monitor + # notification events for each allowed connection. + # + # Only effective when monitor aggregation is set to "medium" or higher. + monitor-aggregation-interval: {{ .Values.bpf.monitorInterval }} + + # The monitor aggregation flags determine which TCP flags which, upon the + # first observation, cause monitor notifications to be generated. + # + # Only effective when monitor aggregation is set to "medium" or higher. + monitor-aggregation-flags: {{ .Values.bpf.monitorFlags }} + + + + +{{- if hasKey .Values.bpf "mapDynamicSizeRatio" }} + # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic + # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. + bpf-map-dynamic-size-ratio: {{ .Values.bpf.mapDynamicSizeRatio | quote }} +{{- else if ne $defaultBpfMapDynamicSizeRatio 0.0 }} + # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic + # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. + bpf-map-dynamic-size-ratio: {{ $defaultBpfMapDynamicSizeRatio | quote }} +{{- end }} + +{{- if hasKey .Values.bpf "hostRouting" }} + enable-host-legacy-routing: {{ .Values.bpf.hostRouting | quote }} +{{- end }} + +{{- if or $bpfCtTcpMax $bpfCtAnyMax }} + # bpf-ct-global-*-max specifies the maximum number of connections + # supported across all endpoints, split by protocol: tcp or other. One pair + # of maps uses these values for IPv4 connections, and another pair of maps + # use these values for IPv6 connections. + # + # If these values are modified, then during the next Cilium startup the + # tracking of ongoing connections may be disrupted. As a result, reply + # packets may be dropped and the load-balancing decisions for established + # connections may change. + # + # For users upgrading from Cilium 1.2 or earlier, to minimize disruption + # during the upgrade process, set bpf-ct-global-tcp-max to 1000000. +{{- if $bpfCtTcpMax }} + bpf-ct-global-tcp-max: {{ $bpfCtTcpMax | quote }} +{{- end }} +{{- if $bpfCtAnyMax }} + bpf-ct-global-any-max: {{ $bpfCtAnyMax | quote }} +{{- end }} +{{- end }} +{{- if hasKey .Values.bpf "natMax" }} + # bpf-nat-global-max specified the maximum number of entries in the + # BPF NAT table. + bpf-nat-global-max: "{{ .Values.bpf.natMax }}" +{{- end }} +{{- if hasKey .Values.bpf "neighMax" }} + # bpf-neigh-global-max specified the maximum number of entries in the + # BPF neighbor table. + bpf-neigh-global-max: "{{ .Values.bpf.neighMax }}" +{{- end }} +{{- if hasKey .Values.bpf "policyMapMax" }} + # bpf-policy-map-max specifies the maximum number of entries in endpoint + # policy map (per endpoint) + bpf-policy-map-max: "{{ .Values.bpf.policyMapMax }}" +{{- end }} +{{- if hasKey .Values.bpf "lbMapMax" }} + # bpf-lb-map-max specifies the maximum number of entries in bpf lb service, + # backend and affinity maps. + bpf-lb-map-max: "{{ .Values.bpf.lbMapMax }}" +{{- end }} + # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass + # optimization for nodeport reverse NAT handling. 
+{{- if hasKey .Values.bpf "lbBypassFIBLookup" }} + bpf-lb-bypass-fib-lookup: {{ .Values.bpf.lbBypassFIBLookup | quote }} +{{- end }} +{{- if hasKey .Values.bpf "lbExternalClusterIP" }} + bpf-lb-external-clusterip: {{ .Values.bpf.lbExternalClusterIP | quote }} +{{- end }} + + # Pre-allocation of map entries allows per-packet latency to be reduced, at + # the expense of up-front memory allocation for the entries in the maps. The + # default value below will minimize memory usage in the default installation; + # users who are sensitive to latency may consider setting this to "true". + # + # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore + # this option and behave as though it is set to "true". + # + # If this value is modified, then during the next Cilium startup the restore + # of existing endpoints and tracking of ongoing connections may be disrupted. + # As a result, reply packets may be dropped and the load-balancing decisions + # for established connections may change. + # + # If this option is set to "false" during an upgrade from 1.3 or earlier to + # 1.4 or later, then it may cause one-time disruptions during the upgrade. + preallocate-bpf-maps: "{{ .Values.bpf.preallocateMaps }}" + + # Regular expression matching compatible Istio sidecar istio-proxy + # container image names + sidecar-istio-proxy-image: "{{ .Values.proxy.sidecarImageRegex }}" + + # Name of the cluster. Only relevant when building a mesh of clusters. + cluster-name: {{ .Values.cluster.name }} + +{{- if hasKey .Values.cluster "id" }} + # Unique ID of the cluster. Must be unique across all conneted clusters and + # in the range of 1 and 255. Only relevant when building a mesh of clusters. + cluster-id: "{{ .Values.cluster.id }}" +{{- end }} + + # Encapsulation mode for communication between nodes + # Possible values: + # - disabled + # - vxlan (default) + # - geneve +{{- if .Values.gke.enabled }} + tunnel: "disabled" + enable-endpoint-routes: "true" + enable-local-node-route: "false" +{{- else }} + tunnel: {{ .Values.tunnel }} +{{- end }} + +{{- if hasKey .Values "tunnelPort" }} + tunnel-port: "{{ .Values.tunnelPort }}" +{{- end }} + +{{- if .Values.eni.enabled }} + enable-endpoint-routes: "true" + auto-create-cilium-node-resource: "true" +{{- if .Values.eni.updateEC2AdapterLimitViaAPI }} + update-ec2-adapter-limit-via-api: "true" +{{- end }} +{{- if .Values.eni.awsReleaseExcessIPs }} + aws-release-excess-ips: "true" +{{- end }} + ec2-api-endpoint: {{ .Values.eni.ec2APIEndpoint | quote }} + eni-tags: {{ .Values.eni.eniTags | toRawJson | quote }} + subnet-ids-filter: {{ .Values.eni.subnetIDsFilter | quote }} + subnet-tags-filter: {{ .Values.eni.subnetTagsFilter | quote }} +{{- end }} + +{{- if .Values.azure.enabled }} + enable-endpoint-routes: "true" + auto-create-cilium-node-resource: "true" + enable-local-node-route: "false" +{{- if .Values.azure.userAssignedIdentityID }} + azure-user-assigned-identity-id: {{ .Values.azure.userAssignedIdentityID | quote }} +{{- end }} +{{- end }} + +{{- if .Values.alibabacloud.enabled }} + enable-endpoint-routes: "true" + auto-create-cilium-node-resource: "true" +{{- end }} + +{{- if hasKey .Values "l7Proxy" }} + # Enables L7 proxy for L7 policy enforcement and visibility + enable-l7-proxy: {{ .Values.l7Proxy | quote }} +{{- end }} + +{{- if ne .Values.cni.chainingMode "none" }} + # Enable chaining with another CNI plugin + # + # Supported modes: + # - none + # - aws-cni + # - flannel + # - portmap (Enables HostPort support for Cilium) + cni-chaining-mode: {{ 
.Values.cni.chainingMode }} + +{{- if hasKey .Values "enableIdentityMark" }} + enable-identity-mark: {{ .Values.enableIdentityMark | quote }} +{{- else if (ne $enableIdentityMark "true") }} + enable-identity-mark: "false" +{{- end }} +{{- if ne .Values.cni.chainingMode "portmap" }} + # Disable the PodCIDR route to the cilium_host interface as it is not + # required. While chaining, it is the responsibility of the underlying plugin + # to enable routing. + enable-local-node-route: "false" +{{- end }} +{{- end }} + + enable-ipv4-masquerade: {{ .Values.enableIPv4Masquerade | quote }} + enable-ipv6-masquerade: {{ .Values.enableIPv6Masquerade | quote }} + +{{- if hasKey .Values.bpf "masquerade" }} + enable-bpf-masquerade: {{ .Values.bpf.masquerade | quote }} +{{- else if eq $defaultBpfMasquerade "true" }} + enable-bpf-masquerade: {{ $defaultBpfMasquerade | quote }} +{{- end }} +{{- if hasKey .Values "egressMasqueradeInterfaces" }} + egress-masquerade-interfaces: {{ .Values.egressMasqueradeInterfaces }} +{{- end }} +{{- if and .Values.ipMasqAgent .Values.ipMasqAgent.enabled }} + enable-ip-masq-agent: "true" +{{- end }} + +{{- if .Values.encryption.enabled }} + {{- if eq .Values.encryption.type "ipsec" }} + enable-ipsec: {{ .Values.encryption.enabled | quote }} + + {{- if and .Values.encryption.ipsec.mountPath .Values.encryption.ipsec.keyFile }} + ipsec-key-file: {{ .Values.encryption.ipsec.mountPath }}/{{ .Values.encryption.ipsec.keyFile }} + {{- else }} + ipsec-key-file: {{ .Values.encryption.mountPath }}/{{ .Values.encryption.keyFile }} + {{- end }} + {{- if .Values.encryption.ipsec.interface }} + encrypt-interface: {{ .Values.encryption.ipsec.interface }} + {{- else if .Values.encryption.interface }} + encrypt-interface: {{ .Values.encryption.interface }} + {{- end }} + + {{- if .Values.encryption.nodeEncryption }} + encrypt-node: {{ .Values.encryption.nodeEncryption | quote }} + {{- end }} + {{- else if eq .Values.encryption.type "wireguard" }} + enable-wireguard: {{ .Values.encryption.enabled | quote }} + {{- if .Values.encryption.wireguard.userspaceFallback }} + enable-wireguard-userspace-fallback: {{ .Values.encryption.wireguard.userspaceFallback | quote }} + {{- end }} + {{- end }} +{{- end }} + +{{- if hasKey .Values "datapathMode" }} +{{- if eq .Values.datapathMode "ipvlan" }} + datapath-mode: ipvlan + ipvlan-master-device: {{ .Values.ipvlan.masterDevice }} +{{- end }} +{{- end }} + + enable-xt-socket-fallback: {{ .Values.enableXTSocketFallback | quote }} + install-iptables-rules: {{ .Values.installIptablesRules | quote }} +{{- if or (.Values.azure.enabled) (.Values.eni.enabled) (.Values.gke.enabled) (ne .Values.cni.chainingMode "none") }} + install-no-conntrack-iptables-rules: "false" +{{- else }} + install-no-conntrack-iptables-rules: {{ .Values.installNoConntrackIptablesRules | quote }} +{{- end}} + +{{- if hasKey .Values "iptablesRandomFully" }} + iptables-random-fully: {{ .Values.iptablesRandomFully | quote }} +{{- end }} + +{{- if hasKey .Values "iptablesLockTimeout" }} + iptables-lock-timeout: {{ .Values.iptablesLockTimeout | quote }} +{{- end }} + + auto-direct-node-routes: {{ .Values.autoDirectNodeRoutes | quote }} + enable-bandwidth-manager: {{ .Values.bandwidthManager | quote }} + +{{- if hasKey .Values "localRedirectPolicy" }} + enable-local-redirect-policy: {{ .Values.localRedirectPolicy | quote }} +{{- end }} + +{{- if hasKey .Values "nativeRoutingCIDR" }} + ipv4-native-routing-cidr: {{ .Values.nativeRoutingCIDR }} +{{- else if hasKey .Values "ipv4NativeRoutingCIDR" }} + 
ipv4-native-routing-cidr: {{ .Values.ipv4NativeRoutingCIDR }} +{{- end }} + +{{- if hasKey .Values "fragmentTracking" }} + enable-ipv4-fragment-tracking: {{ .Values.fragmentTracking | quote }} +{{- else if (ne $fragmentTracking "true") }} + enable-ipv4-fragment-tracking: "false" +{{- end }} + +{{- if and .Values.hostFirewall .Values.hostFirewall.enabled }} + enable-host-firewall: {{ .Values.hostFirewall.enabled | quote }} +{{- end}} + +{{- if hasKey .Values "devices" }} + # List of devices used to attach bpf_host.o (implements BPF NodePort, + # host-firewall and BPF masquerading) + devices: {{ join " " .Values.devices | quote }} +{{- end }} + + kube-proxy-replacement: {{ $kubeProxyReplacement | quote }} +{{- if ne $kubeProxyReplacement "disabled" }} + kube-proxy-replacement-healthz-bind-address: {{ default "" .Values.kubeProxyReplacementHealthzBindAddr | quote}} +{{- end }} + +{{- if hasKey .Values "hostServices" }} +{{- if .Values.hostServices.enabled }} + enable-host-reachable-services: {{ .Values.hostServices.enabled | quote }} +{{- end }} +{{- if ne .Values.hostServices.protocols "tcp,udp" }} + host-reachable-services-protos: {{ .Values.hostServices.protocols }} +{{- end }} +{{- if hasKey .Values.hostServices "hostNamespaceOnly" }} + bpf-lb-sock-hostns-only: {{ .Values.hostServices.hostNamespaceOnly | quote }} +{{- end }} +{{- end }} +{{- if hasKey .Values "hostPort" }} +{{- if eq $kubeProxyReplacement "partial" }} + enable-host-port: {{ .Values.hostPort.enabled | quote }} +{{- end }} +{{- end }} +{{- if hasKey .Values "externalIPs" }} +{{- if eq $kubeProxyReplacement "partial" }} + enable-external-ips: {{ .Values.externalIPs.enabled | quote }} +{{- end }} +{{- end }} +{{- if hasKey .Values "nodePort" }} +{{- if eq $kubeProxyReplacement "partial" }} + enable-node-port: {{ .Values.nodePort.enabled | quote }} +{{- end }} +{{- if hasKey .Values.nodePort "range" }} + node-port-range: {{ .Values.nodePort.range | quote }} +{{- end }} +{{- if hasKey .Values.nodePort "directRoutingDevice" }} + direct-routing-device: {{ .Values.nodePort.directRoutingDevice | quote }} +{{- end }} +{{- if hasKey .Values.nodePort "enableHealthCheck" }} + enable-health-check-nodeport: {{ .Values.nodePort.enableHealthCheck | quote}} +{{- end }} + node-port-bind-protection: {{ .Values.nodePort.bindProtection | quote }} + enable-auto-protect-node-port-range: {{ .Values.nodePort.autoProtectPortRange | quote }} +{{- end }} +{{- if hasKey .Values "loadBalancer" }} +{{- if .Values.loadBalancer.standalone }} + datapath-mode: lb-only +{{- end }} +{{- if hasKey .Values.loadBalancer "mode" }} + bpf-lb-mode: {{ .Values.loadBalancer.mode | quote }} +{{- end }} +{{- if hasKey .Values.loadBalancer "algorithm" }} + bpf-lb-algorithm: {{ .Values.loadBalancer.algorithm | quote }} +{{- end }} +{{- if hasKey .Values.loadBalancer "acceleration" }} + bpf-lb-acceleration: {{ .Values.loadBalancer.acceleration | quote }} +{{- end }} +{{- if hasKey .Values.loadBalancer "dsrDispatch" }} + bpf-lb-dsr-dispatch: {{ .Values.loadBalancer.dsrDispatch | quote }} +{{- end }} +{{- if hasKey .Values.loadBalancer "serviceTopology" }} + enable-service-topology: {{ .Values.loadBalancer.serviceTopology | quote }} +{{- end }} + +{{- end }} +{{- if hasKey .Values.maglev "tableSize" }} + bpf-lb-maglev-table-size: {{ .Values.maglev.tableSize | quote}} +{{- end }} +{{- if hasKey .Values.maglev "hashSeed" }} + bpf-lb-maglev-hash-seed: {{ .Values.maglev.hashSeed | quote}} +{{- end }} +{{- if .Values.sessionAffinity }} + enable-session-affinity: {{ 
.Values.sessionAffinity | quote }} +{{- else if eq $defaultSessionAffinity "true" }} + enable-session-affinity: {{ $defaultSessionAffinity | quote }} +{{- end }} +{{- if .Values.svcSourceRangeCheck }} + enable-svc-source-range-check: {{ .Values.svcSourceRangeCheck | quote }} +{{- end }} + +{{- if hasKey .Values "l2NeighDiscovery" }} +{{- if hasKey .Values.l2NeighDiscovery "enabled" }} + enable-l2-neigh-discovery: {{ .Values.l2NeighDiscovery.enabled | quote }} +{{- end }} +{{- if hasKey .Values.l2NeighDiscovery "refreshPeriod" }} + arping-refresh-period: {{ .Values.l2NeighDiscovery.refreshPeriod | quote }} +{{- end }} +{{- end }} + +{{- if and .Values.pprof .Values.pprof.enabled }} + pprof: {{ .Values.pprof.enabled | quote }} +{{- end }} +{{- if .Values.logSystemLoad }} + log-system-load: {{ .Values.logSystemLoad | quote }} +{{- end }} +{{- if .Values.logOptions }} + log-opt: {{ .Values.logOptions | toJson | quote }} +{{- end }} +{{- if and .Values.sockops .Values.sockops.enabled }} + sockops-enable: {{ .Values.sockops.enabled | quote }} +{{- end }} +{{- if hasKey .Values.k8s "requireIPv4PodCIDR" }} + k8s-require-ipv4-pod-cidr: {{ .Values.k8s.requireIPv4PodCIDR | quote }} +{{- end }} +{{- if hasKey .Values.k8s "requireIPv6PodCIDR" }} + k8s-require-ipv6-pod-cidr: {{ .Values.k8s.requireIPv6PodCIDR | quote }} +{{- end }} +{{- if .Values.endpointStatus.enabled }} + endpoint-status: {{ required "endpointStatus.status required: policy, health, controllers, logs and / or state. For 2 or more options use a comma: \"policy, health\"" .Values.endpointStatus.status | quote }} +{{- end }} +{{- if and .Values.endpointRoutes .Values.endpointRoutes.enabled }} + enable-endpoint-routes: {{ .Values.endpointRoutes.enabled | quote }} +{{- end }} +{{- if .Values.cni.configMap }} + read-cni-conf: {{ .Values.cni.confFileMountPath }}/{{ .Values.cni.configMapKey }} + write-cni-conf-when-ready: {{ .Values.cni.hostConfDirMountPath }}/05-cilium.conflist +{{- else if .Values.cni.readCniConf }} + read-cni-conf: {{ .Values.cni.readCniConf }} +{{- end }} +{{- if .Values.kubeConfigPath }} + k8s-kubeconfig-path: {{ .Values.kubeConfigPath | quote }} +{{- end }} +{{- if and ( .Values.endpointHealthChecking.enabled ) (or (eq .Values.cni.chainingMode "portmap") (eq .Values.cni.chainingMode "none")) }} + enable-endpoint-health-checking: "true" +{{- else}} + # Disable health checking, when chaining mode is not set to portmap or none + enable-endpoint-health-checking: "false" +{{- end }} +{{- if hasKey .Values "healthChecking" }} + enable-health-checking: {{ .Values.healthChecking | quote }} +{{- end }} +{{- if or .Values.wellKnownIdentities.enabled .Values.etcd.managed }} + enable-well-known-identities: "true" +{{- else }} + enable-well-known-identities: "false" +{{- end }} + enable-remote-node-identity: {{ .Values.remoteNodeIdentity | quote }} + +{{- if hasKey .Values "synchronizeK8sNodes" }} + synchronize-k8s-nodes: {{ .Values.synchronizeK8sNodes | quote }} +{{- end }} + +{{- if hasKey .Values "policyAuditMode" }} + policy-audit-mode: {{ .Values.policyAuditMode | quote }} +{{- end }} + +{{- if ne $defaultOperatorApiServeAddr "localhost:9234" }} + operator-api-serve-addr: {{ $defaultOperatorApiServeAddr | quote }} +{{- end }} + +{{- if .Values.hubble.enabled }} + # Enable Hubble gRPC service. + enable-hubble: {{ .Values.hubble.enabled | quote }} + # UNIX domain socket for Hubble server to listen to. 
+ hubble-socket-path: {{ .Values.hubble.socketPath | quote }} +{{- if hasKey .Values.hubble "eventQueueSize" }} + # Buffer size of the channel for Hubble to receive monitor events. If this field is not set, + # the buffer size is set to the default monitor queue size. + hubble-event-queue-size: {{ .Values.hubble.eventQueueSize | quote }} +{{- end }} +{{- if hasKey .Values.hubble "flowBufferSize" }} + # DEPRECATED: this block should be removed in 1.11 + hubble-flow-buffer-size: {{ .Values.hubble.flowBufferSize | quote }} +{{- end }} +{{- if hasKey .Values.hubble "eventBufferCapacity" }} + # Capacity of the buffer to store recent events. + hubble-event-buffer-capacity: {{ .Values.hubble.eventBufferCapacity | quote }} +{{- end }} +{{- if .Values.hubble.metrics.enabled }} + # Address to expose Hubble metrics (e.g. ":7070"). Metrics server will be disabled if this + # field is not set. + hubble-metrics-server: ":{{ .Values.hubble.metrics.port }}" + # A space separated list of metrics to enable. See [0] for available metrics. + # + # https://github.com/cilium/hubble/blob/master/Documentation/metrics.md + hubble-metrics: {{- range .Values.hubble.metrics.enabled }} + {{.}} +{{- end }} +{{- end }} +{{- if hasKey .Values.hubble "listenAddress" }} + # An additional address for Hubble server to listen to (e.g. ":4244"). + hubble-listen-address: {{ .Values.hubble.listenAddress | quote }} +{{- if .Values.hubble.tls.enabled }} + hubble-disable-tls: "false" + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt +{{- else }} + hubble-disable-tls: "true" +{{- end }} +{{- end }} +{{- end }} +{{- if hasKey .Values "disableIptablesFeederRules" }} + # A space separated list of iptables chains to disable when installing feeder rules. 
+ disable-iptables-feeder-rules: {{ .Values.disableIptablesFeederRules | join " " | quote }} +{{- end }} + ipam: {{ $ipam | quote }} + +{{- if eq $ipam "cluster-pool" }} +{{- if .Values.ipv4.enabled }} + {{- if .Values.ipam.operator.clusterPoolIPv4PodCIDRList }} + cluster-pool-ipv4-cidr: {{ .Values.ipam.operator.clusterPoolIPv4PodCIDRList | join " " | quote }} + {{- else }} + cluster-pool-ipv4-cidr: {{ .Values.ipam.operator.clusterPoolIPv4PodCIDR | quote }} + {{- end }} + cluster-pool-ipv4-mask-size: {{ .Values.ipam.operator.clusterPoolIPv4MaskSize | quote }} +{{- end }} +{{- if .Values.ipv6.enabled }} + {{- if .Values.ipam.operator.clusterPoolIPv6PodCIDRList }} + cluster-pool-ipv6-cidr: {{ .Values.ipam.operator.clusterPoolIPv6PodCIDRList | join " " | quote }} + {{- else }} + cluster-pool-ipv6-cidr: {{ .Values.ipam.operator.clusterPoolIPv6PodCIDR | quote }} + {{- end }} + cluster-pool-ipv6-mask-size: {{ .Values.ipam.operator.clusterPoolIPv6MaskSize | quote }} +{{- end }} +{{- end }} + +{{- if .Values.enableCnpStatusUpdates }} + disable-cnp-status-updates: {{ (not .Values.enableCnpStatusUpdates) | quote }} +{{- else if (eq $defaultEnableCnpStatusUpdates "false") }} + disable-cnp-status-updates: "true" +{{- end }} + +{{- if .Values.egressGateway.enabled }} + enable-ipv4-egress-gateway: "true" +{{- end }} + +{{- if .Values.enableK8sEventHandover }} + enable-k8s-event-handover: "true" +{{- end }} + +{{- if hasKey .Values "crdWaitTimeout" }} + crd-wait-timeout: {{ .Values.crdWaitTimeout | quote }} +{{- else if ( ne $crdWaitTimeout "5m" ) }} + crd-wait-timeout: {{ $crdWaitTimeout | quote }} +{{- end }} + +{{- if .Values.enableK8sEndpointSlice }} + enable-k8s-endpoint-slice: {{ .Values.enableK8sEndpointSlice | quote }} +{{- end }} + +{{- if hasKey .Values.k8s "serviceProxyName" }} + # Configure service proxy name for Cilium. + k8s-service-proxy-name: {{ .Values.k8s.serviceProxyName | quote }} +{{- end }} + +{{- if and .Values.customCalls .Values.customCalls.enabled }} + # Enable tail call hooks for custom eBPF programs. + enable-custom-calls: {{ .Values.customCalls.enabled | quote }} +{{- end }} + +{{- if and .Values.bgp.enabled (and (not .Values.bgp.announce.loadbalancerIP) (not .Values.bgp.announce.podCIDR)) }} + {{ fail "BGP was enabled, but no announcements were enabled. Please enable one or more announcements." 
}} +{{- end }} + +{{- if and .Values.bgp.enabled .Values.bgp.announce.loadbalancerIP }} + bgp-announce-lb-ip: {{ .Values.bgp.announce.loadbalancerIP | quote }} +{{- end }} + +{{- if and .Values.bgp.enabled .Values.bgp.announce.podCIDR }} + bgp-announce-pod-cidr: {{ .Values.bgp.announce.podCIDR | quote }} +{{- end}} + +{{- if hasKey .Values.cgroup "hostRoot" }} + cgroup-root: {{ .Values.cgroup.hostRoot | quote }} +{{- end }} + +{{- if hasKey .Values.bpf "vlanBypass" }} + # A space separated list of explicitly allowed vlan id's + vlan-bpf-bypass: {{ .Values.bpf.vlanBypass | join " " | quote }} +{{- end }} + +{{- if .Values.enableCiliumEndpointSlice }} + enable-cilium-endpoint-slice: "true" +{{- end }} + +{{- if hasKey .Values "enableK8sTerminatingEndpoint" }} + enable-k8s-terminating-endpoint: {{ .Values.enableK8sTerminatingEndpoint | quote }} +{{- end }} + +{{- if .Values.annotateK8sNode }} + annotate-k8s-node: "true" +{{- end }} + +{{- if .Values.operator.removeNodeTaints }} + remove-cilium-node-taints: "true" +{{- end }} +{{- if .Values.operator.setNodeNetworkStatus }} + set-cilium-is-up-condition: "true" +{{- end }} + +{{- if .Values.operator.unmanagedPodWatcher.restart }} + unmanaged-pod-watcher-interval: {{ .Values.operator.unmanagedPodWatcher.intervalSeconds | quote }} +{{- else }} + unmanaged-pod-watcher-interval: "0" +{{- end }} + +{{- if .Values.extraConfig }} + {{ toYaml .Values.extraConfig | nindent 2 }} +{{- end }} +{{- end }} diff --git a/packages/rke2-cilium/generated-changes/patch/Chart.yaml.patch b/packages/rke2-cilium/generated-changes/patch/Chart.yaml.patch index 1d22176..c4164e6 100644 --- a/packages/rke2-cilium/generated-changes/patch/Chart.yaml.patch +++ b/packages/rke2-cilium/generated-changes/patch/Chart.yaml.patch @@ -2,10 +2,10 @@ +++ charts/Chart.yaml @@ -71,8 +71,7 @@ apiVersion: v2 - appVersion: 1.11.4 + appVersion: 1.11.5 description: eBPF-based Networking, Security, and Observability -home: https://cilium.io/ --icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.11.4/Documentation/images/logo-solo.svg +-icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.11.5/Documentation/images/logo-solo.svg +home: https://docs.rke2.io/ keywords: - BPF @@ -19,4 +19,4 @@ sources: -- https://github.com/cilium/cilium +- https://github.com/rancher/rke2-charts - version: 1.11.4 + version: 1.11.5 diff --git a/packages/rke2-cilium/generated-changes/patch/templates/cilium-configmap.yaml.patch b/packages/rke2-cilium/generated-changes/patch/templates/cilium-configmap.yaml.patch index 0022f59..c6654b8 100644 --- a/packages/rke2-cilium/generated-changes/patch/templates/cilium-configmap.yaml.patch +++ b/packages/rke2-cilium/generated-changes/patch/templates/cilium-configmap.yaml.patch @@ -1,6 +1,6 @@ --- charts-original/templates/cilium-configmap.yaml +++ charts/templates/cilium-configmap.yaml -@@ -195,7 +195,11 @@ +@@ -199,7 +199,11 @@ # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 # address. 
diff --git a/packages/rke2-cilium/generated-changes/patch/values.yaml.patch b/packages/rke2-cilium/generated-changes/patch/values.yaml.patch index a2e8d31..1db93a6 100644 --- a/packages/rke2-cilium/generated-changes/patch/values.yaml.patch +++ b/packages/rke2-cilium/generated-changes/patch/values.yaml.patch @@ -6,10 +6,10 @@ override: ~ - repository: quay.io/cilium/cilium + repository: rancher/mirrored-cilium-cilium - tag: v1.11.4 + tag: v1.11.5 pullPolicy: IfNotPresent - # cilium-digest -- digest: "sha256:d9d4c7759175db31aa32eaa68274bb9355d468fbc87e23123c80052e3ed63116" +- digest: "sha256:79e66c3c2677e9ecc3fd5b2ed8e4ea7e49cf99ed6ee181f2ef43400c4db5eef0" - useDigest: true + useDigest: false @@ -28,7 +28,7 @@ podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: -@@ -569,7 +561,7 @@ +@@ -572,7 +564,7 @@ hubble: # -- Enable Hubble (true by default). @@ -37,7 +37,7 @@ # -- Buffer size of the channel Hubble uses to receive monitor events. If this # value is not set, the queue size is set to the default monitor queue size. -@@ -705,7 +697,8 @@ +@@ -716,7 +708,8 @@ # -- Node labels for pod assignment # ref: https://kubernetes.io/docs/user-guide/node-selection/ @@ -47,7 +47,7 @@ # -- Annotations to be added to hubble-relay pods podAnnotations: {} -@@ -878,7 +871,8 @@ +@@ -889,7 +882,8 @@ # -- Node labels for pod assignment # ref: https://kubernetes.io/docs/user-guide/node-selection/ @@ -57,7 +57,7 @@ # -- Node tolerations for pod assignment on nodes with taints # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ -@@ -939,7 +933,7 @@ +@@ -950,7 +944,7 @@ ipam: # -- Configure IP Address Management mode. # ref: https://docs.cilium.io/en/stable/concepts/networking/ipam/ @@ -66,7 +66,7 @@ operator: # -- Deprecated in favor of ipam.operator.clusterPoolIPv4PodCIDRList. # IPv4 CIDR range to delegate to individual nodes for IPAM. -@@ -1136,7 +1130,7 @@ +@@ -1147,7 +1141,7 @@ # -- Configure prometheus metrics on the configured port at /metrics prometheus: @@ -75,7 +75,7 @@ port: 9090 serviceMonitor: # -- Enable service monitors. -@@ -1265,7 +1259,8 @@ +@@ -1276,7 +1270,8 @@ # -- Node labels for cilium-etcd-operator pod assignment # ref: https://kubernetes.io/docs/user-guide/node-selection/ @@ -85,27 +85,27 @@ # -- Annotations to be added to cilium-etcd-operator pods podAnnotations: {} -@@ -1326,17 +1321,9 @@ +@@ -1337,17 +1332,9 @@ # -- cilium-operator image. 
image: override: ~ - repository: quay.io/cilium/operator + repository: rancher/mirrored-cilium-operator - tag: v1.11.4 + tag: v1.11.5 - # operator-generic-digest -- genericDigest: "sha256:bf75ad0dc47691a3a519b8ab148ed3a792ffa2f1e309e6efa955f30a40e95adc" +- genericDigest: "sha256:8ace281328b27d4216218c604d720b9a63a8aec2bd1996057c79ab0168f9d6d8" - # operator-azure-digest -- azureDigest: "sha256:e507e3ece5a3cb45daf2e879bd3888681f297c76523661551ecdd38d8c46c798" +- azureDigest: "sha256:e6565a0bafbd6a6c45c2467010b6a3032e32db9889083214e988e2706e84816e" - # operator-aws-digest -- awsDigest: "sha256:3ef7a13d962d977815d12846c04fade989195722ba6628bffaf865fb46d4b6a0" +- awsDigest: "sha256:8ff67ee754ef752af98d41819fb25261f9506872f47d17c6a552ed5c5e063d2d" - # operator-alibabacloud-digest -- alibabacloudDigest: "sha256:bb6eb1d389d87d435b6ddb7c32955459ccdea15d6542da2bcb38738c8dca15d9" +- alibabacloudDigest: "sha256:063956884c549d8d5e5f540fb84d39c7ba62b02cb52f5f7297c46652379963a7" - useDigest: true + useDigest: false pullPolicy: IfNotPresent suffix: "" -@@ -1405,7 +1392,8 @@ +@@ -1416,7 +1403,8 @@ # -- Node labels for cilium-operator pod assignment # ref: https://kubernetes.io/docs/user-guide/node-selection/ @@ -115,7 +115,7 @@ # -- Annotations to be added to cilium-operator pods podAnnotations: {} -@@ -1445,7 +1433,7 @@ +@@ -1459,7 +1447,7 @@ # -- Enable prometheus metrics for cilium-operator on the configured port at # /metrics prometheus: @@ -124,7 +124,7 @@ port: 6942 serviceMonitor: # -- Enable service monitors. -@@ -1462,12 +1450,12 @@ +@@ -1490,12 +1478,12 @@ nodeinit: # -- Enable the node initialization DaemonSet @@ -139,7 +139,7 @@ tag: 62bfbe88c17778aad7bef9fa57ff9e2d4a9ba0d8 pullPolicy: IfNotPresent -@@ -1510,7 +1498,8 @@ +@@ -1538,7 +1526,8 @@ # -- Node labels for nodeinit pod assignment # ref: https://kubernetes.io/docs/user-guide/node-selection/ @@ -149,31 +149,7 @@ # -- Annotations to be added to node-init pods. podAnnotations: {} -@@ -1546,11 +1535,9 @@ - # -- Cilium pre-flight image. - image: - override: ~ -- repository: quay.io/cilium/cilium -+ repository: rancher/mirrored-cilium-cilium - tag: v1.11.4 -- # cilium-digest -- digest: "sha256:d9d4c7759175db31aa32eaa68274bb9355d468fbc87e23123c80052e3ed63116" -- useDigest: true -+ useDigest: false - pullPolicy: IfNotPresent - - # -- The priority class to use for the preflight pod. -@@ -1600,7 +1587,8 @@ - - # -- Node labels for preflight pod assignment - # ref: https://kubernetes.io/docs/user-guide/node-selection/ -- nodeSelector: {} -+ nodeSelector: -+ kubernetes.io/os: linux - - # -- Annotations to be added to preflight pods - podAnnotations: {} -@@ -1840,3 +1828,6 @@ +@@ -1868,3 +1857,6 @@ # -- Configure whether to enable auto detect of terminating state for endpoints # in order to support graceful termination. enableK8sTerminatingEndpoint: true diff --git a/packages/rke2-cilium/package.yaml b/packages/rke2-cilium/package.yaml index c2684ba..0c6284b 100644 --- a/packages/rke2-cilium/package.yaml +++ b/packages/rke2-cilium/package.yaml @@ -1,2 +1,2 @@ -url: https://helm.cilium.io/cilium-1.11.4.tgz +url: https://helm.cilium.io/cilium-1.11.5.tgz packageVersion: 01
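
For reference, the value-level effect of the values.yaml patch above can be summarised as a plain Helm values snippet. This is an illustrative sketch assembled only from the hunks visible in this diff (it is not a file in the repository); field paths follow the upstream Cilium 1.11 values layout, and hunks whose changed lines are not shown here are omitted.

# Sketch: effective agent/operator image configuration once values.yaml.patch
# is applied on top of the upstream cilium-1.11.5 chart (assembled from the
# hunks above; not a file in the repo).
image:
  repository: rancher/mirrored-cilium-cilium    # mirrored agent image instead of quay.io/cilium/cilium
  tag: v1.11.5
  useDigest: false                              # digest pinning is dropped along with the mirror switch
operator:
  image:
    repository: rancher/mirrored-cilium-operator
    tag: v1.11.5
    useDigest: false                            # generic/azure/aws/alibabacloud digests removed
  nodeSelector:
    kubernetes.io/os: linux                     # operator pods pinned to Linux nodes

Similar kubernetes.io/os: linux nodeSelector additions remain in the hubble-relay and cilium-etcd-operator sections of the patch, and package.yaml moves the chart source to https://helm.cilium.io/cilium-1.11.5.tgz.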