From d1cbb21e34f379f7ee31e446200e3bc0d264326a Mon Sep 17 00:00:00 2001 From: Christophe Jauffret Date: Thu, 17 Feb 2022 11:00:13 +0100 Subject: [PATCH 01/13] update nutanix CSI to 2.5.1 --- .../generated-changes/patch/Chart.yaml.patch | 2 +- .../nutanix-csi-storage/generated-changes/patch/README.md.patch | 2 +- packages/nutanix-csi-storage/package.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/nutanix-csi-storage/generated-changes/patch/Chart.yaml.patch b/packages/nutanix-csi-storage/generated-changes/patch/Chart.yaml.patch index 5c7dfe1d4..32a213f91 100644 --- a/packages/nutanix-csi-storage/generated-changes/patch/Chart.yaml.patch +++ b/packages/nutanix-csi-storage/generated-changes/patch/Chart.yaml.patch @@ -8,5 +8,5 @@ + catalog.cattle.io/release-name: nutanix-csi-storage + catalog.cattle.io/display-name: Nutanix CSI Storage apiVersion: v1 - appVersion: 2.5.0 + appVersion: 2.5.1 description: Nutanix Container Storage Interface (CSI) Driver diff --git a/packages/nutanix-csi-storage/generated-changes/patch/README.md.patch b/packages/nutanix-csi-storage/generated-changes/patch/README.md.patch index 733224d8c..38825b1ac 100644 --- a/packages/nutanix-csi-storage/generated-changes/patch/README.md.patch +++ b/packages/nutanix-csi-storage/generated-changes/patch/README.md.patch @@ -1,6 +1,6 @@ --- charts-original/README.md +++ charts/README.md -@@ -41,6 +41,7 @@ +@@ -43,6 +43,7 @@ - Kubernetes 1.17 or later - Kubernetes worker nodes must have the iSCSI package installed (Nutanix Volumes mode) and/or NFS tools (Nutanix Files mode) - This chart have been validated on RHEL/CentOS 7/8 and Ubuntu 18.04/20.04/21.04/21.10, but the new architecture enables easy portability to other distributions. diff --git a/packages/nutanix-csi-storage/package.yaml b/packages/nutanix-csi-storage/package.yaml index 522582876..9dfb5d063 100644 --- a/packages/nutanix-csi-storage/package.yaml +++ b/packages/nutanix-csi-storage/package.yaml @@ -1,2 +1,2 @@ -url: https://github.com/nutanix/helm/releases/download/nutanix-csi-storage-2.5.0/nutanix-csi-storage-2.5.0.tgz +url: https://github.com/nutanix/helm/releases/download/nutanix-csi-storage-2.5.1/nutanix-csi-storage-2.5.1.tgz packageVersion: 00 \ No newline at end of file From e1cb1e944d48ae4d8e8ff262bffd043098c16093 Mon Sep 17 00:00:00 2001 From: Christophe Jauffret Date: Thu, 17 Feb 2022 11:01:34 +0100 Subject: [PATCH 02/13] Result of running `make charts` --- .../nutanix-csi-storage-2.5.100.tgz | Bin 0 -> 9571 bytes .../nutanix-csi-storage/2.5.100/.helmignore | 21 ++ .../nutanix-csi-storage/2.5.100/Chart.yaml | 37 ++++ .../nutanix-csi-storage/2.5.100/README.md | 182 ++++++++++++++++++ .../nutanix-csi-storage/2.5.100/app-readme.md | 1 + .../nutanix-csi-storage/2.5.100/questions.yml | 123 ++++++++++++ .../2.5.100/templates/NOTES.txt | 3 + .../2.5.100/templates/_helpers.tpl | 43 +++++ .../2.5.100/templates/csi-driver.yaml | 11 ++ .../2.5.100/templates/ntnx-csi-node-ds.yaml | 146 ++++++++++++++ .../templates/ntnx-csi-provisioner-sts.yaml | 150 +++++++++++++++ .../2.5.100/templates/ntnx-csi-rbac.yaml | 130 +++++++++++++ .../2.5.100/templates/ntnx-csi-scc.yaml | 30 +++ .../2.5.100/templates/ntnx-sc.yaml | 82 ++++++++ .../2.5.100/templates/ntnx-secret.yaml | 11 ++ .../templates/service-prometheus-csi.yaml | 46 +++++ .../nutanix-csi-storage/2.5.100/values.yaml | 119 ++++++++++++ index.yaml | 41 ++++ 18 files changed, 1176 insertions(+) create mode 100644 assets/nutanix-csi-storage/nutanix-csi-storage-2.5.100.tgz create mode 100644 
charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/.helmignore
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/Chart.yaml
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/README.md
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/app-readme.md
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/questions.yml
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/NOTES.txt
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/_helpers.tpl
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/csi-driver.yaml
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-node-ds.yaml
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-provisioner-sts.yaml
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-rbac.yaml
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-scc.yaml
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-sc.yaml
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-secret.yaml
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/service-prometheus-csi.yaml
 create mode 100644 charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/values.yaml
diff --git a/assets/nutanix-csi-storage/nutanix-csi-storage-2.5.100.tgz b/assets/nutanix-csi-storage/nutanix-csi-storage-2.5.100.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..fea4e776ae710cb374deb546bbe2b28468cf9cb5
GIT binary patch
literal 9571
[base85-encoded binary payload omitted]
literal 0
HcmV?d00001

diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/.helmignore b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/.helmignore
new file mode 100644
index 000000000..f0c131944
--- /dev/null
+++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/Chart.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/Chart.yaml
new file mode 100644
index 000000000..894f63d61
--- /dev/null
+++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/Chart.yaml
@@ -0,0 +1,37 @@
+annotations:
+  artifacthub.io/changes: |
+    - Update Nutanix CSI Driver to 2.5.0
+  artifacthub.io/containsSecurityUpdates: "true"
+  artifacthub.io/displayName: Nutanix CSI Storage
+  artifacthub.io/links: |
+    - name: Nutanix CSI Driver documentation
+      url: https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_5_0:CSI-Volume-Driver-v2_5_0
+  artifacthub.io/maintainers: |
+    - name: Nutanix Cloud Native Team
+      email: cloudnative@nutanix.com
+  artifacthub.io/recommendations: |
+    - url: https://artifacthub.io/packages/helm/nutanix/nutanix-csi-snapshot
+  catalog.cattle.io/certified: partner
+  catalog.cattle.io/display-name: Nutanix CSI Storage
+  catalog.cattle.io/release-name: nutanix-csi-storage
+apiVersion: v1
+appVersion: 2.5.1
+description: Nutanix Container Storage Interface (CSI) Driver
+home: https://github.com/nutanix/helm
+icon: https://avatars2.githubusercontent.com/u/6165865?s=200&v=4
+keywords:
+- Nutanix
+- Storage
+- Volumes
+- Files
+- StorageClass
+- RedHat
+- CentOS
+- Ubuntu
+- CSI
+kubeVersion: '>= 1.17.0-0'
+maintainers:
+- email: cloudnative@nutanix.com
+  name: nutanix-cloud-native-bot
+name: nutanix-csi-storage
+version: 2.5.100
diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/README.md b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/README.md
new file mode 100644
index 000000000..c9595fdee
--- /dev/null
+++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/README.md
@@ -0,0 +1,182 @@
+# Nutanix CSI Storage Driver Helm chart
+
+## Introduction
+
+The Container Storage Interface (CSI) Volume Driver for Kubernetes leverages Nutanix Volumes and Nutanix Files to provide scalable and persistent storage for stateful applications.
+
+When Files is used for persistent storage, applications on multiple pods can access the same storage, and also have the benefit of multi-pod read and write access.
+
+## Important notice
+
+Starting with version 2.5 of this chart, the Snapshot components are separated into a second, independent chart.
+If you plan to upgrade an existing Nutanix CSI chart installation (version < v2.5.x) to this chart, review the recommendations below.
+
+- Once you upgrade to version 2.5+, the snapshot-controller will be removed, but previously installed Snapshot CRDs stay in place.
You will then need to install the [nutanix-csi-snapshot](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-snapshot) Helm chart following the [Important notice](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-snapshot#upgrading-from-nutanix-csi-storage-helm-chart-deployment) procedure.
+- If you created StorageClasses automatically with a previous Nutanix CSI chart version < v2.5.x, take care to remove them before running `helm upgrade`.
+
+If you previously installed the Nutanix CSI Storage Driver with yaml files, please follow the [Upgrading from yaml based deployment](#upgrading-from-yaml-based-deployment) section below.
+
+If this is your first deployment and your Kubernetes distribution does not bundle the snapshot components, you first need to install the [Nutanix CSI Snapshot Controller Helm chart](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-snapshot).
+
+Please note that starting with v2.2.0, the Nutanix CSI driver changed the driver name format from com.nutanix.csi to csi.nutanix.com. All deployment yamls use the new driver name format. However, if you initially installed the CSI driver with a version < v2.2.0, you need to continue using the old driver name com.nutanix.csi by setting the `legacy` parameter to `true`; if not, existing PVCs/PVs will not work with the new driver name.
+
+## Nutanix CSI driver documentation
+https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_5:CSI-Volume-Driver-v2_5
+
+## Features list
+
+- Nutanix CSI Driver v2.5.0
+- Nutanix Volumes support
+- Nutanix Files support
+- Volume clone
+- Volume snapshot and restore
+- IP Address Whitelisting
+- LVM volumes supporting multi-vdisk volume groups
+- NFS dynamic share provisioning
+- PV resize support for Volumes and Dynamic Files mode
+- iSCSI Auto CHAP Authentication
+- OS independence
+- Volume metrics and CSI operations metrics support
+
+## Prerequisites
+
+- Kubernetes 1.17 or later
+- Kubernetes worker nodes must have the iSCSI package installed (Nutanix Volumes mode) and/or NFS tools (Nutanix Files mode)
+- This chart has been validated on RHEL/CentOS 7/8 and Ubuntu 18.04/20.04/21.04/21.10, but the new architecture enables easy portability to other distributions.
+- This chart is not meant to be installed on a local k3s cluster (the iSCSI prerequisite is missing by default)
+
+## Installing the Chart
+
+To install the chart with the name `nutanix-csi`:
+
+```console
+helm repo add nutanix https://nutanix.github.io/helm/
+
+helm install nutanix-csi nutanix/nutanix-csi-storage -n <namespace>
+```
+
+## Upgrade
+
+Upgrades can be done using the normal Helm upgrade mechanism:
+
+```
+helm repo update
+helm upgrade nutanix-csi nutanix/nutanix-csi-storage
+```
+
+### Upgrading from yaml based deployment
+Starting with CSI driver v2.5.0, yaml based deployment is discontinued. To upgrade from a yaml based deployment, you need to patch your existing CSI deployment with Helm annotations. Please follow the procedure below.
+ +```bash +HELM_CHART_NAME="nutanix-csi" +HELM_CHART_NAMESPACE="ntnx-system" +DRIVER_NAME="csi.nutanix.com" + +kubectl delete sts csi-provisioner-ntnx-plugin -n ${HELM_CHART_NAMESPACE} +kubectl patch ds csi-node-ntnx-plugin -n ${HELM_CHART_NAMESPACE} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' + +kubectl patch csidriver ${DRIVER_NAME} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' + +kubectl patch sa csi-provisioner -n ${HELM_CHART_NAMESPACE} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' +kubectl patch sa csi-node-ntnx-plugin -n ${HELM_CHART_NAMESPACE} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' + +kubectl patch clusterrole external-provisioner-runner -n ${HELM_CHART_NAMESPACE} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' +kubectl patch clusterrole csi-node-runner -n ${HELM_CHART_NAMESPACE} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' + +kubectl patch clusterrolebinding csi-provisioner-role -n ${HELM_CHART_NAMESPACE} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' +kubectl patch clusterrolebinding csi-node-role -n ${HELM_CHART_NAMESPACE} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' + +kubectl patch service csi-provisioner-ntnx-plugin -n ${HELM_CHART_NAMESPACE} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' + +kubectl patch service csi-metrics-service -n ${HELM_CHART_NAMESPACE} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' + +kubectl patch servicemonitor csi-driver -n ${HELM_CHART_NAMESPACE} -p '{"metadata": {"annotations":{"meta.helm.sh/release-name":"'"${HELM_CHART_NAME}"'","meta.helm.sh/release-namespace":"'"${HELM_CHART_NAMESPACE}"'"}, "labels":{"app.kubernetes.io/managed-by":"Helm"}}}' --type=merge +``` + +Now follow [Installing the Chart](#installing-the-chart) section to finish upgrading the CSI driver. 
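+
+As a quick sanity check (the release name `nutanix-csi` and namespace `ntnx-system` below are illustrative; substitute your own), you can confirm that the patched resources are now tracked by Helm:
+
+```console
+helm status nutanix-csi -n ntnx-system
+kubectl get ds csi-node-ntnx-plugin -n ntnx-system -o jsonpath='{.metadata.labels.app\.kubernetes\.io/managed-by}'
+```
+
+The second command should print `Helm` if the patching above succeeded.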
+
+## Uninstalling the Chart
+
+To uninstall/delete the `nutanix-csi` deployment:
+
+```console
+helm delete nutanix-csi -n <namespace>
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the Nutanix-CSI chart and their default values.
+
+| Parameter | Description | Default |
+|----------------------------------|----------------------------------------|--------------------------------|
+| `legacy` | Use old reverse notation for CSI driver name | `false` |
+| `volumeClass` | Activate Nutanix Volumes Storage Class | `false` |
+| `volumeClassName` | Name of the Nutanix Volumes Storage Class | `nutanix-volume` |
+| `fileClass` | Activate Nutanix Files Storage Class | `false` |
+| `fileClassName` | Name of the Nutanix Files Storage Class | `nutanix-file` |
+| `dynamicFileClass` | Activate Nutanix Dynamic Files Storage Class | `false` |
+| `dynamicFileClassName` | Name of the Nutanix Dynamic Files Storage Class | `nutanix-dynamicfile` |
+| `defaultStorageClass` | Choose your default Storage Class (none, volume, file, dynfile) | `none`|
+| `prismEndPoint` | Cluster Virtual IP Address |`10.0.0.1`|
+| `username` | Name used for the admin role (if created) |`admin`|
+| `password` | Password for the admin role (if created) |`nutanix/4u`|
+| `secretName` | Name of the secret to use for admin role| `ntnx-secret`|
+| `createSecret` | Create secret for admin role (if false use existing)| `true`|
+| `storageContainer` | Nutanix storage container name | `default`|
+| `fsType` | Type of file system you are using (ext4, xfs) |`xfs`|
+| `networkSegmentation` | Activate Volumes Network Segmentation support |`false`|
+| `lvmVolume` | Activate LVM to use multiple vdisks per Volume |`false`|
+| `lvmDisks` | Number of vdisks per volume if LVM enabled | `4`|
+| `fileHost` | NFS server IP address | `10.0.0.3`|
+| `filePath` | Path of the NFS share |`share`|
+| `fileServerName` | Name of the Nutanix Files server | `file`|
+| `kubeletDir` | Allows overriding the host location of kubelet's internal state | `/var/lib/kubelet`|
+| `nodeSelector` | Add nodeSelector to all pods | `{}` |
+| `tolerations` | Add tolerations to all pods | `[]` |
+| `imagePullPolicy` | Specify imagePullPolicy for all pods| `IfNotPresent`|
+| `provisioner.nodeSelector` | Add nodeSelector to provisioner pod | `{}` |
+| `provisioner.tolerations` | Add tolerations to provisioner pod | `[]` |
+| `node.nodeSelector` | Add nodeSelector to node pods | `{}` |
+| `node.tolerations` | Add tolerations to node pods | `[]` |
+| `servicemonitor.enabled` | Create ServiceMonitor to scrape CSI metrics | `false` |
+| `servicemonitor.labels` | Labels to add to the ServiceMonitor (to match the Prometheus serviceMonitorSelector logic) | `k8s-app: csi-driver`|
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`, or provide a file with `-f value.yaml`.
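+
+For example, a minimal `value.yaml` enabling a Volumes-backed storage class might look like the following (all values are illustrative placeholders, mirroring the `--set` examples below; adapt them to your environment):
+
+```yaml
+volumeClass: true
+prismEndPoint: X.X.X.X
+username: admin
+password: xxxxxxxxx
+storageContainer: container_name
+fsType: xfs
+```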
+ +### Configuration examples: + +Install the driver in the `ntnx-system` namespace: + +```console +helm install nutanix-storage nutanix/nutanix-csi-storage -n ntnx-system --create-namespace +``` + +Install the driver in the `ntnx-system` namespace and create a volume storageclass: + +```console +helm install nutanix-storage nutanix/nutanix-csi-storage -n ntnx-system --create-namespace --set volumeClass=true --set prismEndPoint=X.X.X.X --set username=admin --set password=xxxxxxxxx --set storageContainer=container_name --set fsType=xfs +``` + +Install the driver in the `ntnx-system` namespace, create a volume and a dynamic file storageclass and set the volume storage class as default: + +```console +helm install nutanix-storage nutanix/nutanix-csi-storage -n ntnx-system --create-namespace --set volumeClass=true --set prismEndPoint=X.X.X.X --set username=admin --set password=xxxxxxxxx --set storageContainer=container_name --set fsType=xfs --set defaultStorageClass=volume --set dynamicFileClass=true --set fileServerName=name_of_the_file_server +``` + +All the options can also be specified in a value.yaml file: + +```console +helm install nutanix-storage nutanix/nutanix-csi-storage -n ntnx-system --create-namespace -f value.yaml +``` + +## Support + +The Nutanix CSI Volume Driver is fully supported by Nutanix. Please use the standard support procedure to file a ticket [here](https://www.nutanix.com/support-services/product-support). + +## Community + +Please file any issues, questions or feature requests you may have [here](https://github.com/nutanix/csi-plugin/issues) for the Nutanix CSI Driver or [here](https://github.com/nutanix/helm/issues) for the Helm chart. + +## Contributing + +We value all feedback and contributions. If you find any issues or want to contribute, please feel free to open an issue or file a PR. 
diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/app-readme.md b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/app-readme.md new file mode 100644 index 000000000..bffca7493 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/app-readme.md @@ -0,0 +1 @@ +A Helm chart for installing Nutanix CSI Volume/File Storage Driver diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/questions.yml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/questions.yml new file mode 100644 index 000000000..a4004fe14 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/questions.yml @@ -0,0 +1,123 @@ +questions: + - variable: volumeClass + label: "Volumes Storage Class" + type: boolean + default: true + description: "Activate Nutanix Volumes Storage Class" + group: "global Settings" + - variable: fileClass + label: "Files Storage Class" + type: boolean + default: false + description: "Activate Nutanix Files Storage Class" + group: "global Settings" + - variable: dynamicFileClass + label: "Dynamic Files Storage Class" + type: boolean + default: false + description: "Activate Nutanix Files Storage Class with dynamic share provisioning" + group: "global Settings" + - variable: legacy + label: "Driver Name Legacy mode" + type: boolean + default: false + description: "Set to True to continue to use old driver name in case of initial install with chart < 2.2.0" + group: "global Settings" + - variable: defaultStorageClass + label: "Default Storage Class" + type: enum + default: "none" + options: ["none", "volume", "file", "dynfile"] + description: "Select the default Storage Class you want" + group: "global Settings" + show_if: "volumeClass=true||dynamicFileClass=true||fileClass=true" + + - variable: prismEndPoint + label: "Prism Endpoint" + type: string + required: true + description: "Please specify the cluster virtual address" + group: "global Settings" + show_if: "volumeClass=true||dynamicFileClass=true" + - variable: username + label: "Username" + type: string + required: true + description: "Specify username with cluster admin permission" + group: "global Settings" + show_if: "volumeClass=true||dynamicFileClass=true" + - variable: password + label: "Password" + type: password + required: true + description: "Specify password of the user" + group: "global Settings" + show_if: "volumeClass=true||dynamicFileClass=true" + + - variable: servicemonitor.enabled + label: "Prometheus ServiceMonitor" + type: boolean + default: false + description: "Activate Prometheus ServiceMonitor to scrape CSI metrics" + group: "global Settings" + + - variable: storageContainer + label: "Storage Container" + type: string + required: true + description: "Specify Nutanix container name where the Persistent Volume will be stored" + group: "Nutanix Volumes Settings" + show_if: "volumeClass=true" + - variable: fsType + label: "Filesystem" + type: enum + options: ["xfs", "ext4"] + description: "Select the filesystem for the Persistent Volume" + group: "Nutanix Volumes Settings" + show_if: "volumeClass=true" + - variable: networkSegmentation + label: "Volumes Network Segmentation" + type: boolean + default: false + description: "Activate Volumes Network Segmentation support" + group: "Nutanix Volumes Settings" + show_if: "volumeClass=true" + - variable: lvmVolume + label: "LVM Volume" + type: boolean + default: false + description: "Activate LVM to support multi vdisks volume group for PV" + group: "Nutanix Volumes Settings" + show_if: 
"volumeClass=true" + - variable: lvmDisks + label: "LVM Disks" + type: int + required: true + default: "4" + min: 1 + max: 8 + description: "Number of vdisk for each PV" + group: "Nutanix Volumes Settings" + show_if: "lvmVolume=true&&volumeClass=true" + + - variable: fileHost + label: "File Server Address" + type: string + required: true + description: "Specify Nutanix Files address" + group: "Nutanix Files Settings" + show_if: "fileClass=true" + - variable: filePath + label: "Export share" + type: string + required: true + description: "Specify Nutanix Files share path" + group: "Nutanix Files Settings" + show_if: "fileClass=true" + - variable: fileServerName + label: "NFS File Server Name" + type: string + required: true + description: "Specify Nutanix Files server name" + group: "Nutanix Files Settings" + show_if: "dynamicFileClass=true" diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/NOTES.txt b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/NOTES.txt new file mode 100644 index 000000000..3921d167a --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/NOTES.txt @@ -0,0 +1,3 @@ +Driver name: {{ include "nutanix-csi-storage.drivername" . }} + +Nutanix CSI provider was deployed in namespace {{ .Release.Namespace }} diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/_helpers.tpl b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/_helpers.tpl new file mode 100644 index 000000000..5fe53b26a --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/_helpers.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nutanix-csi-storage.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "nutanix-csi-storage.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nutanix-csi-storage.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create CSI driver name. 
+*/}} +{{- define "nutanix-csi-storage.drivername" -}} +{{- if .Values.legacy -}} +com.nutanix.csi +{{- else -}} +csi.nutanix.com +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/csi-driver.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/csi-driver.yaml new file mode 100644 index 000000000..839304d12 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/csi-driver.yaml @@ -0,0 +1,11 @@ +{{- if .Capabilities.APIVersions.Has "storage.k8s.io/v1/CSIDriver" }} +apiVersion: storage.k8s.io/v1 +{{- else }} +apiVersion: storage.k8s.io/v1beta1 +{{- end }} +kind: CSIDriver +metadata: + name: {{ include "nutanix-csi-storage.drivername" . }} +spec: + attachRequired: false + podInfoOnMount: true \ No newline at end of file diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-node-ds.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-node-ds.yaml new file mode 100644 index 000000000..2f471838c --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-node-ds.yaml @@ -0,0 +1,146 @@ +# Copyright 2021 Nutanix Inc +# +# example usage: kubectl create -f + +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-node-ntnx-plugin + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: csi-node-ntnx-plugin + template: + metadata: + labels: + app: csi-node-ntnx-plugin + spec: + serviceAccount: csi-node-ntnx-plugin + hostNetwork: true + containers: + - name: driver-registrar + image: {{ .Values.sidecars.registrar.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: {{ .Values.kubeletDir }}/plugins/{{ include "nutanix-csi-storage.drivername" . }}/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: plugin-dir + mountPath: /csi/ + - name: registration-dir + mountPath: /registration + - name: csi-node-ntnx-plugin + securityContext: + privileged: true + allowPrivilegeEscalation: true + image: {{ .Values.node.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + args : + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(NODE_ID)" + - "--drivername={{ include "nutanix-csi-storage.drivername" . }}" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: {{ .Values.kubeletDir }} + # needed so that any mounts setup inside this container are + # propagated back to the host machine. + mountPropagation: "Bidirectional" + - mountPath: /dev + name: device-dir + - mountPath: /etc/iscsi + name: iscsi-dir + - mountPath: /host + name: root-dir + # This is needed because mount is run from host using chroot. 
+          mountPropagation: "Bidirectional"
+        ports:
+        - containerPort: 9808
+          name: http-endpoint
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: http-endpoint
+          initialDelaySeconds: 10
+          timeoutSeconds: 3
+          periodSeconds: 2
+          failureThreshold: 3
+      - name: liveness-probe
+        volumeMounts:
+        - mountPath: /csi
+          name: plugin-dir
+        image: {{ .Values.sidecars.livenessprobe.image }}
+        imagePullPolicy: {{ .Values.imagePullPolicy }}
+        args:
+        - --csi-address=/csi/csi.sock
+        - --http-endpoint=:9808
+      {{- with (.Values.node.nodeSelector | default .Values.nodeSelector) }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with (.Values.node.tolerations | default .Values.tolerations) }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      volumes:
+      - name: registration-dir
+        hostPath:
+          path: {{ .Values.kubeletDir }}/plugins_registry/
+          type: Directory
+      - name: plugin-dir
+        hostPath:
+          path: {{ .Values.kubeletDir }}/plugins/{{ include "nutanix-csi-storage.drivername" . }}/
+          type: DirectoryOrCreate
+      - name: pods-mount-dir
+        hostPath:
+          path: {{ .Values.kubeletDir }}
+          type: Directory
+      - name: device-dir
+        hostPath:
+          path: /dev
+      - name: iscsi-dir
+        hostPath:
+          path: /etc/iscsi
+          type: Directory
+      - name: root-dir
+        hostPath:
+          path: /
+          type: Directory
diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-provisioner-sts.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-provisioner-sts.yaml
new file mode 100644
index 000000000..ecb8369c5
--- /dev/null
+++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-provisioner-sts.yaml
@@ -0,0 +1,150 @@
+# Copyright 2021 Nutanix Inc
+#
+# example usage: kubectl create -f
+
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: csi-provisioner-ntnx-plugin
+  namespace: {{ .Release.Namespace }}
+spec:
+  serviceName: csi-provisioner-ntnx-plugin
+  replicas: 1
+  selector:
+    matchLabels:
+      app: csi-provisioner-ntnx-plugin
+  template:
+    metadata:
+      labels:
+        app: csi-provisioner-ntnx-plugin
+    spec:
+      serviceAccount: csi-provisioner
+      hostNetwork: true
+      containers:
+      - name: csi-provisioner
+        image: {{ .Values.sidecars.provisioner.image }}
+        imagePullPolicy: {{ .Values.imagePullPolicy }}
+        args:
+        - --csi-address=$(ADDRESS)
+        - --timeout=60s
+        - --worker-threads=16
+        # This adds PV/PVC metadata to create volume requests
+        - --extra-create-metadata=true
+        - --default-fstype=ext4
+        # This is used to collect CSI operation metrics
+        - --http-endpoint=:9809
+        - --v=5
+        env:
+        - name: ADDRESS
+          value: /var/lib/csi/sockets/pluginproxy/csi.sock
+        resources:
+          limits:
+            cpu: 100m
+            memory: 200Mi
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: socket-dir
+          mountPath: /var/lib/csi/sockets/pluginproxy/
+      - name: csi-resizer
+        image: {{ .Values.sidecars.resizer.image }}
+        imagePullPolicy: {{ .Values.imagePullPolicy }}
+        args:
+        - --v=5
+        - --csi-address=$(ADDRESS)
+        - --timeout=60s
+        - --leader-election=false
+        # NTNX CSI driver supports online volume expansion.
+ - --handle-volume-inuse-error=false + - --http-endpoint=:9810 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-snapshotter + {{- if .Capabilities.APIVersions.Has "snapshot.storage.k8s.io/v1" }} + image: {{ .Values.sidecars.snapshotter.image }} + {{- else }} + image: {{ .Values.sidecars.snapshotter.imageBeta }} + {{- end }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + args: + - --csi-address=$(ADDRESS) + - --leader-election=false + - --logtostderr=true + - --timeout=300s + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: ntnx-csi-plugin + image: {{ .Values.provisioner.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + securityContext: + allowPrivilegeEscalation: true + privileged: true + args: + - --endpoint=$(CSI_ENDPOINT) + - --nodeid=$(NODE_ID) + - --drivername={{ include "nutanix-csi-storage.drivername" . }} + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + # This is needed for static NFS volume feature. + - mountPath: /host + name: root-dir + ports: + - containerPort: 9807 + name: http-endpoint + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: http-endpoint + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + failureThreshold: 3 + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: {{ .Values.sidecars.livenessprobe.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + args: + - --csi-address=/csi/csi.sock + - --http-endpoint=:9807 + {{- with (.Values.provisioner.nodeSelector | default .Values.nodeSelector) }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with (.Values.provisioner.tolerations | default .Values.tolerations) }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + - emptyDir: {} + name: socket-dir + - hostPath: + path: / + type: Directory + name: root-dir diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-rbac.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-rbac.yaml new file mode 100644 index 000000000..316aa0935 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-rbac.yaml @@ -0,0 +1,130 @@ +# Copyright 2018 Nutanix Inc +# +# Configuration to deploy the Nutanix CSI driver +# +# example usage: kubectl create -f + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-provisioner + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-provisioner-runner + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-role + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: csi-provisioner + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: external-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +# needed for StatefulSet +kind: Service +apiVersion: v1 +metadata: + name: csi-provisioner-ntnx-plugin + namespace: {{ .Release.Namespace }} + labels: + app: csi-provisioner-ntnx-plugin +spec: + selector: + app: csi-provisioner-ntnx-plugin + ports: + - name: dummy + port: 12345 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-node-ntnx-plugin + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-node-runner + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "update"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", 
"list", "watch", "update"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-node-role + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: csi-node-ntnx-plugin + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-node-runner + apiGroup: rbac.authorization.k8s.io + diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-scc.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-scc.yaml new file mode 100644 index 000000000..89a543a54 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-csi-scc.yaml @@ -0,0 +1,30 @@ +{{- if eq .Values.os "openshift4"}} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: ntnx-csi-scc +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: true +allowHostPID: false +allowHostPorts: true +allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: [] +defaultAddCapabilities: [] +fsGroup: + type: RunAsAny +groups: [] +priority: +readOnlyRootFilesystem: false +requiredDropCapabilities: [] +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +supplementalGroups: + type: RunAsAny +users: + - system:serviceaccount:{{ .Release.Namespace }}:csi-provisioner + - system:serviceaccount:{{ .Release.Namespace }}:csi-node-ntnx-plugin +{{- end}} \ No newline at end of file diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-sc.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-sc.yaml new file mode 100644 index 000000000..b44667029 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-sc.yaml @@ -0,0 +1,82 @@ +{{- if eq .Values.volumeClass true }} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.volumeClassName }} +{{- if eq .Values.defaultStorageClass "volume" }} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{{- end }} +provisioner: {{ include "nutanix-csi-storage.drivername" . }} +parameters: + storageType: NutanixVolumes + csi.storage.k8s.io/provisioner-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }} + csi.storage.k8s.io/node-publish-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/node-publish-secret-namespace: {{ .Release.Namespace }} + csi.storage.k8s.io/controller-expand-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Release.Namespace }} + storageContainer: {{ .Values.storageContainer }} + csi.storage.k8s.io/fstype: {{ .Values.fsType }} + isSegmentedIscsiNetwork: {{ quote .Values.networkSegmentation }} +{{- if eq .Values.lvmVolume true }} + isLVMVolume: "true" + numLVMDisks: {{ quote .Values.lvmDisks }} +{{- end }} +allowVolumeExpansion: true +reclaimPolicy: Delete +--- +{{- if .Capabilities.APIVersions.Has "snapshot.storage.k8s.io/v1" }} +apiVersion: snapshot.storage.k8s.io/v1 +{{- else }} +apiVersion: snapshot.storage.k8s.io/v1beta1 +{{- end }} +kind: VolumeSnapshotClass +metadata: + name: nutanix-snapshot-class +driver: {{ include "nutanix-csi-storage.drivername" . 
}} +parameters: + storageType: NutanixVolumes + csi.storage.k8s.io/snapshotter-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/snapshotter-secret-namespace: {{ .Release.Namespace }} +deletionPolicy: Delete +{{- end }} +--- +{{- if eq .Values.fileClass true }} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.fileClassName }} +{{- if eq .Values.defaultStorageClass "file" }} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{{- end }} +provisioner: {{ include "nutanix-csi-storage.drivername" . }} +parameters: + storageType: NutanixFiles + nfsServer: {{ .Values.fileHost }} + nfsPath: {{ .Values.filePath }} +{{- end }} +--- +{{- if eq .Values.dynamicFileClass true }} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: {{ .Values.dynamicFileClassName }} +{{- if eq .Values.defaultStorageClass "dynfile" }} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{{- end }} +provisioner: {{ include "nutanix-csi-storage.drivername" . }} +parameters: + storageType: NutanixFiles + dynamicProv: ENABLED + nfsServerName: {{ .Values.fileServerName }} + csi.storage.k8s.io/provisioner-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }} + csi.storage.k8s.io/node-publish-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/node-publish-secret-namespace: {{ .Release.Namespace }} + csi.storage.k8s.io/controller-expand-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Release.Namespace }} +allowVolumeExpansion: true +{{- end }} \ No newline at end of file diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-secret.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-secret.yaml new file mode 100644 index 000000000..dbf22bdfc --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/ntnx-secret.yaml @@ -0,0 +1,11 @@ +{{- if eq .Values.createSecret true }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.secretName }} + namespace: {{ .Release.Namespace }} +data: + # base64 encoded prism-ip:prism-port:admin:password. + # E.g.: echo -n "10.83.0.91:9440:admin:mypassword" | base64 + key: {{ printf "%s:9440:%s:%s" .Values.prismEndPoint .Values.username .Values.password | b64enc}} +{{- end }} \ No newline at end of file diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/service-prometheus-csi.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/service-prometheus-csi.yaml new file mode 100644 index 000000000..e55eabfa5 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/templates/service-prometheus-csi.yaml @@ -0,0 +1,46 @@ +# Copyright 2021 Nutanix Inc +# +# example usage: kubectl create -f +# + +apiVersion: v1 +kind: Service +metadata: + name: csi-metrics-service + namespace: {{ .Release.Namespace }} + labels: + app: csi-provisioner-ntnx-plugin +spec: + type: ClusterIP + selector: + app: csi-provisioner-ntnx-plugin + ports: + - name: provisioner + port: 9809 + targetPort: 9809 + protocol: TCP + - name: resizer + port: 9810 + targetPort: 9810 + protocol: TCP +{{- if eq .Values.servicemonitor.enabled true }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: +{{- with .Values.servicemonitor.labels }} + {{- toYaml . 
| nindent 4 }}
+{{- end }}
+  name: csi-driver
+  namespace: {{ .Release.Namespace }}
+spec:
+  endpoints:
+  - interval: 30s
+    port: provisioner
+  - interval: 30s
+    port: resizer
+  selector:
+    matchLabels:
+      app: csi-provisioner-ntnx-plugin
+{{- end }}
\ No newline at end of file
diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/values.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/values.yaml
new file mode 100644
index 000000000..45a241201
--- /dev/null
+++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.5.100/values.yaml
@@ -0,0 +1,119 @@
+# Default values for nutanix-csi-storage.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# parameters
+
+# Legacy mode
+#
+# if legacy is set to true, we keep the old reverse-domain notation for the CSI driver name (com.nutanix.csi).
+# needs to be set to true only if upgrading a deployment initially installed with a chart version before 2.2.x
+legacy: false
+
+# OS settings
+#
+# Starting with v2.3.1 the CSI driver is OS independent; this value is reserved
+os: none
+
+# kubeletDir allows overriding the host location of kubelet's internal state.
+kubeletDir: "/var/lib/kubelet"
+
+# Global Settings for all pods
+
+nodeSelector: {}
+tolerations: []
+imagePullPolicy: IfNotPresent
+
+# Storage Class settings
+#
+# choose for which mode (Volume, File, Dynamic File) a storageclass needs to be created
+volumeClass: false
+volumeClassName: "nutanix-volume"
+
+fileClass: false
+fileClassName: "nutanix-file"
+
+dynamicFileClass: false
+dynamicFileClassName: "nutanix-dynamicfile"
+
+
+# Default Storage Class settings
+#
+# Decide which storageclass will be the default
+# values are: none, volume, file, dynfile
+defaultStorageClass: none
+
+# Nutanix Prism Elements settings
+#
+# Allow dynamic creation of Volumes and Fileshares
+# needed if volumeClass or dynamicFileClass is set to true
+
+prismEndPoint: 10.0.0.1
+
+username: admin
+password: nutanix/4u
+
+secretName: ntnx-secret
+
+# Nutanix Prism Elements Existing Secret
+#
+# if set to false a new secret will not be created
+createSecret: true
+
+
+# Volumes Settings
+#
+storageContainer: default
+fsType: xfs
+
+lvmVolume: false
+lvmDisks: 4
+
+networkSegmentation: false
+
+# Files Settings
+#
+fileHost: 10.0.0.3
+filePath: share
+
+# Dynamic Files Settings
+#
+fileServerName: file
+
+
+# Volume metrics and CSI operations metrics configuration
+#
+
+servicemonitor:
+  enabled: false
+  labels:
+    # This should match the serviceMonitorSelector logic configured
+    # on the prometheus.
+    k8s-app: csi-driver
+
+
+# Pod specific Settings
+#
+
+provisioner:
+  image: quay.io/karbon/ntnx-csi:v2.5.1
+  nodeSelector: {}
+  tolerations: []
+
+node:
+  image: quay.io/karbon/ntnx-csi:v2.5.1
+  nodeSelector: {}
+  tolerations: []
+
+sidecars:
+  registrar:
+    image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
+  provisioner:
+    image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
+  snapshotter:
+    image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1
+    imageBeta: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3
+  resizer:
+    image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0
+  livenessprobe:
+    image: k8s.gcr.io/sig-storage/livenessprobe:v2.3.0
diff --git a/index.yaml b/index.yaml
index 117403bf8..81fa8b62b 100755
--- a/index.yaml
+++ b/index.yaml
@@ -2506,6 +2506,47 @@ entries:
     - assets/nutanix-csi-snapshot/nutanix-csi-snapshot-1.0.0.tgz
     version: 1.0.0
   nutanix-csi-storage:
+  - annotations:
+      artifacthub.io/changes: |
+        - Update Nutanix CSI Driver to 2.5.0
+      artifacthub.io/containsSecurityUpdates: "true"
+      artifacthub.io/displayName: Nutanix CSI Storage
+      artifacthub.io/links: |
+        - name: Nutanix CSI Driver documentation
+          url: https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_5_0:CSI-Volume-Driver-v2_5_0
+      artifacthub.io/maintainers: |
+        - name: Nutanix Cloud Native Team
+          email: cloudnative@nutanix.com
+      artifacthub.io/recommendations: |
+        - url: https://artifacthub.io/packages/helm/nutanix/nutanix-csi-snapshot
+      catalog.cattle.io/certified: partner
+      catalog.cattle.io/display-name: Nutanix CSI Storage
+      catalog.cattle.io/release-name: nutanix-csi-storage
+    apiVersion: v1
+    appVersion: 2.5.1
+    created: "2022-02-17T11:01:02.445518+01:00"
+    description: Nutanix Container Storage Interface (CSI) Driver
+    digest: 9780b825e3298991fb93e6fa7764be7cd8fad51470b8684cd1cac13a5a26e187
+    home: https://github.com/nutanix/helm
+    icon: https://avatars2.githubusercontent.com/u/6165865?s=200&v=4
+    keywords:
+    - Nutanix
+    - Storage
+    - Volumes
+    - Files
+    - StorageClass
+    - RedHat
+    - CentOS
+    - Ubuntu
+    - CSI
+    kubeVersion: '>= 1.17.0-0'
+    maintainers:
+    - email: cloudnative@nutanix.com
+      name: nutanix-cloud-native-bot
+    name: nutanix-csi-storage
+    urls:
+    - assets/nutanix-csi-storage/nutanix-csi-storage-2.5.100.tgz
+    version: 2.5.100
   - annotations:
       artifacthub.io/changes: |
         - Update Nutanix CSI Driver to 2.5.0

From 932a292a18989cd66f99d6fdf5821c40cc07869e Mon Sep 17 00:00:00 2001
From: "akankshakumari393@gmail.com"
Date: Thu, 17 Feb 2022 18:49:34 +0530
Subject: [PATCH 03/13] Changes in Package to add K10 Chart 4.5.9

---
 .../generated-changes/overlay/app-readme.md  |   5 +
 .../generated-changes/overlay/questions.yaml | 295 ++++++++++++++++++
 .../generated-changes/patch/Chart.yaml.patch |  18 ++
 packages/k10/package.yaml                    |   2 +
 4 files changed, 320 insertions(+)
 create mode 100644 packages/k10/generated-changes/overlay/app-readme.md
 create mode 100644 packages/k10/generated-changes/overlay/questions.yaml
 create mode 100644 packages/k10/generated-changes/patch/Chart.yaml.patch
 create mode 100644 packages/k10/package.yaml

diff --git a/packages/k10/generated-changes/overlay/app-readme.md b/packages/k10/generated-changes/overlay/app-readme.md
new file mode 100644
index 000000000..1b221891b
--- /dev/null
+++ b/packages/k10/generated-changes/overlay/app-readme.md
@@ -0,0 +1,5 @@
+The K10 data management platform, purpose-built for Kubernetes, provides enterprise operations teams an easy-to-use, scalable, and secure system for backup/restore, disaster recovery, and mobility of Kubernetes
applications. + +K10’s application-centric approach and deep integrations with relational and NoSQL databases, Kubernetes distributions, and all clouds provide teams the freedom of infrastructure choice without sacrificing operational simplicity. Policy-driven and extensible, K10 provides a native Kubernetes API and includes features such as full-spectrum consistency, database integrations, automatic application discovery, multi-cloud mobility, and a powerful web-based user interface. + +For more information, refer to the docs [https://docs.kasten.io/](https://docs.kasten.io/)
diff --git a/packages/k10/generated-changes/overlay/questions.yaml b/packages/k10/generated-changes/overlay/questions.yaml new file mode 100644 index 000000000..713fcb116 --- /dev/null +++ b/packages/k10/generated-changes/overlay/questions.yaml @@ -0,0 +1,295 @@ +questions: +# ======================== +# SECRETS And Configuration +# ======================== + +### AWS Configuration + +- variable: secrets.awsAccessKeyId + description: "AWS access key ID (required for AWS deployment)" + type: password + label: AWS Access Key ID + required: false + group: "AWS Configuration" + +- variable: secrets.awsSecretAccessKey + description: "AWS access key secret (required for AWS deployment)" + type: password + label: AWS Secret Access Key + required: false + group: "AWS Configuration" + +- variable: secrets.awsIamRole + description: "ARN of the AWS IAM role assumed by K10 to perform any AWS operation." + type: string + label: ARN of the AWS IAM role + required: false + group: "AWS Configuration" + +- variable: awsConfig.assumeRoleDuration + description: "Duration of a session token generated by AWS for an IAM role" + type: string + label: Role Duration + required: false + default: "" + group: "AWS Configuration" + +- variable: awsConfig.efsBackupVaultName + description: "Specifies the AWS EFS backup vault name" + type: string + label: EFS Backup Vault Name + required: false + default: "k10vault" + group: "AWS Configuration" + +### Google Cloud Configuration + +- variable: secrets.googleApiKey + description: "Required if cluster is deployed on Google Cloud" + type: multiline + label: Non-default base64 encoded GCP Service Account key file + required: false + group: "GoogleApi Configuration" + +### Azure Configuration + +- variable: secrets.azureTenantId + description: "Azure tenant ID (required for Azure deployment)" + type: string + label: Tenant ID + required: false + group: "Azure Configuration" + +- variable: secrets.azureClientId + description: "Azure Service App ID" + type: password + label: Service App ID + required: false + group: "Azure Configuration" + +- variable: secrets.azureClientSecret + description: "Azure Service App secret" + type: password + label: Service App secret + required: false + group: "Azure Configuration" + +- variable: secrets.azureResourceGroup + description: "Resource Group name that was created for the Kubernetes cluster" + type: string + label: Resource Group + required: false + group: "Azure Configuration" + +- variable: secrets.azureSubscriptionID + description: "Subscription ID in your Azure tenant" + type: string + label: Subscription ID + required: false + group: "Azure Configuration" + +- variable: secrets.azureResourceMgrEndpoint + description: "Resource management endpoint for the Azure Stack instance" + type: string + label: Resource management endpoint + required: false + group: "Azure Configuration" + +- variable: secrets.azureADEndpoint + description: "Azure Active Directory login
endpoint" + type: string + label: Active Directory login endpoint + required: false + group: "Azure Configuration" + +- variable: secrets.azureADResourceID + description: "Azure Active Directory resource ID to obtain AD tokens" + type: string + label: Active Directory resource ID + required: false + group: "Azure Configuration" + +# ======================== +# Authentication +# ======================== + +- variable: auth.basicAuth.enabled + description: "Configures basic authentication for the K10 dashboard" + type: boolean + label: Enable Basic Authentication + required: false + group: "Authentication" + show_subquestion_if: true + subquestions: + - variable: auth.basicAuth.htpasswd + description: "A username and password pair separated by a colon character" + type: password + label: Authentication Details (htpasswd) + - variable: auth.basicAuth.secretName + description: "Name of an existing Secret that contains a file generated with htpasswd" + type: string + label: Secret Name + +- variable: auth.tokenAuth.enabled + description: "Configures token based authentication for the K10 dashboard" + type: boolean + label: Enable Token Based Authentication + required: false + group: "Authentication" + +- variable: auth.oidcAuth.enabled + description: "Configures Open ID Connect based authentication for the K10 dashboard" + type: boolean + label: Enable OpenID Connect Based Authentication + required: false + group: "Authentication" + show_subquestion_if: true + subquestions: + - variable: auth.oidcAuth.providerURL + description: "URL for the OIDC Provider" + type: string + label: OIDC Provider URL + - variable: auth.oidcAuth.redirectURL + description: "URL for the K10 gateway Provider" + type: string + label: OIDC Redirect URL + - variable: auth.oidcAuth.scopes + description: "Space separated OIDC scopes required for userinfo. 
Example: `profile email`" + type: string + label: OIDC scopes + - variable: auth.oidcAuth.prompt + description: "The type of prompt to be used during authentication (none, consent, login, or select_account)" + type: enum + options: + - none + - consent + - login + - select_account + default: none + label: The type of prompt to be used during authentication (none, consent, login, or select_account) + - variable: auth.oidcAuth.clientID + description: "Client ID given by the OIDC provider for K10" + type: password + label: OIDC Client ID + - variable: auth.oidcAuth.clientSecret + description: "Client secret given by the OIDC provider for K10" + type: password + label: OIDC Client Secret + - variable: auth.oidcAuth.usernameClaim + description: "The claim to be used as the username" + type: string + label: OIDC UserName Claim + - variable: auth.oidcAuth.usernamePrefix + description: "Prefix that has to be used with the username obtained from the username claim" + type: string + label: OIDC UserName Prefix + - variable: auth.oidcAuth.groupClaim + description: "Name of a custom OpenID Connect claim for specifying user groups" + type: string + label: OIDC group Claim + - variable: auth.oidcAuth.groupPrefix + description: "All groups will be prefixed with this value to prevent conflicts" + type: string + label: OIDC group Prefix + +# ======================== +# External Gateway +# ======================== + +- variable: externalGateway.create + description: "Configures an external gateway for K10 API services" + type: boolean + label: Create External Gateway + required: false + group: "External Gateway" + show_subquestion_if: true + subquestions: + - variable: externalGateway.annotations + description: "Standard annotations for the services" + type: multiline + default: "" + label: Annotation + - variable: externalGateway.fqdn.name + description: "Domain name for the K10 API services" + type: string + label: Domain Name + - variable: externalGateway.fqdn.type + description: "Supported gateway type: `route53-mapper` or `external-dns`" + type: string + label: Gateway Type route53-mapper or external-dns + - variable: externalGateway.awsSSLCertARN + description: "ARN for the AWS ACM SSL certificate used in the K10 API server" + type: multiline + label: ARN for the AWS ACM SSL certificate + +# ======================== +# Storage Management +# ======================== + +- variable: global.persistence.storageClass + label: StorageClass Name + description: "Specifies StorageClass Name to be used for PVCs" + type: string + required: false + default: "" + group: "Storage Management" + +- variable: prometheus.server.persistentVolume.storageClass + type: string + label: StorageClass Name for Prometheus PVC + description: "StorageClassName used to create Prometheus PVC. 
Setting this option overwrites global StorageClass value" + default: "" + required: false + group: "Storage Management" + +- variable: prometheus.server.persistentVolume.enabled + type: boolean + label: Enable PVC for Prometheus server + description: "If true, K10 Prometheus server will create a Persistent Volume Claim" + default: true + required: false + group: "Storage Management" + +- variable: global.persistence.enabled + type: boolean + label: Storage Enabled + description: "If true, K10 will use Persistent Volume Claim" + default: true + required: false + group: "Storage Management" + +# ======================== +# Service Account +# ======================== + +- variable: serviceAccount.name + description: "Name of a service account in the target namespace that has cluster-admin permissions. This is needed for K10 to be able to protect cluster resources." + type: string + label: Service Account Name + required: false + group: "Service Account" + +# ======================== +# License +# ======================== + +- variable: license + description: "License string obtained from Kasten" + type: multiline + label: License String + group: "License" +- variable: eula.accept + description: "Whether to accept the EULA before installation" + type: boolean + label: Accept EULA before installation + group: "License" + show_subquestion_if: true + subquestions: + - variable: eula.company + description: "Company name. Required field if EULA is accepted" + type: string + label: Company Name + - variable: eula.email + description: "Contact email. Required field if EULA is accepted" + type: string + label: Contact Email
diff --git a/packages/k10/generated-changes/patch/Chart.yaml.patch b/packages/k10/generated-changes/patch/Chart.yaml.patch new file mode 100644 index 000000000..d6abc00bb --- /dev/null +++ b/packages/k10/generated-changes/patch/Chart.yaml.patch @@ -0,0 +1,18 @@ +--- charts-original/Chart.yaml ++++ charts/Chart.yaml +@@ -2,9 +2,14 @@ + appVersion: 4.5.9 + description: Kasten’s K10 Data Management Platform + home: https://kasten.io/ +-icon: https://docs.kasten.io/_static/kasten.png ++icon: https://docs.kasten.io/_static/kasten-logo-vertical.png ++kubeVersion: '>= 1.17.0-0' + maintainers: + - email: support@kasten.io + name: kastenIO + name: k10 + version: 4.5.9 ++annotations: ++ catalog.cattle.io/certified: partner ++ catalog.cattle.io/display-name: K10 ++ catalog.cattle.io/release-name: k10
diff --git a/packages/k10/package.yaml b/packages/k10/package.yaml new file mode 100644 index 000000000..43c2821b0 --- /dev/null +++ b/packages/k10/package.yaml @@ -0,0 +1,2 @@ +url: https://charts.kasten.io/k10-4.5.9.tgz +packageVersion: 00
From 4184f0d50fca7d95892f844177b897d941f74270 Mon Sep 17 00:00:00 2001 From: "akankshakumari393@gmail.com" Date: Thu, 17 Feb 2022 18:54:33 +0530 Subject: [PATCH 04/13] Result of running 'make charts' --- assets/k10/k10-4.5.900.tgz | Bin 0 -> 113824 bytes charts/k10/k10/4.5.900/Chart.yaml | 15 + charts/k10/k10/4.5.900/README.md | 226 ++ charts/k10/k10/4.5.900/app-readme.md | 5 + .../k10/4.5.900/charts/grafana/.helmignore | 23 + .../k10/k10/4.5.900/charts/grafana/Chart.yaml | 22 + .../k10/k10/4.5.900/charts/grafana/README.md | 528 ++++ .../charts/grafana/templates/NOTES.txt | 54 + .../charts/grafana/templates/_definitions.tpl | 3 + .../charts/grafana/templates/_helpers.tpl | 235 ++ .../4.5.900/charts/grafana/templates/_pod.tpl | 509 ++++ .../charts/grafana/templates/clusterrole.yaml | 27 + .../grafana/templates/clusterrolebinding.yaml | 26 +
.../configmap-dashboard-provider.yaml | 31 + .../charts/grafana/templates/configmap.yaml | 99 + .../templates/dashboards-json-configmap.yaml | 37 + .../charts/grafana/templates/deployment.yaml | 52 + .../grafana/templates/headless-service.yaml | 20 + .../4.5.900/charts/grafana/templates/hpa.yaml | 22 + .../templates/image-renderer-deployment.yaml | 117 + .../image-renderer-network-policy.yaml | 78 + .../templates/image-renderer-service.yaml | 32 + .../charts/grafana/templates/ingress.yaml | 80 + .../grafana/templates/networkpolicy.yaml | 18 + .../templates/poddisruptionbudget.yaml | 24 + .../grafana/templates/podsecuritypolicy.yaml | 51 + .../4.5.900/charts/grafana/templates/pvc.yaml | 33 + .../charts/grafana/templates/role.yaml | 34 + .../charts/grafana/templates/rolebinding.yaml | 27 + .../charts/grafana/templates/secret-env.yaml | 16 + .../charts/grafana/templates/secret.yaml | 28 + .../charts/grafana/templates/service.yaml | 58 + .../grafana/templates/serviceaccount.yaml | 15 + .../grafana/templates/servicemonitor.yaml | 42 + .../charts/grafana/templates/statefulset.yaml | 55 + .../k10/4.5.900/charts/grafana/values.yaml | 2701 +++++++++++++++++ .../k10/4.5.900/charts/prometheus/Chart.yaml | 30 + .../k10/4.5.900/charts/prometheus/README.md | 224 ++ .../charts/prometheus/templates/NOTES.txt | 112 + .../prometheus/templates/_definitions.tpl | 3 + .../charts/prometheus/templates/_helpers.tpl | 400 +++ .../templates/alertmanager/clusterrole.yaml | 21 + .../alertmanager/clusterrolebinding.yaml | 20 + .../prometheus/templates/alertmanager/cm.yaml | 19 + .../templates/alertmanager/deploy.yaml | 161 + .../templates/alertmanager/headless-svc.yaml | 31 + .../templates/alertmanager/ingress.yaml | 57 + .../templates/alertmanager/netpol.yaml | 20 + .../templates/alertmanager/pdb.yaml | 14 + .../templates/alertmanager/psp.yaml | 46 + .../templates/alertmanager/pvc.yaml | 39 + .../templates/alertmanager/role.yaml | 24 + .../templates/alertmanager/rolebinding.yaml | 23 + .../templates/alertmanager/service.yaml | 53 + .../alertmanager/serviceaccount.yaml | 11 + .../templates/alertmanager/sts.yaml | 187 ++ .../templates/node-exporter/daemonset.yaml | 146 + .../templates/node-exporter/psp.yaml | 55 + .../templates/node-exporter/role.yaml | 17 + .../templates/node-exporter/rolebinding.yaml | 19 + .../node-exporter/serviceaccount.yaml | 11 + .../templates/node-exporter/svc.yaml | 47 + .../templates/pushgateway/clusterrole.yaml | 21 + .../pushgateway/clusterrolebinding.yaml | 16 + .../templates/pushgateway/deploy.yaml | 119 + .../templates/pushgateway/ingress.yaml | 54 + .../templates/pushgateway/netpol.yaml | 20 + .../prometheus/templates/pushgateway/pdb.yaml | 14 + .../prometheus/templates/pushgateway/psp.yaml | 42 + .../prometheus/templates/pushgateway/pvc.yaml | 37 + .../templates/pushgateway/service.yaml | 41 + .../templates/pushgateway/serviceaccount.yaml | 11 + .../templates/server/clusterrole.yaml | 48 + .../templates/server/clusterrolebinding.yaml | 16 + .../prometheus/templates/server/cm.yaml | 82 + .../prometheus/templates/server/deploy.yaml | 261 ++ .../templates/server/headless-svc.yaml | 37 + .../prometheus/templates/server/ingress.yaml | 59 + .../prometheus/templates/server/netpol.yaml | 18 + .../prometheus/templates/server/pdb.yaml | 14 + .../prometheus/templates/server/psp.yaml | 51 + .../prometheus/templates/server/pvc.yaml | 41 + .../templates/server/rolebinding.yaml | 20 + .../prometheus/templates/server/service.yaml | 60 + .../templates/server/serviceaccount.yaml | 13 + 
.../prometheus/templates/server/sts.yaml | 285 ++ .../prometheus/templates/server/vpa.yaml | 24 + .../k10/4.5.900/charts/prometheus/values.yaml | 1737 +++++++++++ charts/k10/k10/4.5.900/config.json | 0 charts/k10/k10/4.5.900/eula.txt | 458 +++ charts/k10/k10/4.5.900/files/favicon.png | Bin 0 -> 1802 bytes charts/k10/k10/4.5.900/files/kasten-logo.svg | 24 + charts/k10/k10/4.5.900/files/styles.css | 113 + charts/k10/k10/4.5.900/license | 1 + charts/k10/k10/4.5.900/questions.yaml | 295 ++ charts/k10/k10/4.5.900/templates/NOTES.txt | 47 + .../k10/4.5.900/templates/_definitions.tpl | 184 ++ charts/k10/k10/4.5.900/templates/_helpers.tpl | 645 ++++ .../k10/4.5.900/templates/_k10_container.tpl | 652 ++++ .../k10/4.5.900/templates/_k10_metering.tpl | 261 ++ .../4.5.900/templates/_k10_serviceimage.tpl | 51 + .../k10/4.5.900/templates/_k10_template.tpl | 190 ++ .../4.5.900/templates/api-tls-secrets.yaml | 13 + .../k10/k10/4.5.900/templates/apiservice.yaml | 25 + .../k10/k10/4.5.900/templates/daemonsets.yaml | 26 + .../k10/4.5.900/templates/deployments.yaml | 30 + .../templates/fluentbit-configmap.yaml | 34 + .../k10/4.5.900/templates/gateway-ext.yaml | 33 + charts/k10/k10/4.5.900/templates/gateway.yaml | 134 + .../k10/4.5.900/templates/grafana-scc.yaml | 44 + charts/k10/k10/4.5.900/templates/ingress.yaml | 46 + .../k10/k10/4.5.900/templates/k10-config.yaml | 228 ++ .../k10/k10/4.5.900/templates/k10-eula.yaml | 21 + .../4.5.900/templates/kopia-tls-certs.yaml | 33 + charts/k10/k10/4.5.900/templates/license.yaml | 25 + .../4.5.900/templates/mutatingwebhook.yaml | 51 + .../k10/4.5.900/templates/networkpolicy.yaml | 192 ++ .../templates/prometheus-configmap.yaml | 70 + .../4.5.900/templates/prometheus-service.yaml | 44 + charts/k10/k10/4.5.900/templates/rbac.yaml | 234 ++ charts/k10/k10/4.5.900/templates/route.yaml | 36 + charts/k10/k10/4.5.900/templates/scc.yaml | 43 + charts/k10/k10/4.5.900/templates/secrets.yaml | 241 ++ .../k10/4.5.900/templates/serviceaccount.yaml | 27 + .../k10/k10/4.5.900/templates/v0services.yaml | 162 + charts/k10/k10/4.5.900/triallicense | 1 + charts/k10/k10/4.5.900/values.schema.json | 1089 +++++++ charts/k10/k10/4.5.900/values.yaml | 455 +++ index.yaml | 20 + 129 files changed, 16552 insertions(+) create mode 100644 assets/k10/k10-4.5.900.tgz create mode 100644 charts/k10/k10/4.5.900/Chart.yaml create mode 100644 charts/k10/k10/4.5.900/README.md create mode 100644 charts/k10/k10/4.5.900/app-readme.md create mode 100644 charts/k10/k10/4.5.900/charts/grafana/.helmignore create mode 100644 charts/k10/k10/4.5.900/charts/grafana/Chart.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/README.md create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/NOTES.txt create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/_definitions.tpl create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/_helpers.tpl create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/_pod.tpl create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/clusterrole.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/clusterrolebinding.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/configmap-dashboard-provider.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/configmap.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/dashboards-json-configmap.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/deployment.yaml create mode 100644 
charts/k10/k10/4.5.900/charts/grafana/templates/headless-service.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/hpa.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-deployment.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-network-policy.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-service.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/ingress.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/networkpolicy.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/poddisruptionbudget.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/podsecuritypolicy.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/pvc.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/role.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/rolebinding.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/secret-env.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/secret.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/service.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/servicemonitor.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/templates/statefulset.yaml create mode 100644 charts/k10/k10/4.5.900/charts/grafana/values.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/Chart.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/README.md create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/NOTES.txt create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/_definitions.tpl create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/_helpers.tpl create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/clusterrole.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/cm.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/deploy.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/headless-svc.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/ingress.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/netpol.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/pdb.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/psp.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/pvc.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/role.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/rolebinding.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/service.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/sts.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/daemonset.yaml create mode 100644 
charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/psp.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/role.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/rolebinding.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/svc.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/clusterrole.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/deploy.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/ingress.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/netpol.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/pdb.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/psp.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/pvc.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/service.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/clusterrole.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/clusterrolebinding.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/cm.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/deploy.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/headless-svc.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/ingress.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/netpol.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/pdb.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/psp.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/pvc.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/rolebinding.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/service.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/sts.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/templates/server/vpa.yaml create mode 100644 charts/k10/k10/4.5.900/charts/prometheus/values.yaml create mode 100644 charts/k10/k10/4.5.900/config.json create mode 100644 charts/k10/k10/4.5.900/eula.txt create mode 100644 charts/k10/k10/4.5.900/files/favicon.png create mode 100644 charts/k10/k10/4.5.900/files/kasten-logo.svg create mode 100644 charts/k10/k10/4.5.900/files/styles.css create mode 100644 charts/k10/k10/4.5.900/license create mode 100644 charts/k10/k10/4.5.900/questions.yaml create mode 100644 charts/k10/k10/4.5.900/templates/NOTES.txt create mode 100644 charts/k10/k10/4.5.900/templates/_definitions.tpl create mode 100644 charts/k10/k10/4.5.900/templates/_helpers.tpl create mode 100644 charts/k10/k10/4.5.900/templates/_k10_container.tpl create mode 100644 
charts/k10/k10/4.5.900/templates/_k10_metering.tpl create mode 100644 charts/k10/k10/4.5.900/templates/_k10_serviceimage.tpl create mode 100644 charts/k10/k10/4.5.900/templates/_k10_template.tpl create mode 100644 charts/k10/k10/4.5.900/templates/api-tls-secrets.yaml create mode 100644 charts/k10/k10/4.5.900/templates/apiservice.yaml create mode 100644 charts/k10/k10/4.5.900/templates/daemonsets.yaml create mode 100644 charts/k10/k10/4.5.900/templates/deployments.yaml create mode 100644 charts/k10/k10/4.5.900/templates/fluentbit-configmap.yaml create mode 100644 charts/k10/k10/4.5.900/templates/gateway-ext.yaml create mode 100644 charts/k10/k10/4.5.900/templates/gateway.yaml create mode 100644 charts/k10/k10/4.5.900/templates/grafana-scc.yaml create mode 100644 charts/k10/k10/4.5.900/templates/ingress.yaml create mode 100644 charts/k10/k10/4.5.900/templates/k10-config.yaml create mode 100644 charts/k10/k10/4.5.900/templates/k10-eula.yaml create mode 100644 charts/k10/k10/4.5.900/templates/kopia-tls-certs.yaml create mode 100644 charts/k10/k10/4.5.900/templates/license.yaml create mode 100644 charts/k10/k10/4.5.900/templates/mutatingwebhook.yaml create mode 100644 charts/k10/k10/4.5.900/templates/networkpolicy.yaml create mode 100644 charts/k10/k10/4.5.900/templates/prometheus-configmap.yaml create mode 100644 charts/k10/k10/4.5.900/templates/prometheus-service.yaml create mode 100644 charts/k10/k10/4.5.900/templates/rbac.yaml create mode 100644 charts/k10/k10/4.5.900/templates/route.yaml create mode 100644 charts/k10/k10/4.5.900/templates/scc.yaml create mode 100644 charts/k10/k10/4.5.900/templates/secrets.yaml create mode 100644 charts/k10/k10/4.5.900/templates/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.900/templates/v0services.yaml create mode 100644 charts/k10/k10/4.5.900/triallicense create mode 100644 charts/k10/k10/4.5.900/values.schema.json create mode 100644 charts/k10/k10/4.5.900/values.yaml
diff --git a/assets/k10/k10-4.5.900.tgz b/assets/k10/k10-4.5.900.tgz new file mode 100644 index 0000000000000000000000000000000000000000..a8b6f25fd792883c0fe787427b46c22ad9bfd383 GIT binary patch literal 113824
zh^i{^q7HH#=TaAP?aL)M1(}m|T@qD2pPUz5T3;?P$6%pL1Wq?3*^qQ%m<(Q4RYRIo zc#an}LsL~^qj*8&RS^$jPE~YK;w7Le9Iq;xqU)2g%IT^u39^CBn4n35A{0^=G=tLx zP2mkKpWM_1&S2e|OV&kBrp(DYC(F7ZD;m&wPS*$*dEOQzo>LS}Fm!p+kU32^bcLsb zy<~iBA7iiuiBn`eQYICh6AWIK6zqBgfiqM^(k2xVQ<4Qu15KPNOS&Qod496Ivd$Tr ztSLB6<|IiKbdo6OvM%!~CZKW}ukpN`m#QF1oT93NtYbGM8@i(ED!}S0@VcN0`P8PM zOB}Box-Jzm8zLw1h9>Hhioq$oYN$dX)et#DRy9E;_CHN7jKX|!HVUz`5DZyS3Zqar zIGOfLeiUNnR6*6GLUL2kMUJP^D9{i%k%~TFpU!idVDPG1G$@+E8KSNk@}wY%oGgp7 zu3+ydbCQ93eNw>APEdH!0D>xTqNu34FexYkr>c^s3IJ!2R6*8^d}>n=46KDbHh;Oy z*aJz5X6OaNxl}>N=85O=;LMT@l@oYILgYDt4hvo36ipU1UI79|9fl<7N<< zlBUUp%&NwTx-3iTq^@(a#w(&m0Fa_+hNj_ORdq?`4Uqx`mW&6aEb0P}GxMCFivll9 zDjH{qnjrBy!1C5*S(CMVYEzID&X80?RSKC!44f2$(==XGRUHsVtn%8VDsa56YKDj# zAjq6f-2=utf+X{p*K8c9Jg2CjWbkUyiS>MRbgn6LDl9DKc;5Thz=aV~{8dl4bx=lQ>;e zM1#U5K~)4p1frsFlCG${M7U8TQIG`;Hx*7I#!uj}S21|q0HS~gE}@G-onk14s^(KQ zmE#RX&59M~z?XknSD0|C;r?HjM@xE(UjNg6MUIyO!suZf~y08!C7 zUEl>ln>2V%$5uch?a(=$SO-JmG)YxOLkFUvb3COh@I1#WxS@qqY@2yWmoO8#WJxF8 z%*#N;m=-%pF>k#^1^Zc16Xk-b7iEPrcu~R-HkTQXeC9)BozqlJ#)L6mVuY~(RYB)5 z7lOoz*i1{40uEJWMbsrwNL4viQ6$_;0tVr-u43yVbE2-Oq5u{|LE~fv53or=G&oTg zb&-q%>;qL%R41`z6EsyKl(DfQY9WX!Cx}#!MS(M@3gNs46({K@27t2qL|NoCSrBD~ zSUyqLWztOwXW&F3pV|~P>;XhoFJ{i!30#FLs)_>2jmj~WNK%QZ6E(HaGmTYhx=TlXV*CoS{b+C|6&H?9KvaE5EAseD1f?Rz$(3@i}51eyN8Idfi9It93 zuj`Nr#GEYNR zIThOly-;5+HDhiemt1Iyj_okfzI@CdIUD4XB5^v4QYDQuG(k5cAmIR9({xeDr#2-` z;bex#bD4=Dl|@;|8z6~{MWZlKNTX#!Ua)PMpvQFX!Lp6BpkriIzU!OQb{*u47z1W zp}~`?@`fq_SyMSl5JbJu9vR0WhAQyIh(nfmj1~-`5T3|9ftLc7pQvH?Wf)RE)zCRn z5Oo0$&0MmobDAisvY>z*;T#Z>F;Kw@Q-DWJ*$_BgktANrCo2LTOax15JjY9hplV`1 zHRq_Cg=CSFMMX6Xpzt^XrOua^I8jhkK>-R+MzoAIDN9|DMUhi*-%9!9rYuUFPBDvs zeI1$YkY#}rH4OiVOBN+XFmxhhg(9R}YE#w(P9dR8J}-_>nS)d~UQ-lF1F|7;0>v=| zh>IAJVfiSW$VR28at7H_1!MzTHySO9Du!E{CIZ>eus0>)u!ymVXz)5%C|AO5uqYcc zM`lK(d|=+hoM1pdsmYwC>8fIAK*sU#5H`*AX)d)XO9m%Pg28Kr%%aQ*w5RigvGo>J zSuqNe1Ldq8H`)}D6KGJPY8XIhB35)oQ8}67tsHn&RFyNZt&(NjFQO_bK+%c0#3Ln_ z+EjU+U~FB>C##~0A*7VhiBoSMv3q{&TH5IF@0no@y=AaF8`&hz!+Tg z+{`7bSe%NE@ttBAoFwzIT8QNpUE+A^|ML;Pf+HmwK;<(lGRIT9n^TZns-bYaA_2gwGmMj~brV?eR6(-3U;K;Fo?LM#YyrF{}&$-kE1^YUx8@c4BB4guV zsDecqBet)P+4*Yx#B`+*bjhWm>zP(2qU4Mz&2fzyxgL z|2?|`K$0R!68_oty{OhUdyd<0fIE|G9!(E{mC(O)5?p}p9*i8zoCL`9Pz}M*X+OJi zf2G1|^~}AN8IG#A-6oZ`{DT_mnzp~+06Y+b1M*+kX_=?_Nx=TkiDyPJh@3;%0D=@8 zq)GeG>2#w85O^M^b=gl@rFr~47`9zpU)Qm1=;5@$v~9=hU={=(XB%wA4%uK*+Yilv zG{|;P;F|pgaLCQ*wU+B!yZPD#p1HKHLT~_( z?>aV^Xf~UJis^U=M%V&i*_UW1uPFsH#H2jTaNaO64wCUHF(X8bMmmr=S=1FxkfnTc zX-i9WVstlT;%KyeD@Kzg4@`PIE+?+6MN@OL1~FT4gjr0Kcc22-Y_TDKoDEj-Sv1tCyCI0G_ zi~O=PMu6={lztYe{u8j{^E>J|7*(oi!;;a#r>eSy*0<%S1!8Nq2FnFo2WGv z1h_s+TJ2iis%tqszkPjHUYfah9n)GmyeurNUp~0Hz7bvBn(Hq0h3?|a-0srhW%=s$ zx$BFEQ&FQ!$u9A8$9?tGx z+z$@7uUr((ttI#J*3$Ksv>Ds8kqg&TzQpY-7Y}jyWoKGkT$A?~u1{UEMAvKeXd9ND z>3i3OC1q)5>GJk@WqWaMv$^d=o2J`cwl@TSV|H$>C0rD@cZ0*NS@-gm*PY&)TWXr; zgG;TA{evZU$+sPSZ}YIz-xQY?;rZ>0i=wi$y*|aSuXm%>4PJul-4)B53NP;>0q!0I zaL$%2H}I_9_F8Lwde+=>XBYZWw0eD2*^pdeb=T)FZwVf}u&FN1bT6;>Y;Qx@>~C!> zN^8R0rZso29xWZwcGBur$4SCD>hc?IrWjzHILX61=cEXU{6(k`t|L@~HYVW$SQC+P*T)uL_$NuJ$a++KN^-HkWvtU+P*jbC)(YR`*&nn=|YD ztg&+0Klf<$6xjZg*HSvZhq%7kb8mhfN85iPY_A$k((L$F+f$d}!^URpy#=v)VFm z_Evot4!2=-DY?o3Q=6Y!1flOjb~gdJf98_FlY5!y6@BDt37j>(c!J9sg`&{$I=Lp5 zQ-4T}GWQXE%uyb3vt|-i9Bf|lfZXi>lGi>*{x0-DhrFzny$Tf9N-x_?xg6arTqVfW zIoud_t40HmGvqhkG}uPdrnMUfo0!a!*&FV;HS#`lhut56Nzcx$k?=bXz#h5N!!vtF zv;t?Rl>kx;VAUMenq7e>q>pPAHWcT5zvDvCbp3eH#*}Jlhvc6MoU*aDnhdJ>Hn7|n zMKA;oVnd6*b|zn6*6LpDMh=c_vxYDEUM;!yq-7$g$^d$nZ$lfLZ?1qf816Y1#DkDr zGl|D`+c`#S&nL{6iel<84&jNcCeFA{ERHx 
zvQ?DQ6niNhyA`_+T)*RZDKAF=fTIiE+(sngD$%SF%?J|#&(Gwpou8Va2m#$dyKQiylgT)d;ITohCL2@ziuysy&frz}U%_8Np`Z5b5@W3`v zx8<8*&cC*N--V`^gBA1w^nSBR=>4fRqiRmSbWD&q4@5ePQW&LZ%@_DbB#i zbX){ZC)}JlnrW)q43Q18jg#p$mGr&Aj@L;d2jT#-dr1j!$st{G5n94Rz6q&<=Uf{h zNLU!wD>6V%s#g?mW2j}{B<)%!6sHNrhoED_?eZP_#KVQW4RL-3k7f^A(M@e2rkx}G zkX!l?M^h@6WZiUQb)iHtH&z$Y1~1RgG{FkXezd9;vI0taGz%f5Z)Z531uXN98Nq$C zesn1X+FnQ3(@kfoZ{<0U=~*ZcxDN*!(zN+vmXSf!Pai71x5$C!-ejOvk-; z0)A6Aj_kHx5S4Jc-i3rXeZK(Clw2K)6^^})d_bj7d%g!JlY18@sZS@@Y9r`EE81Zf z^qq#GTuVq6mV<2(#0lo31uOKIRUf-N?Kb#qxuD;>>U;)I*7>GVb@ zDS07iz6VW|yiYYu<~fcTCmRTc#C0b19M9282;0s<2}|?=b+=s;sZmr8D$(C4Z>#+$V(|)aGv^;Bd5vp zO@^!>E635m{3uvKP#vo+@m_4)fvJ^wfash9Is!yA>8pr_m_7!(97b+vB`)q&?%4i^|DR&7+FJei*H2 zi~>XQ$MS*6l!+(~0zbr1BGCw1-?h+>BdAEVo*4u%yoO!XwS;iZ_Rul-fz=Jxv7;=~ z!ld$)j_cxmw6?a;gkdzbx>PotSC_I`kgYW^)m#K?YYXJ{AWj=QU}iqq&F*s$42KW>$YF zZ{Ek?rmk!@%VsD}x)?Fs-hp@sgC-btVT?H130VW9P2Y`sL#-S!FE`W>!hK2|H)L22 zxk982X?FIOwPJA&tig!AXUstiVv?P>`=Jv-#CoX63UM9PA^D89kT*kOSy*{={I{~% z9265JIV7q1HnGT)WW)?*r@oU3%>*kcKY%hs-E=_;%)UPaIh2ftW80k&ogEQ(7n(^L z6(jgCw&!SDQCuQAA~P7Au_Ogqxy0*?LTD^^`oc428Y468K)SkzNT8XbP&Tz+Gwq(^ zrSI+VJ;c#>GB^l$ki6q1@66b18QB|kg3ymB5TvVY$;>7?Vx4zz$!z3GVk+`tf#c-O zuJu2$L1sH3heXThF#ZQ{IemG-kt>Hu`L(3|Nqg9XY7zRyuF08}1%u&v^{p5c#&yEbSC9yutCYqlIp(PJAKMOy8>U2LdSF; z#!bM-?*G*W_y3Bbtd948JrbXi{lC1`bGxkzbnou=mHDW3-o3uPr5tWw4tB3zS#?*p zlwG=~cXt1nJ-sCV2HC%RS)FfM{l%I2{e|_Z1N`6S%+&t!b^71J^{M^EIp1scw%w~& zRt2lKdAQv1JM$M-`{vdhT6U&md+Pu-d+u(^W0T)s-nuF;9?mM;4u7ybGu>NQU+pfh zPese?v(jQ;*j-+~=qw#>_ZHVXhf7-aGZrL$y%Z55Xd-b5@Z7yugZ7OT)7o*n9#q*23 zO=0tbeSZ1M()Q~5_TuJ-5?+?JomDYf-xRiY*ZJ*x?b+34OPE&Iu5a`=x8~61?p$y6 z`eyI)#zA29lxb_TyKBv^dRHCes^tX-YqM9w^;!O4O$=riH|;AMb2h)dVP9FAvjo$d zURj%4;@3q(y~r`rN#_zeF^^SP?(7x2_3Cp&3l{YVRowW^L zSzo`nw{`yF%9?js-u9-Ktn1ySt+`e8>elMw`qst6HP7yDOPkZ17u=_sN_1z`ZdH$*eDViL8TS({&C>4yQ-v@^E4!PMlFhl4?J2KB7aDmsU?8YsaHu8A_r*nLbuk=iZz7s!0jY^F|zy>%py)JTK z_ca3WiJu1$_Wa`KU&P?qwZ5;|H(Yp%{&Wor2Q9VQ5v@YnlQ%y zkH}{NY$hvW$t&occ47sJ^8sv1j)nUbR+6r~WHXt-HY1Z-k4l!hM2t)JyT&FbvlP0j zwz0a<0ClXC*^^3Tf^JB1K@f+5k05Y*W`|A-JD!v7UxHyJZaIw#pwH=!fe?~o#D3Us zfDm>Z6oq|`p2FZVvs;f_j*kDS^&HRXnRIp_GCSA}Dm78!NkS02?uzd^R=)w}+e?15 zLf@rasq6}TT4_>E19YsAbNot?3-+QNk{wklVav2|DLUtxPNqyufDHr-Teg~XhHce% zVVxW)IBmlK(idy`9>9dSbdt}eb7*x;?hK{}VgzRyDO|ciyght45ZdUwUQr{c&*mg&L}`1C#9cwl7v`pNuMWujuTe2&+_O-qL}?K9k-p5v|N zDkMqfK`x1>X%xp%8PwSHSF)Lw3MQ356BI}=hZV$W3Z0jp+Jm9lfg6$IvKOp2fXY7@ z+CA9w!~V(0V_~jA2(Do?Xhkl72Z3uk9teEfc2F1-#M_GP4vaVyKseRy%!3Wj%qNa)L)Q0%bq+Ro8MQJ1FrVMH#! z;VTu_*@F+EJRokvG2I#Hn*B9s`JRo4S^yHX_;Hjb63frtJHOryA?o_BO@=}xGtmS= zdw66bs|)Seh2bQCT!#Zy6#3z9l{>l8khK_xEXy#HoYNb!#t^u$mezynm5GVUL{>3% zwN<(%AHJ>xdG?fwG<}QaZ{)Rt_7j9XGXA= zIPE&2PdrADaob=nqy~EZRhsh>?5|~;t}78l(OhWuARa9x_B8<|pxQDcRLB2R6IoaH z1T5`pl4)g>qya$CibE&rH*qk0K`kE!1j7c9H8DvzPZxA(N<06vk_u;S zm>8~uI$%R^L)$&4AC)7$QP<- zg)lmv3IexJu?t-gLKI~8dcMaJILg=p`9ZsKVuHPC9++SybbQhi>86Ir$NmXnh>l=X znV@f)&u3{=Dz?AxVRJo`>>fw+W#it8LziOEI{upIqQ+^`i3k30RaEo$NMp#ixzO*? 
zagy$rNRb(9@QQEec3&afEZ@#06f#m_(L8y<1-`xFhtX1D76TWW$$*eqsKhL(VfKK; z7uez?F;KFo&Q`ri)K+uWMY?wU~QvoL`I-7F)7$X?gI{SwO1tPnHE;0P6!AUrjvl4%k3xrhyz7>K?YNn*0dT z&So5^tS|Z=R`CXP6zM(?Gud^(o`dG<3~-7kAI*FqOAoC3A*E9?3ugHt^ieKStrd!t z`2Yq@7=ot8C3S5L(4(y3fDQ+D#Df#F5i_OC@8Q6_cg-*=oNJE!GlPMVRPEioa+!1X z-U%9@DsUnvadMRzc$||6H}Uk?*H?ReQXlSZ44=GC2oWlE?h|0n5A&yH?>YwrPS8Q$ zj{)lXv1_NW2=mNn)iOj4_8gNuV!I7!hSac=IxyNV2R$v<@6<(J);J zPC~WFuSH#0OX@TOr{;LIj6e-@TA#=^aFgs7uDa&8Yo?bJB}@!*)&O3|@ear{HYH@4 z9v;*0!T&2Sq=Px#bQRYa^8QI~Ck&9}kMQ|=2+|?oDK!;g{!Z0wgV5$x1SjSjRLq-Sn8*SE+F5pfvUzvJ?V?J5tTM>Zb9Js9>0 z6=3C$Z)RJoVg%c<3--Iv0~j^pVNY#hDl2wIEHTksG8=-SNM5B`gP+Bew`E$p(6cjL zW(1hQL~boP6g8NG4$2t8nwj4Zjq!b$2@cE6D6e`*lt*)vB_vUNDR<0<SUIlpXu+b`4 zk0%M6i|5QJM8f$}XBYT(Wq@u2Jik(iXAbCO7z;e_jm!_K+0%d6*imZlbG?@At$b;_De z&ODrZR!YeCcO5uL2={l*v~BDn&M{=mbL4*w3@wvax=B7k)JRUDL9l58 zN|KRS6k2T`w$fR|v?DVNOS+tHA4co|HT8GNd4%Gk1j|GAAbUQY<-p-2-JhPDrzMc3 z*r%)(qG%Htn|ma=hC>J@CCV@Tp3aNrEg$`kp+JvqK~AYb92nPZC#+GrY;K!{%UNth zl{~jg8^j3f>xfO&=*Gg3Z0kE?g9aqI>awZCV9zRvPbL5wI0ev1c6DO_LL#q4--fAI zUxlW<6*>`I_AIi2m0VM+oOcMZVZ_pQJkxa!$rb@hq1dEg&#JNQ(9CVJbD(~>kcTG| z*?dQp0?=fQhX}4wNkf;supLCyhqZc=MgYWc5a>a=6$40Gm^iFNAi(iz)V<)G3>zjl zbeXjABw%s8Me2n}`-V_1p}gLOpl2R9y|@QrgsJ-;b6%haZ71%5WU2-16X4rQ%VK;> zR@mYmJBrtW(LRI}$geqvZ~;4129+IXCk4AO%mQ8%_kiC{>ZC51?0|RO{IGL8KlYlJ53YK$se~~061WGhx8Wh8=8Oh#HPUVdqGU*WEt8@CuNKH zi=edxfidWofhv|zgjFOl!(k^YM5goDv^{v9}NbY-`^q6#Icp;dPP0nP{(;HZR93@X8RF=6g_lEOa zf_+wXjp#RAI9)(@Ez>(R9n1A&n@qZpK-clELn}cVsOET)k5L_}5yJH| zjB9dZn}a|#j6Du@tH4CqA&?^+j+gLyCjv9RwF^UXY1>57&IZ9!Rn)E0FLg%bmR|Q^ zlbARrh2EYM`UEnk=O6M=qyi2qSTTu5hIVp%<}}I2E*eNjE3S?3YP!gu#OByFEeJ3U zUR#-J&eE%dh9?ng1)1k(f0OfF*=!&tK@Nw}jsw+HhB2x^6Gb&)V55R#*RLG&X`r}O zZGbB3qX_mItaes;Bjvww1n<>J#*xGUGm1=$O!yL?g4HMMn7$tq6pcl1f>!Lf&usBmv-jn z7iJrvzGsHm4%M0Es}FBLDfUvGT&0v_Z}$BB@=^n&TEQBja3J=u;e!-I;$e&pGKoHu z2$s5%WciHvGh9|3ESsCY%8|$^aI)_Pm@R*gw7X~SVptcFWtODBZdii3-lLb&hF+zT ztVZE#sswY+g?_JEY9Fzwh9O4V1D4XX?ecK#1m-=ZSx@iP%4MsWfm7cTI6p3MMl2KX>U1}0*ElaF!zS zkkm$ROu0I>xNsL1;bvMQohV|<5y{b00!fm7#_3rbbB1ca>sZ~S6_k(UGL%%t#Cm4E zQ%Nv&7G<|%y7kpTJS(MdHEzq_uRr z)eZ26S)rYoFhhh1<_Vt3FCQc`zPQFx2D!>n%K^n%u!LC6gAto(#t%J=p>q3^Xnf*W zkZ593Wi73e?Eq!VXbX~CGqsD{BltkE~U(Hiqo&q+LqKZoUMAt!CU9XfkB zKrE2wEaY&zI-(njS!7?*FKlvfDvCm<6-NcBQ-aCWc1^m>RyB9`YN=)iRZ zam#g3mkucs!Iys~0A?h+td0c56hkMdH3%Kgaso2iZ8)^5!!QnN!8I>Gun5d+V zG)#po%)Zb`&Jp&7MIV13^t7fg`z0n{<#xC)WYCl!`4Cmh@9gpSp%wc=LG zg>-F=ycdu?PkP^SeQTFJAse2P5I9@qPtO+&O)z7x$0f(f{IHY8)NK8yuv=@QNT`zk zO=iw=zsX@3Hl3C~F-O-Nz+lM?cV+jhBy;j?0(&Su4&9Oi5;b@b3?7GI1!xeSU*`b; zIB2GDjY)x-EPN_5lR2RyX~<mKTTLbVqWwksJVU9ryv>J~gXrdLG$WHc?4yNfvq2guEVPxr$PzOIpd(qNM4? 
z6M1=(6d_5P0MLoLFieH!hn*dqEtkU&^ABmAix^BVhaE1{gB^QV5wHUw*B+sKuh11W zh#J83b_hQ^EbTeT>NobcRMVjgBe;`m`=~+6HUy@VB@cP9COZsS#Ci?8@@pwJB+$ln zTItTrtU!UK^O4udM`)5I4NFPWyMOtTIj9`}GEpsZ}B0CzIU-&sk!Y2HrC0E{Ha zt0wPEo>SFeZ;T*9b`Ty=Gg#K7I%cNJMjNZ5ct>iw0cNyNj_o(PPUMFqyjfaapKXA3 zx(_WQLL33<0RmEKc#zFnf?+8S#mk8TVC*^6$GMsU7^d=`By6O!a2}g<+l76u(6>|` z$vCLw4NWR3Y^ECE`KjQ@iz3rcHMa&>Z4{pYP{sbP+5mSKlfhs~EV@*HJqJG(c)p}G0M#D$G*e!`%JjWFS7df@_uPHY zh{yeaa9y}gd*E|_Dfh<~yuX-mPh}*@YBF-mI;Gn2U^H)68^DWQci>TI2HkbvcOxgL zHb7BORmUFEsHzjO{WSQID#?bd@`@@*N->*@1NdRFRSEb)!+Jf`g;mRUT{A$?9@#xb zy&h^DYoKGV_$A|uJnfDeG5gg95aqHb2SZ|F53m~XLtQw`5f!RF{2&v}u&PxfCvxfN z$W5sXkBU^QLVtfip9WbOmUz)jWazS_8nTU)TKEy7O*R;h9G(_45T#G?==6z=sYc1U zC{!}C%EuNN+d+V_$Ppvg^T~W3u7fYLFQ8EO@r;@+#yj*)e>>2#p}m6#q3OE!v7H9zo?Cqe`}*AKBtVfF!E?{8 zlDG9k8`{sU-hcNqc6LJ3>%e=?oGAr|8Rk5K&HM-%LBxd}=-G3A*fXOevW2is08@3k z49yKDmAdO5T?rh8OHzMMUwVkhwURjewU2P>Saf5r&b)eg1tXo;(dIGD#UK5gXqgR1 zS@~QxW(KOY()Q8M2oHFab53<3y)gQubdN_trNQ7Wih_uEzswjYEE}vPEIeuvkTePb zl|rK%LO{|m?J)uxBcMkS0{X7!R@Wq$4H1Gp*bRzF$@~RpfL<8|J+(I zXkvD1T9=G56dHPf$H(wQ?+rtt@nq2OXAFgIc_@T-ogg>{42|&)0&7%GW56>8JY&F9 z{<*aP&v@-?^xBtD3WRP9?+g`~{9|R#JKl zgT^rEmVrUz<*$(-=q611sACXR(u6Sxdh~#x@d8+qVGs>!YFZ8rTUT&%?9`_LX>uOh2hr6iQ;mlN};s??D)! z9~++^E6Y1xL%TK4j~OSn48tVGcxjB6Ze6^@whS>$G+s$d2%H>AF_#X;=gA(VmoKki zJdHG-MgorzCK@lOjY32t&y$@T5gB7dRMLboB6`#zqVcj?l3@fg8n3R6Y*tcw42#CF z=$3&+yj(UC6y1bL zA#n_fN}4bRMUOsEG+r=EGK>O8;}x@!%}Pp-k-H%01Ay))kZceDLuwPV;pqL;GnOj zRka((LZh#|J~M8DpVa9t({Z(@q9>#s|+vH!CSUUd|aW=Nwn$VOh>O;>BGj zci_XDamEgO?7(kb2k!L7>p6J>C->>|z43a^@Mk=aF_P|h9^+O)LF4tDkr?PEOkzmm z1)br~7y~_uFwl5GC&@5?g~kgyBb$|!9s{8<5V~c6(0D=TLB6)@|kzQ^r@159l zh@yqDS2mVzT>v`T)XFC|wc}fihCgHcdF$fOF!mnS)yQ@VA~D$8K`?Y+iKhoQ7n$B0 z!?3@eU=-7{{oa=8?jDC@ryh13yW+c!)d#ECG=BM}?QDpw&!JqBK7xyHb&q{=by zD&^q@UUB=?jJ$@y5kp=hw}PD+?f9!Ch;(MXETK6SD1HR@}s5Lb;nNn!8++E zjF0irmKm9@-`NQ;D=30qw6o{CaS!g;4%*#8foZ{=7(si7jLdQ`{pfSH$<2gRI$)EC z1Z<$OFMZg3Y0o6PL;3QVg=)|7Mov3?rTXEMuU*_4alFPqOg#7m>z{}R{o&%_q&v2a z(G1)?I26Fpf?ipedg}tB;XTvs z9f$q5k{^EbhW)PJ={Vka;lDs2b*KweI^*C!dp-{S^BKp%|4rp#9Q?CNzLJCg(U+K< z91#r@9|!;A;GYElqY=wjef(bnS03CHKxJfwRb+Kx&&+f|;s>5beaeh&h663GMrN7k zuSP@zBF8HMEmhY+QJFrKgPS}+Dm@^9l_?B~r62X2-5*oBoa^FYSj}EmT^ay+W{*g~ zIwB8Ikx?pCMY~S05xQ%A&pK+zCw*x79*WEoH!+|+1&?t5Kqoka=|S0y(q=kM1< zxjV>hHS*ch>M^tf$J&MAa1ccv9{L^}7DlGnmjoWnn5f(G&2YdwS7YKZs`_lJB^|73 zgF3gDYHIG-;r*2g022TJD@KvutJw+d21^H^?YOW3(g&zMn3cPZe2--hQrKR@K>*6g zbUYZ%+YKP5DIwf*Fh}{cEC^_TECgVWUEea@G8W^|ZGdhR1*lQ4!vnJyxRA5_UOj|? zkDSO4`}GK-D9eSZG#kJ)nTr}f2@6~hldZ5=lRfV zo1S?m0zv5X%&-qEQYH?`+zrmMw7fpsVC}%%_sjt7JFbf*Vfo%33?pdc?r=N@(1Ufj zSP0ux6&m%r?OTZJ(5g5;?9?4>gIt%?Q7_b3$1O;*I-Y}dD=?!j875Tu8=$^thIQ9z z)mg?mdFJ{Zlsxl0$ushZf)Z3xl0BihaG&Gh=6Ib3sKdz0vgICC05Cn%?MIHqxU#y? 
z+TCgU;Z7XbW&{z_Si%#{f}_L+mKX9}{D2YO@nA$V6oyVB01rkLkW)b>1lox^UB3r+ z(uX@(OeHPMnI%>*`AbLO#6-djn2Mt=^diS1T>&i9!<`I0v*kh?>^o5xL|q8lK92q{ zcwk!Ldmz_kSkDj)_h8tlQ0)X0U>%o>T^9s#%XKW!HZ4pb4qedqV-O=qMn>rGBN$HN z-!@hz5JKAtp%o!AW?|2CT*Q>#_Isw|u{uKEk9LR*>$?JHXr&0gA#*+b-XJU@|K;#Sc3>*cvs!qUm^GIqYz0dgx;(GiCQ2uL359R0UgEJkHX7cd;TybzE`1<3!!K zRiB_W*67cgSfd zNeahNcc-Z8q(;I%5q#Yz9OHld&^b&<6aR3sl9mM#+93s!S_7w8)C?RhR~+-3)C0}l zjzdOT)>>%q_+iKNoI^4&(ubDo;E=+x2NDppLOiY~CcwhX)XGg8SBWuZR-2mPw&}Ro z{G!MVBjB_NDMEk@AlEhnF7kUWaIpE_GhN3fK9=-1eww)NL3NxM?(EzXYPEML^eNcN zw8jAsLqsv;Ue+m~hc@=3n01Emi9OiqLKnwwTokxj^VfaGE6JaeQ0k$RS`z7|ZRJ=a zABu;Xz|?A(+Ko}cCF&DJ0VMJt9BE(zT!RM=!hy~j(WYy-|4dw627pWtB$)sJqd;80 z)j8bq$aJuqBErG+2V{Iq^XY*A{29b5y$Xuxk% z0;K1!fv9UnDNI0-kGsP0Xo(P#{-zvP5>-lTNPa%Q##C(&hAkfv!%RZfbE)0MPw&2) zIvaG)JuIc`qX?V>)n1?UQjI>ZCivSA@l#b&Swf894nl51mL`(Xv=g}qoXhnI00_C{ z8iJiY7&`6#z|)rF**ms}D~}!f+#P7rkI!N2f+lV2T(9qk9sI@edxP+6ZE?M1a17}C z=bt&k&i56kfq>^DFaf2$H;h|s!T~do>dUuj8BycD4JCFnvgsZ>|%^r+k zh%)@Q!h#YsnTrA`GRjb11<{t{PIG!@2U4(+MPLnZZ*2BC$FJ`?`%Wix?E0>bYOEN^ z9GRU45I8~PMDiPTP1Xs7;!5ng^v;L|m~Sun(Mkvr^zc{?A>%5EIDqT)9316RC};&S zS+MD`-#yqPED1cnNRT54;k_|L10|H90t(KLRR@p8br)^Y@wf&fEMh#8iBsHl0)R&` z8Lo)~#@e;9UVyldGr{TDb0I?D8Vf+?2(2Zv2Z=F3FginOp>;GsJ?aIy_@$DC z5{y8?4epak;4qIv9Skr5id!s0?BRbgW7lZPH8N^~ILh-!8x`U)k0NN=`DiQSwIGi9 zc4j{&sfDBCh_Zs98CqSUaA`fP4{D?n91rIQQP)AF6=X`9W5ky+H~S8vKqq~S`P}b1 zR<}@}gFqO2#Ly;w7n_xY)_im)0;Y?65cmi=1U>nT_db|fSRg+syBHFBu^)kQHxcD*f*A) zhM=qy65ExM^23f}0~6(ehm%GsEE~y`&7>+;CAWl2Y&4TzD=)#EN73{jHt)SJ`e9!HJ7VMgPjvpta3gwCma}zW~0%YO`kG*uJ zsA&4yq2D81Zp^ciq}+zHk<1vCQB7b&VNqO*0kWeVmS7t^LbxpbWXkw7!JYz5kn}(vOWWJ02trg23MKm%yS(ruV zX^+Z|x+XYHx3rk}o_4SpV3(ITk4N&Dd^1r0+)!C()+12@_^E*?k=hqsbR%70>)(PUtT5aF)Y=6Io zoI`j{5Ow}QQ}m&ti_0^!4ZzmGA)TKb-NCGTr!32lNzhqLpctJ^CLKk^ibE&rH+?UH z2b4@9g>85_ipB*WMPSNxOhn|>^-;uru{~8s6%Inj$JGJLH4!eJBsB4F+6n@7So0D+ z9X$v#y)Ro_7~+Lxr!!*M22f4$LngvIyHOO}d4jTJ`l8h*RwsQzW+l?rrlcD{=XHUp zf7pT1N?8mljQ0I- zmyOY4SDRod{}dA>vns&xT7K-=APUWP+p+Ki0$}sgtVy&G_$)XeMd!Uv2oWl%$8oj! zar9uTn`O0SX84huCOS3#lzw&vzHW8RFv?a9lhqegUs_(DUE`vI=%LlY^E|K0GWpB% z{NP_jR{2wcEb@{ptGuk9;ssSug;RjPS&cmE6QjrsPw@}AuEEBAg?t1KoQDzU`*8^F zyzf51ZPMK;rrmSA4Fp4Q|NVE8dG~ax9h-Rugc}t+zIWpmv?3RDV3a`8S}g<9_uZE* z=vC8g4awsw;K2R&muG6nu1j{u(L6ODTt~i#9m2WleH;$gD4(^2wfn2!|7Y)AyCS!> zMbY)SeuZ|CwHk7;s|skkSDl^QO=vpdgr+$_Cwq^+8Q`*2RSGW4v217-^!@F3XzF3v zmLFBnt|f zCTpZkhaWnH%BMitb2TjNu`5Ba?4kDJPR)2|U{h^wKKUg6{9>c9xxdOm9@iaNjJA-z`=&`ZM#ih(IjZoDKwUnw-8?cs1(B&+(*-nNU5=!`_UahGPGfJG9$5_hWRDcg=6< zQt}i0$AA0>>~yKVV_2|5VB~x=#}U767F*Phc@o7m04pP$EJ(yJmo92}%oZPW)W)>_ z-u(JwKaKal3ovshVDRXsakye1E%(3S)=v5U_v+PX>-qlo6wlq=;A^y(rF4cP5Y7ji zD4_h*M++K|=`H$tAPJlBc|iTE(NNisftdN+oAvn-^V%J`uhmv@8Y)F3-4{m?PD(Aj zbhfsKW*fgZe*WrD^Zqw%Zy(S{^Zp<0?u(x)WEobUT) z!O`N+@mOvD!PmWmPqAWFOu0F%9=BH^6GcwWpa)>JQY$fNx*fd}+(DEDUeaZXDnKW@+t3wkAk5I z_PvZ@#OMN}AF~NgBFry2Lbd~L+n~V)Nwv~D5Qf&J;kuLqf$eLI{SCKwI7O0w6^&@# z8_*`+k+h&tMjqpLtV}A18&I2HB@{*s8Iby@F{;Yc7U+a>euq#$z5?*a-*lf<15S+z z(MplE*RU4eE)8IukSLv^{@&`|B>*aKrWVK1MPLvaHaq9PfLB*4Nrdy$1HF|O&k9+?R%n8gG`( z{f@su>mj9AS&aJEqdw{jkFdVw5HzK>8*PW=M)5pQ-I;_OLB4wZ34-tSQNO5j?l&C; z>xmZydtp5HjP)+Pvdn#kSebQ~s6p-KAX?3b<<{N)XaRgnjG{I!5CK0^IQlWBe zole5eplG45-z=2FHvgQ_u19niP+1m(Qd+&xR-YX}S9K3dRn4K$1g$p?Cm%l_Oqyhv zuSZzR4@QHS#Jc7Ey4&*JSRR+Lm(I^`<7IGoBCGs+;O;FrY$2MZX7B`QJezv6-o#@# zP)1JLF-NKzvzVm`_7?j%0dI={*c=YkZf#%6 zsH+^s8bTwHwikOt7fg2h6Om*;UDV6Hm#&0B08!APS7M{w`f8? 
zbtNH7uMfM%@CyM?0fHX+V}L}cNGUySZ#6Y|HRJyn!GDG4%g z{y;Lc+L~>kNi}P&mOw4D%>PKR{51`GIzG+%|Ck03)cyahRsX-Uz5QyZ?EkSfeAfRz z$zvDB(ZH^z^5;0nLJVMeJyDzyT{Pbxy9yg3;y>(rrcj}#jHNX3W_gwcpeoW!xB^vW zix~@s3Fl&MGtXy1`5T*C%wEOLm6WM zy=2D5=p*VoK40pKtAHfPiMzyUM*sK+l8qtfQ!&V@f-DdG##}b}y(*avl1nz-vqU93L(7Kj+XKxd_5c)v_ zcK$No^+K6_LJ;2)mAsV!LaVT7|> zU{;{7x^j3g$`;Eq9R6AVG|ME|3*JYe+}Y}v2}&#JZQ@r7w7A8c$zi@@v|%IDsw>ig zI2X&-lH_8sT0(5=xt07r0w_yUiI6nNp9}X_w9v&SW@4Y)X>oTczP50}t z*kPtialNX50(2*PRkXD&yK2kJ!dVQRTY>EC$HSA0^P{tipAJqB-~LY*_{uHTK-9>e zsJ2$-=?gU05k=e2-TGB7fv-Ons#nxy4+Y)A^O5kUJreR&P(Bb=CbqiC2b0P$|9pi0UPq{vYt^$0fnVO?bWSaoktJ?ly1~wuuVRfyy?Zq_z3kPidAKr+ z`sdHQSqLuDP`Hx10Db( zlBQtyxsrCquKp}xLMmp;dYT$Bbg4AQy5fy0z}0fCn%_CA+7uw0dY0EOEy&t7?onhe z%rBsTFmjApv<_xMLMs_C)9h9EJEYBP1wkj0vjC8%7i=I)7Ujae3Z4Jh`h|iTMCm+o zN5p==yE3AITlnKCavIgshQcsUH}jQI##9~F)hwwRzp56L*SPh$$9}}Os%-yPMx*{l z<@J<=xWBPcMQ+}(b9a|RT(c4?_%?O7rTT+h-3jS-EepC|DbjikyDQmkTc^RuMO{Ga z`W3!q&E@YAqe%*`i>=slSHzNQ)A8y*2VW!DrzD!`on)qeArXX!^B98gv|!}B8o8^J z` z=qcE=7J%{$fGseUa#*c^YO}dkZ0i(fCXNey=dz0M%EAawik zbEf+HGtHZkce*-vd1#Q<`O|wd%w{CA5TozkUK}2uo$nnTU7Q?!`2O(ttQk$dV&NMx z)fSe#iUA2=S3w{oATLgUbtSwzKY83e+t{;xMxlyjBbK&c8SF=2wyT0(vlhFp!ivaH zha#%wpT2*4k>_nYJ2?I6;Pm3dX+>+3XP;qd-VSMiXNs@yCI#~X`E=+O5r*--9$lU>!S18C(K8%f+kgqlHI6W2CkdTkn+~c_z@mTR_Z`yM zLa~Iki3S~5o$jlmX;$L&l0^=m9`seS*-fFHhz3~4&HMbhXZ|h#xI~-Y6p|LerBMC7 zYFjA{vB>KLV>EttY*C08;3{a5q-kbNEwksTvPzpf`UCW7J=shACD!aYejv9#ulyDLb{R!Zf z9dT8CS5e&w`Z%^T|Dt{kA1i8B9FH68l-0_WRytm1D(o+$3(85nBlv3$i({x4`4d+Z zk5zGWM~C8d{F_z91__t?@wMMC?Fm!wrz@mF7`GpKM4wwe|55enHerw$hH6V)iSLZC zfsZm!mU=0kW+B6=81Nd3XgVj+tXG`xG8!5bn7Ytb(Bu@c`OGg;?lLtlx56JuBi_+K7BXIxDFeSE z{fhn6V!T^`LKhn&)AA*+ZL@JvefW&D@HLDS+vPRtt!AtjrxwLWUDs+liiB2;dI{of zZJ>aLIb{VEg!-)-jJ39+T(Xhqphl0_j_Q;;^mN@+#ruW8R+HU-rD!9I|L-00sqi$$ zf0B7<6B=Sr53krqOa8af%a^Yz`QLV*^S?dGQ<0D8qszk7LQrm*oTQ)>f)85K8Eja- zW(*%ll6Fc%3>@DTiGnebNl5wIULG8GjG97#_f&pQPV7Xy{oJ0j!pnv_0YQEU?AT=o z+KN~bGUG+fjenaGEq!l0A+psp_bn$jZdJ(}DhwvoF7pR21X3{wk-g zI%XOcRjokf_uhrS-_uP0)xm2K>@0B-w9x;povoK8`oBHee&+w5lXM33UL}R&ql;*2;9y@p6VI`Q-%2v0F$Mk&vbKKIt+|%v+e~9$2_59x* zmhXR~SKBXlp3ncMc(i~In>Zklgsd3Lo3J+ms4+^D3>TEHCmPJcnHiT~ zXGv%pziREaE~hiH=c&X8eK3Ce6x(Z@BzQ_beKXJO`ik5G78bW^W!Ha3w?5p z(TmaIMMYh28h*+ju(*DuBtreozO7_|1vG3u`Ha+CPl|k?RvS@umVL}!d4DOJ1K|)a zHpM?bc_H{St5C{^G>{UF7|eDseS%D$Tt>( zwDkC5n$X1s%O)3`o%jY8f7RJFcOe#%-m(c-yY+0T4TSPZ7Yj%j$<~gr z7DwWCAS)HhAl5*)99VU}!(EVASM$pP%8;mN51y?^^4fn4GL}H>bsSwI8C9F%6gfDX z+H8xPyr@^lMx{Q~)XcRmrK8Go(kq#GFk3G-Q*qm|@7Z3RA!C;S4$VXUs#;hh(-z8M z7UA?Tump3+lF*3d;AcsQ9CkHD|F1YP`Gq~ge(LZugk~w)9qLaD?-RVv-xV&n63dg_ z9iK)ij4*L`D{76Wa?%qCU3)py| z!^HS>G4Ntij)5T>5VI#7afJ5-|7v!s-t)Hedit%ruF1|v#va#!^83b1;nE}d>h1F=CrFVi2pUvCy)7zqiAjrBJNvlkkb#Q_=1#({1p9>cpk z$O%kpB`4(DM+qikQohP+sW<0&wuJdNe;V)qh6OrbYtD-DfHwW_?xsKM_-s$CM zF8^Y2Mk`u<+rTYa@vO_`>VA(kbH`tx4dXu1CHk?+BbLx0j@r74j^mg=|0oA>GyTVL zNN*Q7N*^QuZlV7>!!rFJy?Xg-^i2Ps;%TD)no3x*AY1%>HoAB=ZIp?<7sqU%DgGP1 z;{G%OX4CNv%Gu4-<+ob7@)xl)im^ZLSv9v_mQv<>A&F*1f0YEoEwZRwR4*&n30_`Y z(|#Zd%jisi>I@RqMzyDRyV&oQ$2NiI?J;j2Fe^uQE2% z1bpLaEe6z@l5iXAls{o%2rCTYwRI3N4UTNfD+lzoMbuhaH3}wwj{Pf^Eu@AI=1ak- zT>IT~IZXFEk6~~h>6bKu{SD|}L>F6@_x?p-!tLOOw9^-NnDkXCCb8!Uzu zC6Lleye3<$qQ{TYREh$%Apo&D{1)1=xaDqaJXS;~bB2UhTIabCFZu5i{%bD&aYB`ibsRqM}3#$WJhrvI_`Fv~yjw9)^qowEFAceK0xO#h$a z=`8=R5f~IiIeC%6_0|NZo+8;l(1iSpMyVH`&|ps%!^z4r9+M7xaon05PQ*vR;lgEe zvD|B_z&E3g7su{ZHo-}RQw)A-3or6!IB+I15a%flvB$70PbF0`5wDA7R9p>3xB(02 zUNSR6&DmsQ{C!0}hL()yx**Z1O}1lBtWic*@;RyAq&S{SogBRxy9Ex1!L$RnA|o{J2v8a){?;bpXZse?W~u_W|1V1I7A(7HvWM zFdilrEn0TkJ7NAD2U)0{#8hZB^R*>yVr~7d$ug}`%MFiy1C+|QMtE#3tqPPid!Z$N 
zp}DpjNnp9j(z;#+$~k4}UPwGJcD|6GCCIp~qNpPAi`ph-pJJ%fVBV z100!IHgzWRygf@I@h(kPlZzK%t{;P$E!@^rA@^yOO(@XWW>gsIQ4KUS?XE5X(L5jDGuN0j>0HP7Ugkkkzs{ikZY)XQz^c6_pdWc3Yj+cFHMTYlm81sHmLT zsk$_qD6%!%Q)$Ilay~WYwkWk?%L9dqIh%?Q|8RbOa&hwh^t{wiHII7(7?#WhT^wG@ zH;U`(B(-KpC_}4VglZtFEalpB3?{{ky$K0PN^oh$5egC-kI}~$UKqakrSJ`0_)ijY zO+q}w2h8_E>E&U%Xo^SWFFU0)1tZ#9mf}UhYZKL^^i?=A5|a(mMO*??V6b$LPZ#mj zOytwRfaM7R3TU$@5ipWcY_*(F4PBN~0?2{1CkALcrJF8RM#=y=i_M7x9?(}QrGTi@ zo>-u%M2?O31>5Ll#Y>*a}~QnvMZD({8aJLG@%O}6haB)Vj7%1g=EZVsrJ@t3XjxT(I(o= zE7f+Gdlm}{a()}<(Jc=3d`NVu4<&|MyjY1vwz0?4Lb^UDUkt^`P>*F&imX=06{JBb z&w7WBmNw@Y?NoW@(gLtfdHvMeCcCZ|%~KRyUml&Z9Q;uT3w|=Xf{SD&rO!mAL{tvc zTwsHwtcfcc3L*vBacx2<&K!<&DAsU6Z=8QG=-XR6Fow}v; znYKMCZM*xk#gJQu|6kP#xXu3S)vHSU?^nA$#%J=6xch5o-B?Yu1ee-C$`>Hm{Fo$Y_sT2LiaxV5k3 zhrO5_vNH%qiN@a`{F^1Gq-ul6f{?D4M`V(AQ-B$0`)1w4z@O?UQL1svEOWk)c|c3;G3=l>2Y`! zW-D)ebxJ3@8OpJYy1bk`egzMp#iFpoH?(C9~mYb-j zG{py)#L8QQqHHly;hanoZkHKUOnD5*jyu3%^w71-+N|FuFphnJif%2%l&V-Tgm@4+}?Wj|9q0CtNgDd$}92v zkF8-dqt`WV(KzRO|h_h4vE^s2!i~ zf<$}Q9tm^K*?d#D33sV}R~rS(THE{dA@WMy7v8l4P-&vTD1O0sf6qMUiccH;R|fJA z;Q?Fd|JKV&{O{4$bN=V2c{*!Sro)~<-I z7shk16z2nMgm7?@&?yN)D8R<(ML_*4oD5{nugxTjQnJA6_lxe$%Nj=WV^nVbMGsZ2 zS=2myV0bT%_mV~KCE>-f$A9NN^U3HmF6%%#wPzoQwLOdcI97f#LGB*Y?m&~^-!hWm z;7vl~{fd*$8j{)1AX^U@+r9z3ZVDy&o4x&u_dgw+o*uqA0Lk_K5AT0ImPe3-_KuEv zC6@{Zz3^2i`WNbra-PsH;)MQ+{S*iKjn9bwTLg&IKLdIb-FQi`cXF6FpyHvWKSA#h zGL3(oQI{^694PfbN=rq zc{=%jJX!ol2oYarC3a#XkAI50q{S!D9J{HaUCJ=aB8ac;WJ)40B>%#R>LY*AxeIk_ zi%g{NXpkR}#?8>OO7aR)Z;A`KP5F6_S#J5vQksBw``w*RuiDT$Bmcrp-}OzBmn$f^ zORL}3wg3e*&GHs&ys0RtJGF3E$ko+2YsJfN>owXskGqw5tp53DJi4BrM|c{~|AdCC z#(XxPw)1~$s}lcn`{lFz_eq|m&VRUj`U&O-W#Pbw4;jM;@~}UpA>OcAg;md4s9Iwa z#+&(59Pkwy{s^Yl68|)*6~U}m=0=Z6WSh{+I}%{uON^9jIp1b2mS-tf3-hX_7KczT=+@KImBj*oPfwCx)U{UfKClggll*||| zv>`810=LimPl9Ss(j4 zq%2bTslJrw~Qcp^DB9e6N}s%~g4T1QsAsce0emEGCoSM2kT({0cw-~j9%`P z*vKAUz@C^L@SB)GQ@7l?j^b;yT^nu~TO0X+R zA}D_1F?K_fz-%~{@2ZXK()AtWOt+=6>XJ~O^@?bA^C~5Ow(JeLt^zN1Qo0Cp|3kBi z5H5jBWbHVfg*tz#r{(;ANZL>Rw4DFjFSpC~AFqZlpY1=M0+J%d*qyyEHKT|iv=5_(aYg**eiNT0LN*V3h973U?!yF4)r(CI@n=a3vS4BzcpCV zQsIeO0ut%NoJ>*e0FI~^eCLH;swyiG6p#m>V#=Vf zVNGR=;!!TW(PlpP7)g`=bf&;v_&-lb<5y+_Tjjr_owEP;&eqFk{_iQC&hp=)W=3fO zTlhq!^D)`QN0eQZV0r{m##1Y&e)s!vm;Jw>5lLzCrS|{U&dYNA*Wu1{{J*DpmfipF z(c~N=K6FoD3b=+(8@Q-X7KcWvu)3AbqZABA-4Z*u!e=;)OXbs zh4%6_{gh1gaTLUq$h=V;?i452Uf6^ORi+#C%ul=+pNspPs#18_)R(n(DWK7uMvCJy zGGjIJ#U=}_V<^>t5E~(tm_NsM&O8U0m*|F?G-UfiVk(R47vqjoows0#e_ujVGb{;) zWkaHwUTMawuJD+xG_OB|ex(zbG?}BY6iuc7MdAUxE^T*9z!zZ-svQ8!plJf|hdH6Y z?9+Vyr(TMuS;!uy1a3M1cSf(O_J1#*^FKYw^T+c4GZtLM53rbA{!ErU`#09RHs*3K zW^eyOp;xLw6I)Gu=GY5Dk(A{TvZgPeC;iWo8NJd)?pNUa%4p;)+oiq!QXnwj(XBXf zLa#}HlcfRC9a|b}faTd9#b%FEog-+v`)n9CUUdoj>%2l9oSna6W5+bG(wpnt^;?Zb zUTw((>!bF#7k40L=2@AiCITC za~z>bMnZ_46MO!ZH^a>Bt)cTdVMryvKx~e~5OD$|r~`>+n;=$5$u$PSi}Buzg5Da6 z@C=R%ekkE7`Gf;OoBm_NMen2V7SRZ5a*$Ar6BLpNyFK^K*~M8(6Wm)v`*g9O5&CKW z3^d6c4NW}%DvQySgqZcdcG*qb``Vp& zSG}*@bP@Ny{{MPw=qE2BG-K%S%>nDV*UYB@?zuAJQ_m#<_6DMSLVxYO|M~df^z2VT z<1G9C^kDDJy90L-JXRl#`+qdr-F{WJ|JoUjp5^~f@~oln#k8XzaJWGGd@1yLU!%i_ zrCt~cm*X2eL7G?RBteA2LLX(_U)CXWz(6e_mnO5pMz6QFhQ7xsI>j+XhtZVwdY6}% zK8+X+agU!e{OU@P7X-?#2~rR+qoo;1=h?(1bim6yVyPN+vpDcl%-iktF3vDUAN4l@ z6>p5ca@l+kP@lmM_}>LDG}u5>nn0pe&c_GT&mhbhxZYjB5F{#87%_>WbBc)gAiT2q zTS3Yo)|e0}0uzcZ7q^alaXGHyKsA$>QrJ*f5W`zrLmwhi2TZ0u4FVkE6vJnu$pFV8 zy@hPDRS;yAytwG`9Nf(XW)i%h*O(zS;2(7qKwc!K5sp%ZJjSR`yi}3(95sMl=!TgX za>cmfS>nOU@*<=zNl40N`N8C&2;ZQE_bW}%wU7w)dVBJP=s>>azoHpN>mj+q=z8RC zx!cJ37F}<-LwC4QaHjWC4oZR%VaSVmB;q6SQZflKnk3k};!w!;B8&mRg%@RBh&&%I zJple1I;UudH;>l$q5y5Xqb+x|!SP3!hD!!5WMZRz`wiOS#fEtcFL?{S-v7`H&An?e 
z>>xmnGo^_SKW}N4AegWW5SGu5Cy**tLv(5&9@-Lv2=dN$MQ6)oRhFYy*Q#R+PA?$H~}{DeTH;MUTjyomX<>~r6{ce+2zlwGB~QL}fCE|oy;vVHu= zkj#WnW5h40t>rIAt&P^<3thHFlD^FQIiUf{1Hz*PiO3>bpgiJp8Lo(gt3Xx{S0TXk zWgT4JCx9!9tdKnruAGpbrHPl~*=yNzh{>`|t)t63)K70?+#jRpR@KH$blD=!5n*G z3Xw$kt)M?f+r!}yeMWr}k(7Ah8ytGKXV|Av!1`mfJKRKlY5$wQ-R5tn9tpDqpU)G_ z<}?iYV~{CaBDsP15z%3U_vlm=?KHww2l)9C;8JmE_Gy&jPiZ}1#RXBURR>pJ2XXC> z(ax)_O(fnxya4@bYX0U+MeLx2Q0PL*rs_buN1&CZQ*7_>-oFe+Y|4lH`$)P+S>Z( ztKI2j6#Ua0ZNHwre&u_+-ar4jy)*TLtzGZs)ZcpTPXhcZm`uG(6T~Ty(luoCKW{AjH`hp~D zHo%o8StI@L0vA7p8r!%xF8~TAP~z6Yr5!Mq1FirUC&1m|0u%Wg;P4fQgdq$z;em$h z_3+{IsEm#p*1aOJoHGZ&Xas-TCp2$+S>THEsBQdfnUupw_%E0!0{CjmU-HxMq3cT- z{5Rmsvc6O;J6sPrk}N6G?QPvF`$bfU+FRfHRSafc(Gt0QpDrz9maz$MMAQP^# z^G*Yu*b4$8I2{e5&`wNgf@H)+15#Pk$OSpg;q|;`wI9w;7 z2wWPt0$i}4nuQ(}eDOM9;#ULL$36&99sJ2##385Mz8Cf%W0UHP+zm{b02YdhOw62$ z#0idZ6!1UsHBN3}49GWSvo89;@H7jB5?6g9(F}d`#Y&c8L zt!ac2P0)fSSPT_MX+kh_AB$e5jhnwP73W81MNHR}e-K>S4ygUAzkl*!@D49%a;vR} zw9U{UBnwhFU>-=9IOna5>09X1F%7gLf*-*#4J3C3a<5AH2f>xn5GPINO5V@8F4_gU z2hO86osx*8^>UEBpFLSL7gDys^$>gkJvod>y000c`tz8i2H&LAQIeNY;s_4sW5Lx1 zlj!HH!qPQ?_NZ`)yfaql?veZ`)QY|rp$SF{nnnDmA(4d>U2%=&a2>TVFBblVuPsnE zdl8vpRuOVN^j_%Cagc>LX%vQ3`j?e%UItwCo(q>{xIQq9 z@=R>f;FPZ|mhxjL@KUc({z3e4*#)A3uR2%!R9*$GU&hz|B|joB4cCdw4S)+iE&tTv zi;;g>z~lSzUq!ey&iv3Li%R*+t>O1%)p@M-EGaiA=Fzk!IQeNGSSZngWgZ_QcW5#6 zy)^o%%wrqA9vQCOi21V6&xx_Fq^Mz0uYKmU>lAyz&k0HK`^d+g30Y+x8}Ekq_OxHM z(zgfFFMGx;tNh3gmzid(1lLE@FQ(3nuYD&@Xo`U$?jt)2D{!%Fa#DLeyhrc&nS(sU zvIz=EqD9V?<)+|zkdoeji$sDQwNlo5bXX)^gh5C`S$30OR|M6k;?xxOEd0u)tlklc zoP}U`Q;NtUPUtm^Yf6)a$J<<;7&i@v6TTzPPhXnDIa6=pNaeuh;1$7s1T=F7aeq#4 zqBmaZH3@kD&47bV6wv5J%I^kPQagIX*^W7hk(VNtdP$na5DF}-RVOV4SA$;n9`T}% z&&xUhjOK$7vFLd3(d8s#w-dUwT%Z;%t6sMquGE_q%3qG$?UB2@!A&h(R=sXJTtyx4 zJ-Y1oA95Y8!ev$Fw!>9a>^@X|TY#(Ka$JRrbN`ilG5QCRH{+GhiDlW`y%5CZDK;2f zunMgy{~$Vnqw8*U@6iEBmT+`U5-RSP*Iq(|XgKy5!*HKcW*-l|Mn%`)d-OJ;6;7=T zmlYz&;_T(9l*yCO85V}Fdnbn|pgu!1LLU!!Z&JNtlVU;IYU;B=f~Pp)#C|Zt5l*}m z2d1#gi^<@6ES1zabhsNDY{^*90WLK@A4{9k#0sPYe* zMxc1d%ucH%r0i;iPDVh~$Bg+x^0B%r9 zS){WPiCP3(_iDerU?@(=!b@%&bxIF}%Zx_Q%65U3;nADD6Xa#-97iefYt>v2lzLiX zA=J^;^SN}Q;;fM&&6B62!RT<7MzoyLMR*DRoSP_}6NZ3&5f}-2I%%_EmHn8Cn_SnH8?-8wpoVKnTl@lCDrIh~&p@siw8@ifghhO5m{;DHh*1y`%#MGluP+T8jB6{!*~ae9A<$Zr{bOF|5n{G#IK45z~T z)3%P$hvUQl`{4j08y)YxJ2?Qsjq$dUs z25ziDzy`vln+*(a;RPa*rZr_)>)<-uS8VmA6)upW@UxmuwRT>y(uH|^kV8ckE*IUS{)Gpkb|W1%&Yx^#<1KCAvp@ zeoC&rRJd?oQxXUi@$YiIiHi~rO3*FERJXzfhT|WIa3N&?7=I&TCO~8tUXl?S6#)TZ zD~I^Q;VPVdD_jLbhkK;fZ1oc)*Bk76OQw+UTvQp?WLB=gfOF@Y5rR8o_a zs^E*Mu>}xQ6$L6GvJ6Dba61ieMHOb<_#n~$JDkpGP|)OvS9}ba2(l316#Hp@b(T>B zn2UZU=^XvMcfS7st3Xu0gAg}MYI#G#5Cs%@@|#k#Yzo=Z+0ntl2_mT|2YwA+7#de| zem%nxVj;#c90$8YezBubAcU{b>Sb|Y?25$aA^Qm@WNJ09g98y{+({2tn#G};lQ+87*t^VwGd)TW@zEX(hj#uKUE9}la5N#cA-nR z+ZQ&$2s)WsKtXfxAaJ{2vkt%lu{U6QtEs)ZYXF1_3K|Mx>(emAeu=`aXHke5v-T$q z%|!F!8blakINfkX)GM%Khn1Wg_Q5J0q>Fe^5}7rRD!-Xt%2A!qO^34TCJQOOf+b6o zO*hO%N)f%rNy51*am28cL~4NpqCiK*3D5&A=(?dy4@Q1dP;jPZQt(++briw&N z%o1U_rp`t0Iq)}xiTTS@5NK}3?1?axWD!Wa)I!V{UxkP13x>RqQ4~`q^lzN-0lc?p z@8}4A3J^&IR9JV_d12IHOx=erOFQ*UY)Nwfd>M0805srZy=g9InxF|y=di2zADxtd zIYe52V84=dy^-WtN{H@hnd zxUTT6V!kz80D&Di-Y_4E0E++W09X!}%5&ZX+e&b$h6P+YoqCJz12l1#F!MD^=;h`S z8eVB44Y)c?WIetF1YIUFPo>`ko&i^fiChh?;zSmc=hwkzhCa~A1GAKZiI3$zyqBdk zH%eDg24q@Hl+r6485h#W3BhAYYFMRQg`#ar%=_g5YJf}J;p_sKeR01lBdV_TN5xlE ztHCrvw@83sZwmR>rtp^*`E1ph#e^V98C$AqWLk?2Au zB%~2u;dOvn`UHT(nJe6rC!XIh^OiNhxik*$>34hk4VtqDt^t5cGYXC2F9ci+ozhT1 z$N!j+2!gvR8G_VzC?ST7T&1gs-~8uyq9W5yAd2aPV7!r!O^I#}YR2$^ByPNwn&U&t zF+G$}q4-mCuL&dnfE9C=X+-7^&9WI-4MV&*Y)gF|5gXkR7; 
zxIQqv7sq?aqRv;HUo}KfT~!W=lNWZ21y~&!w=(vP=Czx{1-7JovQM+HPRm>{Bh{f} z4A*8@foA!SztkeX@hQP&EN~lmt#FwesD|n%&ZyD5PHS#_uSrS4)HsF6Q zW%5u5SB1dX;A%3zyGNI=+*jMHl9f8RDn@uVxSGuG9%y=32Uo?x(*{?Q`P~D_o!(LJ zr3$nDcz9n_a%wx`Tnt%)i{oqkvXa=h8k*Sxmp#+DmE3(``j#uRo(qeQIH3#90Avi& z7$+Y8@L;R?x^(3hfkmFl7{s_G5D8N`M+A*~9Kk?2eYovH`vbi}Q?L?=((o3Yy0puF%^;;L5$^PmsTxYM$Y!iweB6P zrc+Tu%(l4N#c>ExHW-9HWN3n?6l^SJn29KZOZf`Q*mlC$@CU*;^H2_{VZxOsK%rrP zPKAk`uydv;!JNSzSw0KiYz{ml|z76)a#j z`9s{7VOuVXLM@o3+zL*ym&zuuV?tbzs`PC2GLznq#3B%r=&; z7EF7V1v{Aey>?j$6_|VBjd$A=kk7V->Z4HH%aEoxx^}L;M9}IIOGFO@J`?V%o32&p93+;ZC+s< zcLuYK4Yhz7;q-T2xv;%z+90rZgS)lDpeg7Oojrpa3FC&JJa}1$0elQ zJP?88kqkaA3<&vF?)AXIMvZ^h-4@o36M7AqSZ}>W*fWAmjC)8%ml5gt1G?}?G)D5G zOQM;pg4X+V0Ywr@(~B$#$Eg4J^*Es^^=Y`l#)HA%*G2UWodLQJHK2nFzMw8xHo4$% zj=`s^sxUT2gN!8uHun-7)DE1bb zcX<3iG{rY)0d8w%xS603gGQc{F8U6K^ad(2y5LhRWf^>H;ZJesMbcKvgL-p%qwOm^ ze=hqO=hK>Z!)H_4hKRM!JIJ*#;vGW2G8&E1eNT~ryIxKb`i8Mqf7~;^xUOsbf-Lnh zWM}5hfh!N#WRz-{O{XLcA&2N0AGa3!_czT#83}k1Z{i1|t?ixSN0|}0KSm?{`ZN3` za!6Fix+a{Y2t%IcPCNmvHYhH52?r#BYy!9DcagDJf0AwUR?L+-C2K4cZ#RuN_KIdE{v5q*pj7Z5l#<-F;c%3E0Mp+?d!*eFcCgN@YgDWfcC2X6YoZ5 z(a0E1e11cVQhqL$Y+Ri(KLTTO^9DM>`8VS9ge6e?OC|p;hB2W7B&1KT>DGft<1Lj@+951{v`cJja zZfTaxpb;_G18;B_C-hhBrw*t0p9hn~i~KoegFEq;LxRr(Xn%0W|AF5U?cd5=ip3`Q z;^^tJC}Tp0&Y^oTcp-J@BvGnz2>A#&xkc|joSpMU$$4(i7dNfHZ@rbXZ}!45s<*}_ z(y;RPVv9h-5{lyd1SA;^0+MXyTc@X1iC9R4HKL4n{P(T5yz3laa$^cq@h<+p^_F+S z|54p&#K8_#@k5GnFM493VPk(Z2H6mXnwxz4mNQE5Z{l8>+b&2-*NbT&{aEwj;217b z`WxO2C2x>|JB~zhiWA0pdJz>O!h)CWc_LmBKTrcO(gHB*U8G&G_!g_#sF2Pn-vT*A z{0zTINSflPC(ae&Gzv^IgsLwXez|C0IIgSBS87VKr8s~F#6?wvW9XUjND>*c0SD5> zC~$kdJ(OUdE*3ZngyM@Z#Wm2#=!vtX`7Cd8lz1q?LFTIv5Q{8K3FmYR&a50)y<$sz z&axyCDpO8o;)KRH32)JYBngZo_jYo7J;gzn{ZI{7#*4{M;)oxk>rwBDM8P=s%kM34 z>hTkfpIw426nY@dN#+{~$q-fYn5#Ibk5PZr@2SSFUbE7vJx2FV5225)>;9t8vf@tB zo@~uq#p3;`_euoep;zB2Az`W5Cn_~C-z536+T?3Co6Y43Di;V2Z2PBIyMIvY{waHs z+BZ;7Y#;@yv3pR5-NShUX)NiY4@~-5FpT)h5q)j;1T&Xl z^|>^e4ZvrGuvBQ$2RbMViDrW}d24a<<{jy|$7f=ra6QpD8uv_$x%@NiPjlM?V_&Qz zIpzOkDZjFBB5Cy;d|MFe<#ludw?JO_1+Dl#@!~m2aZ`f^w)jQ5)DnW1W4@wVTQX9W zxSTS6y5$U-;9SxAnP72gg1$@W4a);Zd^p^wbSjz}i&pYJ3FmxhGD9SAWyllh7m!l^ z{xCoz&aVJ}sN67Rb@-g$qVx#9<=jX4W-uNM;PSw4(&N{|*TZTpj<*Tl8ERE;@}4uM zJ5=v0(`nS@`=eZsziX-^12`1wsEg*zm+dbL<5;VX_+kyq|xqw6^e)D@nv*otRtZo%abi zg&3CQOjafAjVVunwiaIeEt(P>1~RFb^6v3zbd4c;R7!$?5IhmhbXZT$EnzAb*cE)YO-);tj$i^+^~I3Q`^;4#8tDWG{7 zzF^8?ER%uJ9>`7Xi^V(sFUGu-{p^2N)IVn9`rqte6%Mmn0Fe@zd{44mKkzlN+TohSD zn51)@^dwVoL9}>r4bl9y=<6NlYu_g`L1qZMP{QV%+AVFEzm z)f4yji)AO$GI8cyHMXE8Q=R#$;z%C~KL`Z_h?TT9Uy@D`na7X_8?CD~z0vC(Mu;YX za3r%=ly$H>b2o()Ydjbj+BVhLw;P+nW3M1FOewmhnG(*yO~FuPH7NQF^@W(vVtO5w zUm2`r#;707Nc2e@!SJW63KXj)m{WV~q+T+^=@<>Z8nxER7z8~Dqi@tA_JKvbApF%= z7Z-fIcLSXI1KH((*SsjSGthND=M6~cmu^Iq_$(I2K`}bWCcm_52mU-^3uiKnkvooK zm*$Dtu!!luXu4i>M{CHnEY?cqt8FUXbvMk%mS=f z$ZjiST+yMMHB2I2I|v8XO;24Z5)5`qO`{f9SwXem(EyLpaA!wey-oFZ^OW}Fk*3tz zu)?%aLuD)|1MbvAI=3`Blj$N4qZB7o&&Oze|Lkx>Z)+PB@X-F*Aqo-#iH6*nixQkB z1YhUtNOkpb?_XJh(U15R{R9V;3cG7i3z65K6ALJ;hU>u9((xH_L|2+9I36#z(Rtd6 z4U#A%5w6%7Bw3_-?pct^U-^tEE%&n@d zfnM{aF%}HNdcE&%b19n5QZQgbQ~=@BJS45plqaI_paBCixV1BFQ&lj8vl*AEz#P55 zJwJc@+uyE6!vQQfHpp!!2K)RCaD$K0$8b0t?(Xcszr*3M{O@S@)z05WJ6pr;ot@p` z&hFoaqnBGRcmIZlkI{MCld;rG{x*DYU**C5NuC1lu+{V8SpVE|w|3oOFTgS1mXS|5 zfpqw>Ex5Qb@cd_~mtx^ZriE9U*Wd-f5C=Qbaj}OCKcxKbX4vicbg{@HlHN)S8P4N# zrpFP>v}cI$w%r}~>t2AFpO9F9WO(+0EmCfRh(ri>!L$;HZj%tqv=rOz%_)bu3KEB) zM||FVy8(}}i;NcXp-8IhF@_6!?joc!>iCnFFDH|i{`StU8%ML=!XuJD0Nmq+N5V0h z5id%z38DWr<1eImKXn*7tI;Lb4zLUDKESr31y*o?asa6Uvq-5Ew%nTC9~Va z=wU%4n&PXiZA)SKEiW8-4CBRrrHL0YlDb|T>$2k0RThTcyeF+CbL%RtsdA9z$TJWR 
zVhAB|N?HpW9kdqB*nH-t_{O_!Ez22$3;apQN$jQbd+flnQ3N**W<)~N(91>FJ1*au zkFo#l^xqH%JVGCh^ndhnXM6W$nf|}r+Ipt{Pw`xhhQND`fiTYqOF3Dd`H5hiOQNC69Y+`ijsOtYDmlp}e8>fn}Ca zpNh_RA^-$PCVaYxX@sMcxk&2LK#il{zhG#Zh2bswE%QRq(L!yvx3<x@8=|}eL5)QKJYitzpc^bTMaV0L@B2=N?^9v>K%Hbn z4t{@p^gp6uxMD55E3kVrN5roe4lE_10=$`16i^r>TtXzMdk~8>PRKRe1kZvSFG@>o zrBsIfz|!4`B#7Qb20)vpIjz)}+>ihyX-pgFnF^ux*ob)BUgxda);_MG6UM!z^V9;Du5~< z#;D(yEf{jonz1P_6Q=jjr!?{Ql9}9@eK){9}6#)1oH~t!bZ|rnpB&z4UkG|Fb8K!17QM5LR?+d?(HJv z02>-v#h&p6UE6e7&<>qCUZko@Fv`%4g_^miaBD@W!84oatn%Q3 z?lfw}MR)e1X~-PU=Rm#S6a1nDs+^n)bNs?obFwTVW2rNvz9oL7iAxn62r9sha8s5Q z-^oKk>OulUrn)|1C>H{15QN@6KMfFaipyKLAZu@BQ6hgiD07Kg!>?8@|9LK5azQTu z`Tbt&HLUBLgIMJR7y^xo1Bgk(KLalnCm{0#E5K`<+)91Exuad}@0bZr2%@m_O9WAx zS|gtuXCDH^Ycn^W7JjT1k4ZVMuQeVfo_~cSX|lQ|wNucLShS3VPJ)HObG6I~P0}%k z(aFJSUICTZZmRUsJb}3a(y%=L<^gVqe*q@uq#-i~W7uu1N!42Y0+KwDIW{Bp{8`FR zi`gw0CIL^x(sr(wRn3ixCG90yq_Z%zFEFZU2X8zp03 z^g=D}c+kW7H1ctrvXUu=#~7JKPD}%bH#|^Bh5=Av5nX}R>lMDu`6U&Ha*X;1zhz!{ z4|g|~7XX>x81>=b{576VvCj|8V|pe=i0@CpV#RoIObUXi&8^V$d zDBpKkFvF=fT@xjeu9L{g0YCZKAf^Eu1iD$KVU4T3jco@sD9OD~A0qGCBcZT49+|S0 z2zen~Xi}_}B2G4iQ9z%iG`Z@#-M~*hcEz-55a+&xG(xXQ>?q$gNB5MEXf5v^HaD1K zm0~os@GnM^$N^Zhq7&T!k|7}Wbufd1lnR6)G9-hQ-%mbMYqcZ8!Gsk~K*jb^ zCZknI5zNU?;DK@*c?;qjIhGYX`3i4c;5ptLVXFXW&bbZ%?Hz|q*&I0N6nnwX2}$w$ z$X{-xBKSo|4riH$HEJLN1=2npfD4&fp+L>c>%}dbAF5fGqP|UZ^3#6BE?XZ^((hWJ zP;X z91{f$HuyiySKk_Sq&&9o^ljDp{Y`lxlqSclx(P@$Guq1{C8}LFq&OQ$*c!tznuXye zifE-fxQHO4M}&m4gg>WL+5Ky&kg)|_gw|*A7NQZ_`}vHI!ixeg2~f`FAoTr@2b=se z`UiR+fKUEt_)F_;1M1L=s*l_w2Qc{W>}W0CcKJG#Vhb|m%mfXPQc@xrsZ4lk1=vw4`|@eBB>yf9)i~ehs6d>K zd}qK}Rh8OB3578L(_A{F9}T!QFOI!rAze})uUUqDpDyCZZuOS=X{Z}je<0bLj-Bdy zVLbOrjssvhh=Y@aPNmF^jnRv*Uo6*(B#lNZWAsAUO{#Bsiwe1UlM|eFYB9;8lq_)G zq^VW~8?0#-o?*!GCPNtR3k^BOf!anhSjrIbCqg5%&M-z39MYQ&wI!#7Z=HrvXul#P>;n6C7}E{8k@_1);b-IDR|52ut!lA?8x1#4+Xs4zoO_DUJdJA^XKGT^k*9Kz`Vhj$Nh}wYrJh zZLtb=$EOE-Z{8ia3+1FOkpB*|`3c5JVGc^ttk*-EW>$L}f(IP9yheeH@z`7P-o>|l zmSRT-lN{t5L|LKbVqVclE}rt^Y>Y<3T$H)M3(!t)Z@nY(r2?{REx1_kL)Sbiq(Z|%(ze@Vk4Jb`>vD>I8yp1eG`m=i(5B{ zROV`zWz#895^o_|E;cKq@BvLt;hz#rZS{M_f;foSP+LaG^W2x=uDdc}r7+pjq#%p- z7=#%>yDwjE@8mC!X>>}d>4^!izfWiun}w&vS3SLi>ks)7Rgt(oJSi~LkGX{(bpx-J zHZUrQT8;<(hbP)9 z0)e{jJ1_JiUr#{!^%;B;+PJ;QsdTVPv{q7WA zjeBN9#c@^IRtT;o+c5)-8CGH{VnMr5uAxCgas>YfI#3x&hnAEvk#5Q`C@Pv1HK9zD zRt7~cT@|sV$)c!ivC#meX%$pd=V{BJ4(-Xithy=_Ty0L=yx$OI8Pug&ZBX1w5e3}9C{i{Dk>{t{8qMvOEkSD6N((>n7#xMhT$!t=8lOIIyXS59 zyzRc|w(FJMnJOmGRWA*W6ycb(lu7j3T60^PyPxPtbG*(8NZb%J>}RYzTkb<;dQJ_} z1mgi`)Ro7F#p`5kt!IO6-0PJs+6$N|iq4j{4%Z@pVmlzHgh1jz^1up1d4if!MrfCR zl}@IswtaH=#@_D9;hWBFN_$FZaKJg-0snK5P@0-e@>|4|6=!Osfh3yPBX$r(o(In)C-s_)##g5SgWQ zNWAK%<+$M2Ifes~oS>|ZRG96uXO)=Uk+(Av&D6WWnrhflrON6J5Pf&o6O_)Z-&Iin z`~-;h|2dM9{YTLq=+-LQfZVs6lU*Z$z6hjezcoIUNA?nq*z#!oZmeV0-e53RP|n@J zctkWhl~nV?>Cp>@*83{;et!cwV3-8AMmTWj0@Fy=l;dzb?XOKjEOUZLm?2anI7stT z{g!5%%2G7RqDcF3KrwlkDNIUJYW^4w<%Y%91Kr7swqFl$CRd35ADW4DJ6^~rOrtR= z(#soSIdhDPpj*Q3Ii(8G^KnFVg6{A)-DEs+yG##kBD2ECAlal&w~QHOq{1O zvbAwja=>X93{3pO{V6W*Q)u8vd^-S7Qt5X3BL*`@u>Ft$#4puh^`AfD+ix#>f@;Gt zsbH;up*|plZt%pQ7f3!r2&CQ5stqW^m~c+bpCLFJ$eb%?B%T%Bb<)-6wyNJnmX6^=9bvXT{%$1cGb5TJ+{z zd&BXuqS&x>VHGk%8A+^z(6F*mRuiy@bhKyUL_H$W6+`@F^1YPbXy?3#1d(V;lZ9Y` zy$L51iV0B8t{}et75iEiNg}?PY4Czs9r@X&a0*dY7wk4ly-%Eiy#eVI3OOk9?D%=_ z^!V`j`?2XS%TN|E9fcd{u5?y_fF5F-{rH-R!Sn5RJ#t6e?$9ls^ybJ5hyHW42Wzy4 z!m3?cL+`xXiL_=07Qpn5Rg6_062cPwp|YK<%cXQa=f2gzQAp<%peb;$!J70?;@z0G zkP9y&Q_NDgm)k!suosD=9Hx>8OK&R-!Y5tFSk55=&oRa?8NRiqWI^?H>dnEs_ZQ!v z9vqw>9)GXdBX#@w0f!;YqjDMz_A7mgr~nOpsxCjA9h@HTy*p67L1%9hx)|r|nP}}t zd@BR9uBl@|MyI=Fxf8zp0-`4)`3#P(6~_;vy|2f(ItLNdkvt)JgNjBmhs 
zI9^OdC~WC6&kQaLOrVT-+}9ZWaDILQ*&oG%)mv+tMZQ>Q{L0EQe~vt@9EI5TK!Nfg zhq<@1bj<+{PVOUD%iL`6npdDT?fBxRESR_vbwcW2LBNFKu)ceP_!sT&gPhu$OL zq2tp8i_hZRqN|WLZfHUllQy75sHRH|vJjapnU6D{#9q6=B zS-Pz2e6C=W4b7TvGJ@&SrYAqp8hVsi9r3#SWV`1ib5Aa>94(Tj7AHy zMF?u1MJWmSVIdlnqAggwkY&cw9{)zObc{wrRu3WO)PVDFNfwrDFT@u@qcUh~Yc=6N zL0n+4IWm5JQN&#epD$x7!`I<2j~?bKzgmB}09UnPld8+-Id;-x#7?Tm>+c>wsR6WAqA1lj_jI9@ z491}?kdi@}g*@gtjM9U{DAg~4-#M0&B&{7{DcM($(v8BH9v(x9_q-Ppa2l5$ntaQF zRwad4{3#V@A;2iJp%-OSOqx&1b`%p0gCOx3^+#K;+@U)Z*-Y~h10qc+xP$semb8A8 zk&Mbw^=uu}#X6?C8=j*)Jx6(}W4E3x)Kj(VzjVB(?!#R!+*9XvyTp8|6IT6R(VjXF zZk3Qvog25ueR^%ged^rQbMU994gS=5V*Y(2Ky^mZ(}sbng7rmWLH+;iy=`~fHnKS0 z*ZLH=vhR)ETC(J)G}@cpzw4x}&!(x5lXlNN+5Byqge0yhf+avZUN_(Ucko04B)})h zvXdoWBo+w_2EbruFc{1jm7~glP{4Nm-X8X99Wn9pAb9JI7nOiNyOR~jT z%5P@4dZpY}Rc)r-4WRA@(ARhas0+H(1zoBNx&(^Wqwh!ALgWrgV0M0X0x0XFI*Ec2 z8%p3Lbgq5uB4#}k&Ztg^@ZgUG#(o-!iGscru3jZ1AE5#1`fCmHTF3q#Z&v0unAHE~awx5*wq;?Gm4umApTo3C5$%nDB4o@OOnC-7agy~Em( zUhv{TXb=TX2-B9X?if6m@f{$p_qz@e{%KU5dj?L=z{%;Wb2bl-xA3`Z%q_f{Gv`z% zhD5In+K}?qDnPhCijg+%)C#@-?9x&V1jeyR0OIC*Glx1JC zCi9{aINj3_GhYEVf+r)vKZu z_644X zkg`AKBAEuG7MebNEAsWbD3s=$8z0_ENWx9ibBM$D4vM;c&ViH;f@o+0Mpm_i?obGf z7k3JuHiMqo&<1t6AsD3Kd>j9&eyT zw%H%@MOAf4MQMH6e4yr@6dW%%Hh4@=aeXQ~YIEUm>eDyuutDwFkexoP%V*Q-VWoDk z^Q~HiZSp2`)Tn63OVb5qb^8RWR-S8F(|XE`(t z$@2EY#v!9bMy~wkctxf|O=bG>jfO8qFUB_GU~cg*={R5QRa>wWHSnjkZ@TqPeAUjf zH9FcVH);*Ib=+jt{mGN@m95dYHeRwdz*gr)tpTL-j9ss1Y>g@T&-IC|F*pzC4cp6S zQp+ycihEhTjg6jK;zvK=OjS$Te76t*BSi2BWzPt3;JDWpPUK%CaWDbTgyArTF7z`M zFH@hy&<;#>2QXkskwQot`2X%u@=X0mCouFbsE!=m=(j^+l#nzB$t*@m zbaCtWN_+|VHj5TxAc7(s_9Rrlv?#EU0a#$o5*#DMoQe>QAz#npAE7SSZw zbo$}!f`!c3l@px4VN=Yfb%PIy5;2cY+-XR82M_YeRb*CEzj?=?W@TTY+feyVrA`!2 zKoWtMFTqIrGDWc%{Op}jP_=h-wr^m}&T%7gDwKZ8X~JqP4`Mg=BYsHt?EW5Xa_B=} z;0iu}rYrVe=n5!b>Cexf!Q8>%3c?V0=r&}QoXqd>5G?#Kp{mTYJf_KF;n2<0GdUo@ z(l7=4Ox(18eRlr(i+v2x+wM@gz91jS|hvME0yk6*>yuRJxQ>l>w4TCxdRK} z#Z>Y@PSR?uA2ZluIpfH)F8%-mbPLEW;z%>sEv4I1PmAFK-9S%<)$(yFl6cM-kj7Uk zpqR$QF(M@{hl_YSIbJ)U)PIprDU|YB9x|-RplL-BE=YYzUkAu_f;qxu@?!L2)Jvtt zv5X3o3&w=vG(xMwMo|bH^^{QTREH@zcFvMucJVB7Vty>Hz~_c)QH_O{LLm$Z23!6R z4p|^B&W?HBm+j1&7G_5>ndb*|+_`iw_WbD-##Ej=37Nx?_%82@XgNhPg{gEzSkdLI zbGw0q)xL`&C-yj{}Em|CS+115~=5<;N;?>r0;Q$pG(H~F2xz%EsC$jm2p3u1yC6a#lVSWur>sWZj?tp*Mt)U6>vI7lFqwX@&Cz& zj-{BoMHyUXiH<`I{7`|Y#8c*R;k2fejynlK3l^5q*RIpWNho6`C}5dWs^Em+S2Bn3 zt&ibO8l{*573NOvyv2=^>3MvX-CqIvza|3vYc{tez~ZG!bgpwDd{u6ocsEW$8S^M- z=n*T>ab3FO*>#BCJ3K@177?c5=?GeD(fJEm?9*AyTp?n@FWN9$F9d!# zjU7zl#3f0rV<@?z@OWYD?h0lR9sQ6y-f^E7PYs|sWbs8^C)5B|ckvD+p^Fw2gxvH7 zekkrQ4OL*u-+svZa)Gb?=z;-M)9BELuj>o8u3OOrtEn=Lze5=DA*o>NIv-%E-zWoO z?40ue1u7G{j_0Lu4w(}cmFo(#=EfaG5;~s8W?43+^TBfbvOyKn&CI>|LC=M>&40MK zSU}=jD9bW+p4b7!kD@VF$n;%eenCftmHMcuY+P-asM1yxxs` zi;EoSC~#cn=Q>e6L1pA4sEB^#Pz424#SZr(a>%?4-(4s2cW=hb zT@3GTj(4LNeY%tBV|uk@(ZF@Q8y};%6H1*>>V#4!l>W1XlHfJ^T4tLfb|aXLFbWy; z01_}BJ|FIXr_??c*^IbRF~?z!IcMdIiZet{sVmYsQF`U3I23p=rsL19HPde_T^QwX zf}75vDgkuvS}GbQ2jdq9Ir*SaKH_wT01KFGxOwcN7$WtgV<6qGcYTZ#hG@G47%pD>xWU>Px%1AFMpSdgpNwtp433D%-!}L5M?F>@@DEsob z9AK$w*!@JP^eh(+K-kH_|9}O;gypoM2t%F{=rR+Bm-3FNb}}=x<4Z}Sh;1Y?U4)2$ zOGyWpS?F_$R~b+glOj;cil9iD$qt(`k;x7NG@+?no28?a4u%s&F^Xc}AuzbBGVTiy zgOn&&WaD4bwj|9*8L?hu<9Fes3qE=TNxgdn9XMzB;mnJX^xw8u zjWZ?P_86CPOcT>J;hai{GTWiO11Ki#J7AMe9QP0Z|K#I$+gpE|O#bP8xBa)jY|;Ph za$$2k+1y)!c$Iy&NZ1C}*tP>>g4L4p?B?9YXf(tiLY_?{XorHLtQuu{fNqO|{VzPR zn}d1O6_Iw8-LnJ%Z7R;B7>}mJBkG8cK)P~$G5W!Pjzgy;{KqH>Y`zsup^%0v^26FG2=660vA&OH>Gmmr7gt?0G zTpw#?7Xh0w)k07WkR|)pr8Ex)5Th%q&7@k0RCIm_9QXPPeFCq=?e{Oi3d6=r>Q_7z zY6JDELaeJ>ewFF+EPGcO1xHToEHq+RATOZ=^npr{i@i^uK7OUtS3{(y?EYh^RA7U6 
z!QU=xM7I0a3*z^zK{MVGA+M6J6kM-YYTc!Hcbm3l zH*LaJAmW?RjT#Re=CMOQ;oPkRLRYW*(V>1Z9_?LLZOm9y-WDu3Lf*>QEmegc+e}-5 zL$j@ueOFX#TeUR4Pkq}|j(npn(~34j_4ZzEWf|@77rk9#o0ZkyUVN;PhA}_8FM?G z;EU1;)^DF(vqrHVt8AxMbZSMXR{Te*6&vD?RTjoNcad95Q^9W5Z=^wx9Dqs8tdEF^ zGwIapcph-TpTLfM>#wx+WV(ljP|pMnu#l~0YjOde3>slE z0d8mpKptJfVE*HuiZ}E(#H1?s^Rs9j(%(@@mo2^~D5xd8?Tj zpkg!AyJ%(_8z#O}HY<}CR>;nkWt2M^1>h>-iWYZs;PD*Tf9D(-jRK8mO5)6qyXSsP zr#=-c|Mb3-!RxfK_1s=BJ%wju)DOw`#F0j^(hB;y?tbV-ew9B| zlK${A2)!vS_f~$Hu!v8+Z@Nc5*W=NyJ9lED#c3gM5wTb;yKjGe|N7mKd?HJ< zD}K0Nt&Rq2zb%bGzkI}D0~jFiFN{K}%e?I04{-$DA%%)Rj6M!!R-@1T>@>B}{BP&5JUz?d$Yb@l!PF;k z!H}e?RDy?T%v<_S{P^5w2)J`3!heYlRir_N`=Ivv zJp1+4#oOawUgw63Sq$sVRsi}=6uq>KTQ*ob>0Qy6#f~z@{`qs?x{~Pbk>v%WCHdNs z2-m`s_I5Bu@vRekU{iJ*uz+HDPRjr6nPs0X!fu`qZ>;(e5CH)HI6i-S`u3+u76+6E z%a%78|Led0i!D)Rhz$^iTuidip)7k0Rhu=4*(QGu!MiL|nvc_DXwVyivPXru%3fP1 zN1Yoj(5NyYIpstPV_HW=(crE@QPJ94nYyAuZLYLvqgJlE7+Y$oz#QkAUAgXJoZKg> z!ITijSEt8R%*$7!&6LdPdUTo+1RhSasliek=r{G&@S|%v8k&!&>+ILmbu@gxevOBT znqP#dsjc^@5LemjdQ(KXdg=vTNYVAEb7V+cmWICO5s6uTU>*k^L@|nBOzwvEIZAM* zV%WPXQ{4}E62!tBY3D^s@~LKz4dNEdn_F{sl;*eBK}R`-P|X-(*Ot!`JpW1No~@xQ zt5DaVE33_|OjTBaHrJHZL=EMNvZ|KT*`YChUPL9VrWA6-9Iv3x}Jcx=(VR7v`yOxxjyQxq@68FgsLIIsh5eC(4+ z&z$_?i#TSkI{&?3DU_I(4IYvxXwydd`R{17cet;g|L%e6Q``vHBaYE1x zhA^hPp&j5MDqp|?dH(bce7h?a2v4R3`q$&pFr$^zg)@Wiomro%Pd9tRy`$ksE~i0h z%{1T9IiyS##yY6c@KA^J*hBSIK9%c#4uc5Bt=IlHmFs_hZ+|cE|MC3j;ILc&>v-7u zzws9n=AJ+G14xyJmkT;9?i^0!^56Zo_xe-hgkGANU%J3E>KXP5Y@(nYDBOVg%nwgk zO_sJW-Bvpb_^Wi={}Yfn30-is&;Ilm?^upr&>v*LBy->>p0NCu93_q<#aM=nbVN+t ztGf(w?lV`8J}a0t8O44`rl9}dc<|r2uXlt0{kv%JPw7br`rq|I|DxZ%OG)TYeduW! z^|PIP7NxhmBWSTe;gIEK<(cVut4 z847b4`^1^$aCj%K{MST7vwj8&BKP;e58WW~O3G`aC-(2gT87RJTVYt3uXMU6*0W4T z{C|Q!5=3AP91VS{MpCnZ)fh&t{91<5-lBaNwL0qBnBXI&QQ=BMb!gARA2judJl_yk6 zQQnKw@a-5apqlB1TXuaIcs%e~;4gf_&HwB!;Fb`z!V1hTT}05`E|<5N9Y%^o#ege6 zrdon5V=Yii<_0)}w8^?Pd0AgmQSeNyH5`gACR1Fsw~Z0ZYakl5&9F91Ws7F|o0HFv z>>4XT;!=UQy33DXP}pPXhvjus$B$=D6hUto!w6xYp!lvh)iX$j?jmNpKA-OS`XC#> z5Rr6@`2o@}p8w*+*N~hAPA20Ok`TpAjf297i9DAJCmL`JCTs|6fZ9seA@2a+wq~2w z;db7FueZz^Dgkm1BgCLIXsCEVrpa@nNTkZ{ z_#wzsTbECgG*?K8k4|{2ptIGGm4)}wu00g<wEw*)Z==)bQJVM%U;K-jPhf6JP<;XW@~xJoPUL`71a?zk*AWWU`+{kXj(j zQjV!a+f0tJB`y6^y<*AS(_hurVQtrSQU7ntdkS5vGyJ@0RXe*jLrdzr`l-gB8DTmL zZb#cCPWl`pGpyb+*?a^acUdhg5gG7kD0QiZ*8N!8uD&80@u00JA0YP95BCb;0~H(Q zOd(_BW;+IJLy3~m9!+sBA}e9KmR7quoXk%scuMBeB8&m(jzTQVA!jn6fac7KD?!IJ znTsS?+lruH&S?7`Wc~{pYO19MY9=#|%$bbHgDB|Vfc4G(fhP!f%=#?TLonM~(s`Uf z#m0k7$CnDge8mW%j|ze<2HR>7>@C^S8n-}mjT4(xA-LX-QR2`brG#ZsjWEF$w`qVK zmKnT_dbupUGmIygs6zUtHj8RCaDJUR@{lAeJvy*YpiL}^{HOI|)D_UlbHFH@b2J-M zN3;UK_jTw8G@8bK#?L5AkqZLX`WkJ;|Is~xrqXNJi+p6pC)P7{yp_(&n+eFhO)7b1 z?-T-Zcz15gCU=~wOxt39{XuSJ{l3}z0>w#j^f{P{`jj|Uom`-m3gilX^)vyGxw*e z^d{%gz-7n9Kp25Cg$JZkH!nO<%ntkGWK?DK&#bDVqBzrf3rUf4TB*E2_}^3`qb=^9 zuZOF2&+qFg1}S-3#W;NCpPww19Qsy8Cpx_HS^x5%qtk54C8Yx@Yc!6GwkH=PmSp4W z$G7UP`^``i(5n!JI-sY|4i%eOMg$MQEB;v$p&9T-Ui#F8RuE>(tiE|`p^+rG%$ky7ThI3=c!2Xzmj4 zKD7Q4u~_lc1t1CSvthS{88kB0$hG)oo&8( z?-lP~cBbG{c?GK2_t0tRM7_?D+CmJ%ehwTt2sv(l?|Jt<__+Gq;U}@!_)cJ_7GN$7 z)@L7!je(@n2#Hl+@^9Z=^DelJNj& zW8{&0{*%h@kYM!t1@l+HNnZj0l0=r=oSr}*O8ofgmzZkni|}MXp~!{COQfsg?_IHTL51l4Z zfe(-Ymt;bpzgu)y6Y+r$h=@lkqyg_UfhgBGJ$z)fHef(z-R6sTU&femfu$xBA!xk2 zLSw`gC^QVS0$9Y@u7fZP&5OKRIgt}n9xo!EnZlT!sawyViU#2YzNtkPQ zd_AO)cKb$3Ro?oF=*zCy4F4e}WX?!MbSDd$Qm1Q&F~)8VufBS(z{gzAb$?-&mUh#O zU>HH@b^U#u%OW_nw(8p5FXiY*?!&4~gUXejb}t^xj@BE!oVjXWI7h2M z`6z7BVUwkfyTT3E_oJ3l-93v)4m1Dp7MC=rAlYt%CH)kfDe&OEDanbUE&JqtxqWx9 zV$>k+MpMK>e;6P;4`|w8G?i{LALaRCMF+RciYCKtf^WX&5nsJoBnb)if&=NM2 
zYnLjxf%DcsR~8&dCMpU6-+@Fe3ji~|65o#^eCup&$MChx-%(=l@MrCX8nPEAX8%)K zPXF6aPZdY}mM16$ADieJW_WPKnXG@i2Uy=e6z=_gaCq|i{4V70k)=N7A~-+;#g5@^ zQwT%JoZ^zk2s^Mdy_2Ee8LA_Zso|S2803x>%ZjC#@@Ep8UF3tlhfjhn^9Cri_Ke4$ zM=BmOW9kO|*zr@mD4%RBnPCRO={VT=EN~z08pVSi*eM8xN%0--MfnB+W_nBDq-b^k zvtDF=94!$1%QhlEbzvL0zAL8ehZd=yG7D+n<5l$hhLEKRPa$qCDq{-_fp~ zf)$7yB4DKWS!txx4xNX}Z#qF>b8?c@X!v0iB~`(a5dB^%HA={xa8-h*DeK#mvCsHp z_w`ocy~rfzXWT58Sr!HCu-EmgGba)92{w)gBZp`5zB-BR-YO58|Me(p#jx1)5!U)2&y`=7W zG`-rD`NA+q%U#+c$AxCprP45>f|7a)I)F>SPTk+?LFum&jb=K98diJIn|-4Li1EjX zg-WFF6!u-c9Kuu@8{oSTg^z`tw%%~6URD}A4f;2G3lVV(w$p~J49*A?d6Itj2E~7L zbzK3Sm{l}DJ(a~Gov`6I(N*42Wx;9YWtLi8L?>E#Pnx2O@qr{Kx^*u3QvJ9Al`Cz5 zL4u$49yO2j2cq1KgD%C4m;}nb23YFg<*Q@A6MhrQ+1$&xz^`z{iLx{l0Wc|pzhf!( zzGK=HQ!5LGNHz>|ByH)63dWPgwA9jZFEHfjsyUoJ46Gk1Ti*Y= zUG=N*UB2f`0^Ke9OTZQ&yav{bOuvJ!FFo%9g=W<6c2Y#M`M9D`M-l@WAou}e3O{vG zB?iMGay-*|3AGEJWQmtzN4>C9dz*T3U={OmR0X!R=PW}y>8`k-KsS&nwrTV`B&*Lfou3%`12 z;a%fc`zqbtI3gkWV|fw$?uJL89z3x<^T7+a@G3P#hJU52v>1D^Cud=u6diJ2sI)*= zIPfi^u8ED8i$Y|EG73d6n27#_VuYK}L-+D;47Azl{_%(IS zLKq0v4O$XBw(N4>h8P}ME2`=XP|uy~aW^&A=Srh`4hkZm1CiQN%*>Tbvi6eN^^n}2 z{{)>`F0n0)5W4&uK{iut|{g$KB6FMDx0PChG4|GI6C|}`PyWbdx!CaToJ`ZSmj4y`O1x>DI z8-sE`|KwR=-x|xC=`E=tPEa;5R_^I_??%0h*?x2UXCG36#J%IRHLdI4Q}erfX)vR* zxFPTW))}S?K7$_|5&0h!c!y9jsgcMnLpivEK^$a)@k)8sr;65U6Ha&ptL3CtQLGoMYw^eU1uqqOf)``gA-+$50+ z#S_EY$!xk+S>40eg+n|3LcPcKi7e6g@8DNdoYnEerHj{d%vcalK!#sqph^)P01*#^ zR@H1UvZ#kDqkiA-BEz2zo1Ac02Fv)B#943#)rzT=4pEG)7!Xzw)_pSoKgz<-oTGy2 z?DoZ>u0sBZ@@6e;;bthiB_i+NH`+9Tz%^Cx3^{LA;dtE{y=3&SJOtfKgaCKW55LN% zchk}jeHJ5yTtWkXUF=cq7zQGRnolb$8t>v$)YwhObbfTEP;CsZ;|WyOy*8G?7u>_8 zHUwRwlIljW&I%#-{`0KdS#YklK^$O6$)YWaG*q{SaU|pEhxjO!MBy9mr`zItSb;oy zehh%t^~)V?6Xv6?yt6}DhtlTECiVvW_4dHF0*+1&KGICbYs<|@(fKDhj4QZr1p%^) zB^LIFgCx>t&!flq{5QRqK+4x0&YM483^Rdw*GEU7Ij_Yrkh|c-1?Z1`kc%tZkF{y5 zP8&MsA@3od`!smM@3xk7(QKU^5qsH;NwwK}hKZk|Mr`u~5l#V8E`vN`AH4fp(oRDv zW}iD$U!Sdf0ri=Xg9cve4?uR_%cCHvsC2HIhg2A(K(FQQJi7htM{dXLsz?eWI8w*F z8|-0Jo`}RrVyAYneZ7Tb|hI1DyU&fGY={){DR*1G6lTV~lVH}8*ju+wauW|9#dNjdbZ z1?i4-Et^x0y!XN9%ud0G2kHnBNZ~1v-Ns!GPOps_pc6k<1w8&(csU~hq}?EDZYB) zCS5mb5xx!*^o zpGLcCyz^#$Cn4KfHewcPbvvniTnl$eaBBUOJLm-MXdZ*Sg!xy$y#x}uf9}|MlL&ss zV@i4P9DwP?;sgQ4J>v@a$pOf#9qK-sB@LtXh>M!A$UfMI_ryepi?cHKz|kEA2U4dK z`ZD^+glEL}HUN2IgZDWU6T2psynQ;^;m@wo2o*B1(KE=R7>Ks@$P+XDYGjb4Ni*N^hDM`IfFKri$=5-v!2ZI>xIQmv8kHPkUgCxU7kATYYF)5Y5vX- zd*yFq3Z&@-y&93Y$;D%Bb!VZUF|fB_#eI8edW$X=WGQn;E0Hqd?71V%^to?Gq`4~z zcvm889VBkUxmWuOTLG^3#$CQ#0eqCTaCH4{HuNkpp{<_XSyW=Cc|Cj*f~43b@cZ#a zzWZoAcWWilqP)}kk@Q*fHBj}@7feKoj)rynk*H0SJnYfGr@P__geS5yvjbZ94RBk0 z4Xi#GW25KasvENBiMz^D3qNvPX4EcNe3p>t4N(JJ;jmJLCSM?wRKR3VJ0V*$H+x7g z6jaYH6_xn@3R6z?m0X*oPCQ0?i5o--;xI{E*wLESN0GGAOo0)`&?jv1qhh#WdXcW4 zecYH8fD7f+X61*K!3AO0hfB%9CV;p@9 z(?8a_HAwrHjSy3RkSyAX*>$C@1Zx-f+x4lo7On(?i%V#@9VWc8MkNXiee*nq!>qh& z`)B%s@|EsI+*vUjU1B@EyYyEd)W>@ zLOmi9Gpai=Nat!vQGY%Im;6kJ4N$88W`gL!O$PZ#7)bA$d%tH-a7Sror$^k01n%4Q zhEz6vk~?ZPQO4${^2XGd1#ZXYrvc#v_jvJ;ijoKo<_K9cWm16Mt6scxb-0XDgu@Oj zA8iXoWupZmr9Y&GjkI@FM{w8a@~lJJg&}PiXS@{T?NRA4Sict9XZf__0Z+zo1-c?8#y${< z#^cz{gIZf@2u)hTtrM5PDv6_jON!o(;^9ala5d>&5kfi{h`hyqC zyBgpBBN1QP2e|)XLW ztgQU&D7;x)RjZplJ)GIsH5uk2D{k|Oh^w%4vKGmvBn4hiTt}G&duKq@J zXO!`QoJhbrqP_}I?0u1IA(4~+c!#AM(2+;5LqA_&1Ct%TFA$bpn$P zmsO^aXX$(J)89kI!c#ASonU~e0}ye{QwPw=&O7T4sFSBIri-FoZo4r9<_EXXmh)ADx5u3r}-GIc1>X>MNqc1O!pJ33 zO5`S!SWseGeRhl%(%CQwY*-O>DIrbi{ccv>S_@s&RlpgK zdu%9wCV&+_m}$jr1CG{qLxCoU9=v6oh*j&noC#VHWr*h#UGdUrX&ekvn-uuF98ufD zd2E(es~}5CcrL8LeK(nb4wc|p(&4Z6DrSqg)MB#fT$q?j6qxOZU`5=t8X5FY=Qt%r zQ<0pKC3v3^BV7p>XYO*X^JHF=P36vMU6TA__5zuMYK+GsU#xq9az0}S5Dd3m4H=Ai 
zbZw=I>xr1L!OMr+!+Sv>VqF-UJRH%@?7t83w4{fBObf{C!AH0A$FuOGz*;~fA0Wc| z&+jaH4_R{p83oG-7jMgmhCKWtldNc3VccesSgU?@o349Rj18%k1qO-El(F=A!1P^3 z9P$DUmkd2gEXU6j@g~}TUX5(roapNe*1xdD?=#TX)2{o(rLD(5FwiN-cbP&ovgs&V z-zIkFnDC1lEMwd76EAqI@`~OHy7}Y~QYn?jwKS5iE zKZpz^yiP{A4cup<@@SrnMRJDFcvQfRTs}QnNl5=)_V^wg=YDt+s|?(WIH)j;inU-i z2^GIM6TOzb5Eqi3E~9^nr8=1a6W#sQ3Q)r~T#GZp?^z;OjV@C_lQEFo1*__X%9NTP zK7qJekm=B|qGoyd(xw&1RdNdS_zgA{3=c$>9SNt*9 zN6uPWmPhJf_9}x#zM4x~@?YtrqjzZouwuxM$|UN#$o@-JKg>%H>dya)nNX%qwAxn# z{Y;h%fd=O$qI`G`e?yDY>8qpR_lfrG?S|lZBO&G6|6hd2aRnH9`Vpc0frn`%9E~9x zfw9fFFXq_RSYC^CNfY!&o^ChNc1dc^Hr4E&*MuBV(Zt^W-K9MV@jra{FM@HTE37bm zMj~rKGL;nCH*D8(C}m2|)+V(J=L|7gc8?j%q_Hiy0j9{N+OXt^pqwst(LoEv#z>%U z*HSVsCZT})nIiO^{-2WA@$H9{u+sm5hE?&PCZ6dL&>A6e4FAU$iz)FTa=7B;syyp1 zB>Aw8BXn7#UoJKoo_fZ`xHvLz?K|qb!u?GFGakOLtt9(}Q@X<2R#h@s1V(cly@;an z;!V|Zo%96O0rHWmD496^lXDN0=()82W|cko*+O#1%GZkCmmJ+00TQ@TbMD zWtY4c{U7nSpC-5<3+5S&XOZsen;B}LdCBmf#@!jd;aSe7pC4x`3=d7W zgUuF;$J~ziuNhlvNsB%ZN5{d^3{%e~)y^m($j?5st zr8i-5IrGmC@e}+lB;Y6A#$C6{ll*tdljpl*-Z1f&E*|&6_b@uyW_$}GOYjcYV5Fpt zf@f@Zot0x-DkY(sZuTTn;aZ+3!o8FTUf0kSWUC=4ZqEuT+}x1Lb~Gz)g}DuylwD9K z9_p!_zCiY9;Hg}rDX6Y%V89PJ($(dH3vyvriGJ^S91zLcOh`){JL)4Poh&Yut{bNi zNlW=d+2b*0T%6oeB&@AnO-sB!>{lJE#&cNgGM^>oR}EVvguod6y!yAWq+e(nKJsvD z)-Pg^VPxnH9phE0%!eG^j<`;rk|N8Q1xnISp554Ae-sopw{t!iIFk>$BqjNe*!em~ z?}5T$?fOqa#XWWA6Mx?l6faGC?9y{d=pO`pE0;w)AFPSrl7+NBjc4+44?u#~|NZ*$ zmV0h4=>^_4f}W|6N()=#0xR?N5Otq)#;zZN-=tv0+!nY|GWHm3AtoKV`Y*Yi$S~qfI zWit0M2sHsz;|y{F6-<0-B37wD5v_!qCrY`9);&qm@2Y~Df@<408YtBIe4Hx&;Xrp0 z_g7HS(wImf!wYCnjQkU1z4;2NJszv5<=d{ydC121$yx@j5aNw=TkRcHAn!`11v24u zHMa-~oUED$s3oWW$cWSMp5%&yxQ|GZ*1(^(hxojp-&;6Qz|QZKN`O&47-^Us?PVRE zf&(Farr^MlG~F4p=f35Og7Qs7&Sw%$-ME9I_8`z-;#X-;YotEU_S3qXiVBuBZs>Ua zZh+@Xj7+qUN{m!l=PgyPbi~Q>e=38S1;3BeR#H2JMvM;imU;OafP~CzU3rmlxiJk8 zLPbxyZ?9uyPmFsM#OEN_>B*5TpyjP4TEFHZ{X4+5$horK|9S=o9KA?`F%b&+GG}6R z2ZGv-<-oK%NOdEA8HNWO4=Y3Q8OY7-6csd58UOY8x#6F-Y6LkHVY>Hgn#nAAqn$k% zWvK|CtNu>_Z}wLUM0cLV@X>>^7LI~d*I4sUhIv*lO3O2iFZ2?9-j@@#QU<%r-yZA& z2S13r+PT{=)tSf*rWuk0fb%{z!1$r2$$ZL>tC5eh)#b9k#S8D*UdFaPMS%ZC=ULux z&M(13L!ZNVGRWFZc0v$Vv)hnG&wHMt5Vg++9w-o4Yy^xWV(*DyM?DE!LQcOiQ|2E+ zCjoO*Z$qQN_iJk_NO}{`%+%x!&~=01 zjS-at>U5v!#70Krpd*;bBE6I^G*xOhL54(EhvNzt_0%#o^&e=Ysm76P=@IjjUb6T4 zrxv63T-G@gE!0=xqDPHEqxStckp&H%3%>P|3{NLGClE0mV%Q^o4`~ z!@%4QpVZKawM>hvLKGP97tvOXrl^((mPEA|RWKvr@~{evMUO_3pMV~Zym+@sW8Qw0 z8}$)=3bh9Z$D#z@MZRDj4jj%S*Ceh*FGTPJcumDQeU_`mUGjO00~}^SJF}>tmC|zG zw9!1lQoWbB0i_6cEHrfgie~A2$IOY=#pLlFzGVg%L&-}$A%FI#yi^$f?9+>+QE>w(SjA9fgX*Q~xetcpXLp!&KwcD3Y z@d>3V7Z=LNSBnFT!@ZA2q&ZUxONuohL+0%vsYe%Og;jF}00VW3#6OCYE{vLop*N5( zB7xKBEOCyO{Ixx-YwRp6^WQ<%g`1B)k8;T*v#xr=kmstirgu8j&~$a$fo$&RBcZO-f0?Hr~d!rFIWtN>Ak<3WUEpb;u=MG-!Utds_z zJJXyJ)q$Qb=lYRC^&QI;tJh@Lcu4s|VzQ*n)C_}~Wger%Sv@)h8HMGjtaAO)Y*%;4 zl$c!ex!}3a+&QtlmIz5JBZ$djlgSt!(y9!AqGDL&#C9FqHr1Um%4_MN} zoH8`q1^Yggn&a{IP-gkwugj3`r8qMKK``|>Nb;#KDb*y#CQex# z_V_loayXVDW;2+UxW4SQ+DCDFHcsG`9c!jm=B$=}a}vE8?f%&J_6F&E>4QaVEA-IGR;~r2}2hrUs@>bzu?!e}_O1p1nt=MNZRT&yy#7+c<$l^KyhyOYbdP;IK8 z+Zl(Bh(oXCyzRiq(_=p)sUapK#R+RFHU&t_@p6NTs9L2lDg)|RPLx6+hXV7y8VuPi z2A(N{a@AFrT>(7PT!x{Eq<>45%m#J2jq}~!W%25*jHx?}co*aL?~E5GI<$W*K>0G3KKHLnbwiniy3zP=qH}RAiSFw`l zqUhUN@EOxUHnfoY-tE0_n0Wv^Zk)TfG|ckY ziq;#RDKUR2aRbI1rb!F+e-pV?EWbu4TBvt8B?M4M9Cs^-y&A?VHochdMQ*dELxi}F z0yh>x7ZnL0*n{KHcaei)<3Uqk=;iuj%58jpG#cQ*^cU{kJ+ z=$RnQX!{#ecqD^;lo3J>JdVak_~ZONB680S4p@Lvm*aFmNFSZlm4lFGiu744LpW!h zYfpP#|gX zM;FWIwSL~a_U+D5=Qqm7p3MAeh5!w{eozb>=hUNd)j!CRSKP=KCH&9)Tp;jVWLucS z{8dG$V}A1U$Qx9Wtn?P#Ib0%AVt3SpM1DGyY18o~6-BmXbS8{q>Ph54ciG=(>pN^J 
zL_6SYNlmbfSU(Y7TK>wRgSmc4{oH@Jpz=kh{hw+?ZnytSzSin1O7=h12)_9Lry3;} zNBrNa(f&T?4w*l9Z#-T1*F1n%9_ch#rbI7=b7R8jfLnCqmu!Sr_OB}R-UcEL1@#%# z7fg{&MWKFK`baW|Qd0Ntm4M=|ILi4-vu7nE$e{GoO69A9^5 z-ZS-oE0QZIJo&Avf1!1g{C2jFqdKb29l4T9^V0Z^R>t@9Z41ZG-+r0|udd9<{CwRs zeUX81u{6N;ujYIF5`%E^q&d&BzUaSwo?Fda&;>EheW1r}{>nYm9}uq8rOg(Cr`rOS zOYn4whJtr{-;Y@`o=rH$b#{2p}sQa=IePAl^P{n?Bq5#w?Ac8gajGTlWSB%52s?=!cw zJ#Et^#dxjlV-ikTy1B#l9gS_NLi5Ce89(agK@lzZt_X&_e5nc}YV991-!`i9?k~q> zlMh3^y66@IE@0dx<8-($~d1!EL<;6hsDFohTG(#0lpjitv(t=Fw4!{F9F? z%yVNF-!7Ok9Kpa~F{PYOr$eOeW$oJOdcfv+Z#IpncoAAIS~xUH`!*u(U+l#C{ANM2 zQTiPlf?#iZ<6}De+0HBL6hu*FGTQpP#eOfPpf;zA~ZrJ0H`ZooUD92Z6k5 zzdSi`5|*s7;C^qqfsIpiulEZ2srT@!5kPQB>DLHDy$agE@+|?;l>_d1R@Ws7ylrjZ zDGxvT@Ag@K$X=+KEmg2-qQAMN6V(HBzZM0QRchFaul|%V@XkYkp{r=5|JN=nmZ58J zv(?utA&l`4SJ91sH+$-k>*5L3{A@qQRNZaBQhz z1w`Gsv0)o;2!_mNV5l|Q6|rG2J+~&B|Xn~Fgxv^TuH2vv7}qn@0yI{ zYYp?w@jyy}koFX52#icv(YyA(i^9IDqn?wg+G1}O&b8q}dRs50!ZN`3QtLjF7dS}IMqRk5Y>}>ezDiV40)etez3X3` z4vM?{fS|e0|IlUKfz3H65YlYFQ`tEO?Ux}f9X!Eq1jQG=O%#`GJ`r-^*!2WINAkvR zlWC{ad{FC&%%RRXc%?Q%7Y~J2land=<|y+(_`Mzt*y5E>F%#q+sl$pey9E(Ob&bBk zV&sV7jS|%FJidFwqj|(Wh71{Nj{P+u^&dA6cLL(ied^2qvamp(gRUsAh^0q?4!VAu zwWeTe)+;aLB!0FWy*arx|Vo4I9Sd}fK&1C)S|U%?(Bjjy4j-= zjfCCiAM6R*g<>xH^|FTqj7XX_^X;kbAnef4MweuJ!*v@&xnXk#i?4{*TLjztiHbw_ z03St9_vsC%4DYOlCYt`E<0IA|H^e3er@t~`a2C@Z2QnOx83-w{8xA$hqYUr_`|A?? ztCF3`ixrSxlwlk`BG7y9g2n5ZF#;FH3lFz#0@toA>)mG$Vd(1&9 zMhfXRq+JgS2ceVDeud)O_570*vbC}oTO2BRHPSl$9O>JQdM&c5EaQg|vU@M%SA0gx z3G4>D5u$88k{58#3XiFg`yZ<=(ybq%WSsBiB$;k~r~GaG%0>Ye*eS{Ch;UHb3L&6c zij^cTzWW>*EEM*s$LuID7%FD~E@;8|E5>DFLSfH-rT7w%5#TC_YA!_BX5Ap0Vfc6T z^5VexU$VEW)JTvxf%9-mi2S?ok9ileQ?Y){FzOIZraG|0y9kfMl};~M@|T1F#+dEe z@b8lW^?@wbg%`h@nHzS)45FHMS_v&Qsf0DI7QhT*B*3R5-jeo%_?-%0liFB`6d_~DMOl_5eE!I_>ttBhP-Yj z7y5NV@CT9*2Fqi_0CMnjEVPB5fW05T!FqoC?Ivx>cM#b^ z<_6UeomX4O?tL;p*v;!;WYNcHpqc5heJ>!40B4pQ5#NJk(U+wAMSj$XN4eq&_t55$ zmUehQp70C2s#lLe1b$wcVyM`aY(B|-m^^C;CHmQj52c!FvaSi0R)jRa2LuNLIcFV2 z9}%@=eeb#?>jZd)`->OnBjinfJ>)F80+otQ{ug`LL0%)L8!U=Te`*jOu^#Mz{Kq}r z0#=had|0nLE0i_DJ{8g_9-pmNZ3u)26y2}gH{YCbOq#5%uR}!q650QYt&}dpI3+o? 
zu;DLmG^-SQe(O zV^r`G1)9^;GIUoW``U{Q*E^~_hf$VfZwwe+^C*ttDQSuC4IsWsy0ieeFA+b!NyP?~(#>UJ zn!7-k(OH&UXYAAA0SXpY@^hrHL~o)cgOZq(aZH)|Hror$R5M)SNHyuBO*#fVu1VT7 zIU%vU(j}Y;Ah3K{wS57LZ*E2}@?Bx942m>2UMr<^aI-a}(==V%UT^^lu*I{M<9mIK zeQD|*LE>Ov4cX~(D9+Fg*YI;_N3Sy|y`TN4Il3tIrCrWP7W3!e&febM$4n4uI5d>B zjudDyUv?$Q*d~#JzsRM%h5i~q?cqF2e&$HPmb{uhk*Cy$v&jb!+cG#c1V)py~T z(C=aiCBB`x0}0G`A{ZT5R6DbW$k(Jhk-6oZJLha$2ULAf%)mf}x$y_W8^~im)1Q0c z9MSsyhifd^^@ycK_HFoo!80Yg#|WHBXxvFfavV6wW690ee{|qw;$XNm^ui4k2a^Lx zc9Y^93!F}r*(nRG@cdLG_79ewbza-MBwDg~O$I~mCycCB+&ENKPkQqj-J=Q=piZ?M zGtEEz^+;#t9Ja>omqDo1KlV1|(y%w#n4_Oo+r@cWm09y-xyZX~h0A?|kyX}s95hH| z!?(*q%D@N2EB_|@Jt29E)-yUE2#CVJ72;E5P#pg5WFwYf_k?pM)|Mt$j)FFBM`?30 zB~20-Cunr=ur0GNfxJbj@jKvf2uF~hS?B#0Z^kbc= zh7-$-g_Rr~=pXG@yZfYgz+(B8NrT(y^POXf&CHdrYGRqn|A>4Spdbx3k2U?|%+c4T zX!uC>^Y(y-YP=85@Q5v^A5UF56?c7I_B}@~?>EV}>B+)q7Gs4CmioMa2y%gqwp}p7 zGj9T%c4I>RLx<%GRK1mN_0M3jS-ToXic6*^oOq{+NdqX^Qms-2`x1`==nPPV8uGj- z%Ghx942uUP;^xF%o`0v{PZlm^#CqSX(>uYg| zxr*9N0n0Op%Cgp4;q{szd*3@!6Z_z#x2x>@b9?|-PSmfmHrj|{uMnYBcq5%P?#v90 zkL`Ag%Hnf@YV@~|&MDtj%2hu;<~Cgj3(~gk9b`j1NigZDW9BuU2fOK(YD?ud%z~&7 zA;!2@@A^LJ&&onoBz&du{mk`&Ze;*wV-QZ={qA#C&`uR}9&_;DqqbalETqz`dA^J+ z7-{bs_Xu)vn=4@WLGoV+kGPY#_@t&JSt?9uUaeQJ7lrs-cMY3Eelh2fbU4xmg|(Li zJtAD?jCu;ZB&vM}jqyZ6)e7GK%mB!O*xEGj>i!<$NIT|77M!bGt8+xDxLc5P%8pL_ zVrgDPpq%*L&>FeOun>0htGIf&_HU2QSHhSGB9o>Wt?`}KuL6q-`?Z14ax#HE>vqMl{yR0e{Q(MDx zZZ>gZB1}y9TCL}_7~!<`q;JrVH4^$&=itfIJW!{?bhwYqllu2)trKE3 z2;kRr7PIEh$Bj~tbk_Hwm{_t6;{6T0uU!$yy^B~PnTitD0=mHccnF|Cw z#Yw-p0x1^~c6z-3K$Zr?MD9vs_*Qlq4+z|dFg%gD0(Al5nt??FO;UeIQjoXayjbjI zpgMjjqas1;?$#&ncAu*USn$Pi)+)OTrb9KBmPh_4Ow#xcB!tX`W#PJ{ly5BVA z(fM^dwC>AnOh^@8Y?HB|rhj)d)(bT|0~0X3krh|p<_@dMDXx*_FIg!a$+hk!ODSX^ zpt&A{LlgVe!!>JEEI#BPAzfB9%}KAjN)IHV)Bc7qT*b^&m&AZ)%XtvT=|PL1lnP+c zFac6 zpj4j2wCP}?23-ekX2+l8O8YpbQKk?v_WMw+KI4doIo`nE<;ZjkW*)yd;K0)h-_!(* zu!V8T7Ki{b9zSJ;le4AwXd06lfRhh&TD3$=7D5<8J-P2!-^UU1kpyUuu1@o=UK{g4 SxL{x)5GN{UF4#B%*#8AL3ToN_ literal 0 HcmV?d00001 diff --git a/charts/k10/k10/4.5.900/Chart.yaml b/charts/k10/k10/4.5.900/Chart.yaml new file mode 100644 index 000000000..65452a064 --- /dev/null +++ b/charts/k10/k10/4.5.900/Chart.yaml @@ -0,0 +1,15 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: K10 + catalog.cattle.io/release-name: k10 +apiVersion: v2 +appVersion: 4.5.9 +description: Kasten’s K10 Data Management Platform +home: https://kasten.io/ +icon: https://docs.kasten.io/_static/kasten-logo-vertical.png +kubeVersion: '>= 1.17.0-0' +maintainers: +- email: support@kasten.io + name: kastenIO +name: k10 +version: 4.5.900 diff --git a/charts/k10/k10/4.5.900/README.md b/charts/k10/k10/4.5.900/README.md new file mode 100644 index 000000000..631f6f4fb --- /dev/null +++ b/charts/k10/k10/4.5.900/README.md @@ -0,0 +1,226 @@ +# Kasten's K10 Helm chart. + +[Kasten's k10](https://docs.kasten.io/) is a data lifecycle management system for all your persistence.enabled container-based applications. + +## TL;DR; + +```console +$ helm install kasten/k10 --name=k10 --namespace=kasten-io +``` + +## Introduction + +This chart bootstraps Kasten's K10 platform on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
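+
+The install examples in this README use Helm v2 syntax (`--name`, `--purge`). A minimal Helm v3 sketch, assuming the chart is served from the `kasten` repository at `https://charts.kasten.io/` and using placeholder company/email values for the EULA fields listed in the Configuration table below:
+
+```console
+# Add the Kasten Helm repository (repo URL assumed), then install with Helm v3:
+# the release name is positional, and --create-namespace creates kasten-io.
+$ helm repo add kasten https://charts.kasten.io/
+$ helm install k10 kasten/k10 --namespace=kasten-io --create-namespace \
+    --set eula.accept=true \
+    --set eula.company="Example Corp" \
+    --set eula.email="admin@example.com"
+```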
+
+## Prerequisites
+ - Kubernetes 1.17 or later (matching the chart's `kubeVersion` constraint)
+
+## Installing the Chart
+
+To install the chart on a [GKE](https://cloud.google.com/container-engine/) cluster
+
+```console
+$ helm install kasten/k10 --name=k10 --namespace=kasten-io
+```
+
+To install the chart on an [AWS](https://aws.amazon.com/) [kops](https://github.com/kubernetes/kops)-created cluster
+
+```console
+$ helm install kasten/k10 --name=k10 --namespace=kasten-io --set secrets.awsAccessKeyId="${AWS_ACCESS_KEY_ID}" \
+ --set secrets.awsSecretAccessKey="${AWS_SECRET_ACCESS_KEY}"
+```
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `k10` application:
+
+```console
+$ helm delete k10 --purge
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the K10
+chart and their default values.
+
+Parameter | Description | Default
+--- | --- | ---
+`eula.accept` | Whether to accept the EULA before installation | `false`
+`eula.company` | Company name. Required field if EULA is accepted | `None`
+`eula.email` | Contact email. Required field if EULA is accepted | `None`
+`license` | License string obtained from Kasten | `None`
+`rbac.create` | Whether to enable RBAC with a specific cluster role and binding for K10 | `true`
+`scc.create` | Whether to create a SecurityContextConstraints object for K10 ServiceAccounts | `false`
+`services.dashboardbff.hostNetwork` | Whether the dashboardbff pods may use the node network | `false`
+`services.executor.hostNetwork` | Whether the executor pods may use the node network | `false`
+`services.aggregatedapis.hostNetwork` | Whether the aggregatedapis pods may use the node network | `false`
+`serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true`
+`serviceAccount.name` | The name of the ServiceAccount to use. If not set, a name is derived using the release and chart names. | `None`
+`ingress.create` | Specifies whether the K10 dashboard should be exposed via ingress | `false`
+`ingress.class` | Cluster ingress controller class: `nginx`, `GCE` | `None`
+`ingress.host` | FQDN (e.g., `k10.example.com`) for name-based virtual host | `None`
+`ingress.urlPath` | URL path for K10 Dashboard (e.g., `/k10`) | `Release.Name`
+`ingress.annotations` | Additional Ingress object annotations | `{}`
+`ingress.tls.enabled` | Enables TLS for `ingress.host` | `false`
+`ingress.tls.secretName` | Name of the TLS secret | `None`
+`ingress.pathType` | Specifies the path type for the ingress resource | `ImplementationSpecific`
+`global.persistence.enabled` | Use PVs to persist data | `true`
+`global.persistence.size` | Default global size of volumes for K10 persistent services | `20Gi`
+`global.persistence.catalog.size` | Size of a volume for catalog service | `global.persistence.size`
+`global.persistence.jobs.size` | Size of a volume for jobs service | `global.persistence.size`
+`global.persistence.logging.size` | Size of a volume for logging service | `global.persistence.size`
+`global.persistence.metering.size` | Size of a volume for metering service | `global.persistence.size`
+`global.persistence.storageClass` | Specified StorageClassName will be used for PVCs | `None`
+`global.airgapped.repository` | Specify the helm repository for offline (airgapped) installation | `''`
+`global.imagePullSecret` | Provide secret which contains docker config for private repository. Use `k10-ecr` when secrets.dockerConfigPath is used. | `''`
+`secrets.awsAccessKeyId` | AWS access key ID (required for AWS deployment) | `None`
+`secrets.awsSecretAccessKey` | AWS access key secret | `None`
+`secrets.awsIamRole` | ARN of the AWS IAM role assumed by K10 to perform any AWS operation. | `None`
+`secrets.googleApiKey` | Non-default base64 encoded GCP Service Account key file | `None`
+`secrets.azureTenantId` | Azure tenant ID (required for Azure deployment) | `None`
+`secrets.azureClientId` | Azure Service App ID | `None`
+`secrets.azureClientSecret` | Azure Service App secret | `None`
+`secrets.azureResourceGroup` | Resource Group name that was created for the Kubernetes cluster | `None`
+`secrets.azureSubscriptionID` | Subscription ID in your Azure tenant | `None`
+`secrets.azureResourceMgrEndpoint` | Resource management endpoint for the Azure Stack instance | `None`
+`secrets.azureADEndpoint` | Azure Active Directory login endpoint | `None`
+`secrets.azureADResourceID` | Azure Active Directory resource ID to obtain AD tokens | `None`
+`secrets.vsphereEndpoint` | vSphere endpoint for login | `None`
+`secrets.vsphereUsername` | vSphere username for login | `None`
+`secrets.vspherePassword` | vSphere password for login | `None`
+`secrets.dockerConfigPath` | Use --set-file secrets.dockerConfigPath=path_to_docker_config.yaml to specify docker config for image pull | `None`
+`cacertconfigmap.name` | Name of the ConfigMap that contains a certificate for a trusted root certificate authority | `None`
+`clusterName` | Cluster name for better logs visibility | `None`
+`metering.awsRegion` | Sets AWS_REGION for metering service | `None`
+`metering.mode` | Control license reporting (set to `airgap` for private-network installs) | `None`
+`metering.reportCollectionPeriod` | Sets metric report collection period (in seconds) | `1800`
+`metering.reportPushPeriod` | Sets metric report push period (in seconds) | `3600`
+`metering.promoID` | Sets K10 promotion ID from marketing campaigns | `None`
+`metering.awsMarketplace` | Sets AWS cloud metering license mode | `false`
+`metering.awsManagedLicense` | Sets AWS managed license mode | `false`
+`metering.redhatMarketplacePayg` | Sets Red Hat cloud metering license mode | `false`
+`metering.licenseConfigSecretName` | Sets AWS managed license config secret | `None`
+`externalGateway.create` | Configures an external gateway for K10 API services | `false`
+`externalGateway.annotations` | Standard annotations for the services | `None`
+`externalGateway.fqdn.name` | Domain name for the K10 API services | `None`
+`externalGateway.fqdn.type` | Supported gateway type: `route53-mapper` or `external-dns` | `None`
+`externalGateway.awsSSLCertARN` | ARN for the AWS ACM SSL certificate used in the K10 API server | `None`
+`auth.basicAuth.enabled` | Configures basic authentication for the K10 dashboard | `false`
+`auth.basicAuth.htpasswd` | A username and password pair separated by a colon character | `None`
+`auth.basicAuth.secretName` | Name of an existing Secret that contains a file generated with htpasswd | `None`
+`auth.k10AdminGroups` | A list of groups whose members are granted admin level access to K10's dashboard | `None`
+`auth.k10AdminUsers` | A list of users who are granted admin level access to K10's dashboard | `None`
+`auth.tokenAuth.enabled` | Configures token based authentication for the K10 dashboard | `false`
+`auth.oidcAuth.enabled` | Configures Open ID Connect based authentication for the K10 dashboard | `false`
+`auth.oidcAuth.providerURL` | URL for the OIDC Provider | `None`
+`auth.oidcAuth.redirectURL` | URL to the K10 gateway service | `None` +`auth.oidcAuth.scopes` | Space separated OIDC scopes required for userinfo. Example: "profile email" | `None` +`auth.oidcAuth.prompt` | The type of prompt to be used during authentication (none, consent, login or select_account) | `select_account` +`auth.oidcAuth.clientID` | Client ID given by the OIDC provider for K10 | `None` +`auth.oidcAuth.clientSecret` | Client secret given by the OIDC provider for K10 | `None` +`auth.oidcAuth.usernameClaim` | The claim to be used as the username | `sub` +`auth.oidcAuth.usernamePrefix` | Prefix that has to be used with the username obtained from the username claim | `None` +`auth.oidcAuth.groupClaim` | Name of a custom OpenID Connect claim for specifying user groups | `None` +`auth.oidcAuth.groupPrefix` | All groups will be prefixed with this value to prevent conflicts | `None` +`auth.openshift.enabled` | Enables access to the K10 dashboard by authenticating with the OpenShift OAuth server | `false` +`auth.openshift.serviceAccount` | Name of the service account that represents an OAuth client | `None` +`auth.openshift.clientSecret` | The token corresponding to the service account | `None` +`auth.openshift.dashboardURL` | The URL used for accessing K10's dashboard | `None` +`auth.openshift.openshiftURL` | The URL for accessing OpenShift's API server | `None` +`auth.openshift.insecureCA` | To turn off SSL verification of connections to OpenShift | `false` +`auth.openshift.useServiceAccountCA` | Set this to true to use the CA certificate corresponding to the Service Account ``auth.openshift.serviceAccount`` usually found at ``/var/run/secrets/kubernetes.io/serviceaccount/ca.crt`` | `false` +`auth.ldap.enabled` | Configures Active Directory/LDAP based authentication for the K10 dashboard | `false` +`auth.ldap.restartPod` | To force a restart of the authentication service pod (useful when updating authentication config) | `false` +`auth.ldap.dashboardURL` | The URL used for accessing K10's dashboard | `None` +`auth.ldap.host` | Host and optional port of the AD/LDAP server in the form `host:port` | `None` +`auth.ldap.insecureNoSSL` | Required if the AD/LDAP host is not using TLS | `false` +`auth.ldap.insecureSkipVerifySSL` | To turn off SSL verification of connections to the AD/LDAP host | `false` +`auth.ldap.startTLS` | When set to true, ldap:// is used to connect to the server followed by creation of a TLS session. When set to false, ldaps:// is used. 
| `false`
+`auth.ldap.bindDN` | The Distinguished Name (username) used for connecting to the AD/LDAP host | `None`
+`auth.ldap.bindPW` | The password corresponding to the `bindDN` for connecting to the AD/LDAP host | `None`
+`auth.ldap.bindPWSecretName` | The name of the secret that contains the password corresponding to the `bindDN` for connecting to the AD/LDAP host | `None`
+`auth.ldap.userSearch.baseDN` | The base Distinguished Name to start the AD/LDAP search from | `None`
+`auth.ldap.userSearch.filter` | Optional filter to apply when searching the directory | `None`
+`auth.ldap.userSearch.username` | Attribute used for comparing user entries when searching the directory | `None`
+`auth.ldap.userSearch.idAttr` | AD/LDAP attribute in a user's entry that should map to the user ID field in a token | `None`
+`auth.ldap.userSearch.emailAttr` | AD/LDAP attribute in a user's entry that should map to the email field in a token | `None`
+`auth.ldap.userSearch.nameAttr` | AD/LDAP attribute in a user's entry that should map to the name field in a token | `None`
+`auth.ldap.userSearch.preferredUsernameAttr` | AD/LDAP attribute in a user's entry that should map to the preferred_username field in a token | `None`
+`auth.ldap.groupSearch.baseDN` | The base Distinguished Name to start the AD/LDAP group search from | `None`
+`auth.ldap.groupSearch.filter` | Optional filter to apply when searching the directory for groups | `None`
+`auth.ldap.groupSearch.nameAttr` | The AD/LDAP attribute that represents a group's name in the directory | `None`
+`auth.ldap.groupSearch.userMatchers` | List of field pairs that are used to match a user to a group. | `None`
+`auth.ldap.groupSearch.userMatchers.userAttr` | Attribute in the user's entry that must match with the `groupAttr` while searching for groups | `None`
+`auth.ldap.groupSearch.userMatchers.groupAttr` | Attribute in the group's entry that must match with the `userAttr` while searching for groups | `None`
+`auth.groupAllowList` | A list of groups whose members are allowed access to K10's dashboard | `None`
+`services.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for K10 service containers | `{"runAsUser" : 1000, "fsGroup": 1000}`
+`services.securityContext.runAsUser` | User ID K10 service containers run as | `1000`
+`services.securityContext.runAsGroup` | Group ID K10 service containers run as | `1000`
+`services.securityContext.fsGroup` | FSGroup that owns K10 service container volumes | `1000`
+`injectKanisterSidecar.enabled` | Enable Kanister sidecar injection for workload pods | `false`
+`injectKanisterSidecar.namespaceSelector.matchLabels` | Set of labels to select namespaces in which sidecar injection is enabled for workloads | `{}`
+`injectKanisterSidecar.objectSelector.matchLabels` | Set of labels to filter workload objects in which the sidecar is injected | `{}`
+`injectKanisterSidecar.webhookServer.port` | Port number on which the mutating webhook server accepts requests | `8080`
+`gateway.insecureDisableSSLVerify` | Specifies whether to disable SSL verification for gateway pods | `false`
+`gateway.exposeAdminPort` | Specifies whether to expose Admin port for gateway service | `true`
+`genericVolumeSnapshot.resources.[requests\|limits].[cpu\|memory]` | Resource requests and limits for Generic Volume Snapshot restore pods | `{}`
+`prometheus.server.enabled` | If false, K10 Prometheus server will not be created.
The K10 dashboard will not function properly if this option is set to false | `true`
+`prometheus.server.persistentVolume.enabled` | If true, K10 Prometheus server will create a Persistent Volume Claim | `true`
+`prometheus.server.persistentVolume.size` | K10 Prometheus server data Persistent Volume size | `30Gi`
+`prometheus.server.persistentVolume.storageClass` | StorageClassName used to create Prometheus PVC. Setting this option overrides the global StorageClass value | `""`
+`prometheus.server.retention` | (optional) K10 Prometheus data retention | `"30d"`
+`prometheus.server.baseURL` | (optional) K10 Prometheus external url path at which the server can be accessed | `/k10/prometheus/`
+`prometheus.server.prefixURL` | (optional) K10 Prometheus prefix slug at which the server can be accessed | `/k10/prometheus/`
+`grafana.enabled` | (optional) If false, Grafana will not be available | `true`
+`grafana.prometheusPrefixURL` | (optional) URL for Prometheus datasource in Grafana (must match `prometheus.server.prefixURL`) | `/k10/prometheus/`
+`resources...[requests\|limits].[cpu\|memory]` | Overwrite default K10 [container resource requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) | varies by container
+`route.enabled` | Specifies whether the K10 dashboard should be exposed via route | `false`
+`route.host` | FQDN (e.g., `.k10.example.com`) for name-based virtual host | `""`
+`route.path` | URL path for K10 Dashboard (e.g., `/k10`) | `/`
+`route.annotations` | Additional Route object annotations | `{}`
+`route.labels` | Additional Route object labels | `{}`
+`route.tls.enabled` | Enables TLS for `route.host` | `false`
+`route.tls.insecureEdgeTerminationPolicy` | Specifies behavior for insecure scheme traffic | `Redirect`
+`route.tls.termination` | Specifies the TLS termination of the route | `edge`
+`apigateway.serviceResolver` | Specifies the resolver used for service discovery in the API gateway (`dns` or `endpoint`) | `dns`
+`limiter.genericVolumeSnapshots` | Limit of concurrent generic volume snapshot create operations | `10`
+`limiter.genericVolumeCopies` | Limit of concurrent generic volume snapshot copy operations | `10`
+`limiter.genericVolumeRestores` | Limit of concurrent generic volume snapshot restore operations | `10`
+`limiter.csiSnapshots` | Limit of concurrent CSI snapshot create operations | `10`
+`limiter.providerSnapshots` | Limit of concurrent cloud provider create operations | `10`
+`cluster.domainName` | Specifies the domain name of the cluster | `cluster.local`
+`kanister.backupTimeout` | Specifies timeout to set on Kanister backup operations | `45`
+`kanister.restoreTimeout` | Specifies timeout to set on Kanister restore operations | `600`
+`kanister.deleteTimeout` | Specifies timeout to set on Kanister delete operations | `45`
+`kanister.hookTimeout` | Specifies timeout to set on Kanister pre-hook and post-hook operations | `20`
+`kanister.checkRepoTimeout` | Specifies timeout to set on Kanister checkRepo operations | `20`
+`kanister.statsTimeout` | Specifies timeout to set on Kanister stats operations | `20`
+`kanister.efsPostRestoreTimeout` | Specifies timeout to set on Kanister efsPostRestore operations | `45`
+`awsConfig.assumeRoleDuration` | Duration of a session token generated by AWS for an IAM role. The minimum value is 15 minutes and the maximum value is the maximum duration setting for that IAM role.
For documentation about how to view and edit the maximum session duration for an IAM role see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session. The value accepts a number along with a single character ``m`` (for minutes) or ``h`` (for hours). Examples: 60m or 2h | `''`
+`awsConfig.efsBackupVaultName` | Specifies the AWS EFS backup vault name | `k10vault`
+`vmWare.taskTimeoutMin` | Specifies the timeout for VMware operations | `60`
+`encryption.primaryKey.awsCmkKeyId` | Specifies the AWS CMK key ID for encrypting K10 Primary Key | `None`
+
+## Helm tips and tricks
+
+There is a way of setting values via a yaml file instead of using `--set`.
+You can copy/paste values into a file (e.g., my_values.yaml):
+
+```yaml
+secrets:
+  awsAccessKeyId: ${AWS_ACCESS_KEY_ID}
+  awsSecretAccessKey: ${AWS_SECRET_ACCESS_KEY}
+```
+and then run:
+```bash
+  envsubst < my_values.yaml > my_values_out.yaml && helm install kasten/k10 --name=k10 --namespace=kasten-io -f my_values_out.yaml
+```
+
+To use non-default GCP ServiceAccount (SA) credentials, the credentials JSON file needs to be encoded into a base64 string.
+
+
+```bash
+  sa_key=$(base64 -w0 sa-key.json)
+  helm install kasten/k10 --name=k10 --namespace=kasten-io --set secrets.googleApiKey=$sa_key
+```
diff --git a/charts/k10/k10/4.5.900/app-readme.md b/charts/k10/k10/4.5.900/app-readme.md
new file mode 100644
index 000000000..1b221891b
--- /dev/null
+++ b/charts/k10/k10/4.5.900/app-readme.md
@@ -0,0 +1,5 @@
+The K10 data management platform, purpose-built for Kubernetes, provides enterprise operations teams an easy-to-use, scalable, and secure system for backup/restore, disaster recovery, and mobility of Kubernetes applications.
+
+K10’s application-centric approach and deep integrations with relational and NoSQL databases, Kubernetes distributions, and all clouds provide teams the freedom of infrastructure choice without sacrificing operational simplicity. Policy-driven and extensible, K10 provides a native Kubernetes API and includes features such as full-spectrum consistency, database integrations, automatic application discovery, multi-cloud mobility, and a powerful web-based user interface.
+
+For more information, refer to the docs [https://docs.kasten.io/](https://docs.kasten.io/)
diff --git a/charts/k10/k10/4.5.900/charts/grafana/.helmignore b/charts/k10/k10/4.5.900/charts/grafana/.helmignore
new file mode 100644
index 000000000..8cade1318
--- /dev/null
+++ b/charts/k10/k10/4.5.900/charts/grafana/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.vscode
+.project
+.idea/
+*.tmproj
+OWNERS
diff --git a/charts/k10/k10/4.5.900/charts/grafana/Chart.yaml b/charts/k10/k10/4.5.900/charts/grafana/Chart.yaml
new file mode 100644
index 000000000..e2e2ba77a
--- /dev/null
+++ b/charts/k10/k10/4.5.900/charts/grafana/Chart.yaml
@@ -0,0 +1,22 @@
+apiVersion: v2
+appVersion: 8.1.0
+description: The leading tool for querying and visualizing time series and metrics.
+home: https://grafana.net +icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png +kubeVersion: ^1.8.0-0 +maintainers: +- email: zanhsieh@gmail.com + name: zanhsieh +- email: rluckie@cisco.com + name: rtluckie +- email: maor.friedman@redhat.com + name: maorfr +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: mail@torstenwalter.de + name: torstenwalter +name: grafana +sources: +- https://github.com/grafana/grafana +type: application +version: 6.15.0 diff --git a/charts/k10/k10/4.5.900/charts/grafana/README.md b/charts/k10/k10/4.5.900/charts/grafana/README.md new file mode 100644 index 000000000..01219f7cb --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/README.md @@ -0,0 +1,528 @@ +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## Get Repo Info + +```console +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release grafana/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 4.0.0 (And 3.12.1) + +This version requires Helm >= 2.12.0. + +### To 5.0.0 + +You have to add --force to your helm upgrade command as the labels of the chart have changed. + +### To 6.0.0 + +This version requires Helm >= 3.1.0. 
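+
+As a sketch, an upgrade that crosses the 5.0.0 boundary would add the `--force` flag noted above (the release name `my-release` is illustrative):
+
+```console
+helm upgrade my-release grafana/grafana --force
+```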
+
+## Configuration
+
+| Parameter | Description | Default |
+|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
+| `replicas` | Number of nodes | `1` |
+| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` |
+| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` |
+| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` |
+| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 }, "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` |
+| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`|
+| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` |
+| `priorityClassName` | Name of Priority Class to assign pods | `nil` |
+| `image.repository` | Image repository | `grafana/grafana` |
+| `image.tag` | Image tag (`Must be >= 5.0.0`) | `8.0.3` |
+| `image.sha` | Image sha (optional) | `80c6d6ac633ba5ab3f722976fb1d9a138f87ca6a9934fcd26a5fc28cbde7dbfa` |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Image pull secrets | `{}` |
+| `service.enabled` | Enable grafana service | `true` |
+| `service.type` | Kubernetes service type | `ClusterIP` |
+| `service.port` | Kubernetes port where service is exposed | `80` |
+| `service.portName` | Name of the port on the service | `service` |
+| `service.targetPort` | Internal port the service forwards to | `3000` |
+| `service.nodePort` | Kubernetes service nodePort | `nil` |
+| `service.annotations` | Service annotations | `{}` |
+| `service.labels` | Custom labels | `{}` |
+| `service.clusterIP` | internal cluster service IP | `nil` |
+| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` |
+| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` |
+| `service.externalIPs` | service external IP addresses | `[]` |
+| `extraExposePorts` | Additional service ports for sidecar containers| `[]` |
+| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` |
+| `ingress.enabled` | Enables Ingress | `false` |
+| `ingress.annotations` | Ingress annotations (values are templated) | `{}` |
+| `ingress.labels` | Custom labels | `{}` |
+| `ingress.path` | Ingress accepted path | `/` |
+| `ingress.pathType` | Ingress type of path | `Prefix` |
+| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` |
+| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). Requires `ingress.hosts` to have one or more host entries.
| `[]` |
+| `ingress.tls` | Ingress TLS configuration | `[]` |
+| `resources` | CPU/Memory resource requests/limits | `{}` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `tolerations` | Toleration labels for pod assignment | `[]` |
+| `affinity` | Affinity settings for pod assignment | `{}` |
+| `extraInitContainers` | Init containers to add to the grafana pod | `{}` |
+| `extraContainers` | Sidecar containers to add to the grafana pod | `{}` |
+| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` |
+| `extraLabels` | Custom labels for all manifests | `{}` |
+| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
+| `global.persistence.enabled` | Use persistent volume to store data | `false` |
+| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` |
+| `global.persistence.size` | Size of persistent volume claim | `20Gi` |
+| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
+| `global.persistence.storageClass` | Type of persistent volume claim | `nil` |
+| `global.persistence.accessMode` | Persistence access modes | `[ReadWriteOnce]` |
+| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` |
+| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` |
+| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` |
+| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` |
+| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` |
+| `initChownData.enabled` | If false, don't reset data ownership at startup | `true` |
+| `initChownData.image.repository` | init-chown-data container image repository | `busybox` |
+| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` |
+| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` |
+| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` |
+| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` |
+| `schedulerName` | Alternate scheduler name | `nil` |
+| `env` | Extra environment variables passed to pods | `{}` |
+| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. | `{}` |
+| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
+| `envRenderSecret` | Sensitive environment variables passed to pods and stored as secret | `{}` |
+| `enableServiceLinks` | Inject Kubernetes services as environment variables.
| `true` |
+| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
+| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` |
+| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` |
+| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` |
+| `plugins` | Plugins to be loaded along with Grafana | `[]` |
+| `datasources` | Configure grafana datasources (passed through tpl) | `{}` |
+| `notifiers` | Configure grafana notifiers | `{}` |
+| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
+| `dashboards` | Dashboards to import | `{}` |
+| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
+| `grafana.ini` | Grafana's primary configuration | `{}` |
+| `ldap.enabled` | Enable LDAP authentication | `false` |
+| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` |
+| `ldap.config` | Grafana's LDAP configuration | `""` |
+| `annotations` | Deployment annotations | `{}` |
+| `labels` | Deployment labels | `{}` |
+| `podAnnotations` | Pod annotations | `{}` |
+| `podLabels` | Pod labels | `{}` |
+| `podPortName` | Name of the grafana port on the pod | `grafana` |
+| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` |
+| `sidecar.image.tag` | Sidecar image tag | `1.12.2` |
+| `sidecar.image.sha` | Sidecar image sha (optional) | `""` |
+| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
+| `sidecar.resources` | Sidecar resources | `{}` |
+| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable | `false` |
+| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
+| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
+| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
+| `sidecar.dashboards.provider.orgid` | Id of the organisation to which the dashboards should be added | `1` |
+| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` |
+| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` |
+| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` |
+| `sidecar.dashboards.provider.type` | Provider type | `file` |
+| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` |
+| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
+| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` |
+| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
+| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `nil` |
+| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted.
| `/tmp/dashboards` |
+| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
+| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
+| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
+| `sidecar.dashboards.resource` | Whether the sidecar should look into secrets, configmaps, or both. | `both` |
+| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana | `false` |
+| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
+| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` |
+| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
+| `sidecar.datasources.resource` | Whether the sidecar should look into secrets, configmaps, or both. | `both` |
+| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
+| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
+| `sidecar.notifiers.searchNamespace` | If specified, the sidecar will search for notifiers config-maps (or secrets) inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
+| `sidecar.notifiers.resource` | Whether the sidecar should look into secrets, configmaps, or both. | `both` |
+| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
+| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
+| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
+| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` |
+| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
+| `admin.passwordKey` | The key in the existing admin secret containing the password.
| `"admin-password"` | +| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` | +| `serviceAccount.annotations` | ServiceAccount annotations | | +| `serviceAccount.create` | Create service account | `true` | +| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | +| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | +| `rbac.create` | Create and use RBAC resources | `true` | +| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | +| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | +| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` | +| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` | +| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | +| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | +| `command` | Define command to be executed by grafana container at startup | `nil` | +| `testFramework.enabled` | Whether to create test-related resources | `true` | +| `testFramework.image` | `test-framework` image repository. | `bats/bats` | +| `testFramework.tag` | `test-framework` image tag. | `v1.1.0` | +| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` | +| `testFramework.securityContext` | `test-framework` securityContext | `{}` | +| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | +| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | +| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` | +| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | +| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | +| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | +| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | +| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | +| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | +| `serviceMonitor.path` | Path to scrape | `/metrics` | +| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | +| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | +| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | +| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | +| `serviceMonitor.relabelings` | MetricRelabelConfigs to apply to samples before ingestion. 
| `[]` |
+| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` |
+| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` |
+| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` |
+| `imageRenderer.image.tag` | image-renderer Image tag | `latest` |
+| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` |
+| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` |
+| `imageRenderer.env` | extra env-vars for image-renderer | `{}` |
+| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` |
+| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` |
+| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` |
+| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` |
+| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` |
+| `imageRenderer.service.portName` | image-renderer service port name | `'http'` |
+| `imageRenderer.service.port` | image-renderer service port used by both service and deployment | `8081` |
+| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` |
+| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` |
+| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` |
+| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` |
+| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` |
+| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` |
+
+### Example ingress with path
+
+With grafana 6.3 and above
+```yaml
+grafana.ini:
+  server:
+    domain: monitoring.example.com
+    root_url: "%(protocol)s://%(domain)s/grafana"
+    serve_from_sub_path: true
+ingress:
+  enabled: true
+  hosts:
+    - "monitoring.example.com"
+  path: "/grafana"
+```
+
+### Example of extraVolumeMounts
+
+A volume can be of type persistentVolumeClaim or hostPath, but not both at the same time.
+If neither an existingClaim nor a hostPath argument is given, the type defaults to emptyDir.
+
+```yaml
+- extraVolumeMounts:
+  - name: plugins
+    mountPath: /var/lib/grafana/plugins
+    subPath: configs/grafana/plugins
+    existingClaim: existing-grafana-claim
+    readOnly: false
+  - name: dashboards
+    mountPath: /var/lib/grafana/dashboards
+    hostPath: /usr/shared/grafana/dashboards
+    readOnly: false
+```
+
+## Import dashboards
+
+There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method:
+
+```yaml
+dashboards:
+  default:
+    some-dashboard:
+      json: |
+        {
+          "annotations":
+
+          ...
+          # Complete json file here
+          ...
+
+          "title": "Some Dashboard",
+          "uid": "abcd1234",
+          "version": 1
+        }
+    custom-dashboard:
+      # This is a path to a file inside the dashboards directory inside the chart directory
+      file: dashboards/custom-dashboard.json
+    prometheus-stats:
+      # Ref: https://grafana.com/dashboards/2
+      gnetId: 2
+      revision: 2
+      datasource: Prometheus
+    local-dashboard:
+      url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
+```
+
+## BASE64 dashboards
+
+Dashboards can be stored on a server that does not return JSON directly and instead returns a Base64 encoded file (e.g. Gerrit).
+A parameter has been added to the url use case: if you specify a b64content value equal to true after the url entry, Base64 decoding is applied before the file is saved to disk.
+If this entry is not set, or is set to false, no decoding is applied to the file before saving it to disk.
+
+### Gerrit use case
+
+The Gerrit API for downloading files has the following schema: where {project-name} and
+{file-id} usually have '/' in their values, so they MUST be replaced by %2F. For example, if project-name is user/repo, branch-id is master and file-id equals dir1/dir2/dashboard,
+the url value is
+
+## Sidecar for dashboards
+
+If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana
+pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with
+a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written
+to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported
+dashboards are deleted/updated.
+
+A recommendation is to use one configmap per dashboard, as the removal of dashboards from
+a configmap that bundles several of them is currently not properly mirrored in grafana.
+
+Example dashboard config:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sample-grafana-dashboard
+  labels:
+    grafana_dashboard: "1"
+data:
+  k8s-dashboard.json: |-
+  [...]
+```
+
+## Sidecar for datasources
+
+If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana
+pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
+filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in
+those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
+the data sources in grafana can be imported.
+
+Secrets are recommended over configmaps for this use case because datasources usually contain private
+data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
+
+Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
+
+```yaml
+datasources:
+  datasources.yaml:
+    apiVersion: 1
+    datasources:
+    # name of the datasource. Required
+    - name: Graphite
+      # datasource type. Required
+      type: graphite
+      # access mode. proxy or direct (Server or Browser in the UI). Required
+      access: proxy
+      # org id. will default to orgId 1 if not specified
+      orgId: 1
+      # url
+      url: http://localhost:8080
+      # database password, if used
+      password:
+      # database user, if used
+      user:
+      # database name, if used
+      database:
+      # enable/disable basic auth
+      basicAuth:
+      # basic auth username
+      basicAuthUser:
+      # basic auth password
+      basicAuthPassword:
+      # enable/disable with credentials headers
+      withCredentials:
+      # mark as default datasource. Max one per org
+      isDefault:
+      # fields that will be converted to json and stored in json_data
+      jsonData:
+        graphiteVersion: "1.1"
+        tlsAuth: true
+        tlsAuthWithCACert: true
+      # json object of data that will be encrypted.
+      secureJsonData:
+        tlsCACert: "..."
+        tlsClientCert: "..."
+        tlsClientKey: "..."
+      version: 1
+      # allow users to edit datasources from the UI.
+      editable: false
+```
+
+## Sidecar for notifiers
+
+If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana
+pod.
This container lists all secrets (or configmaps, though not recommended) in the cluster and
+filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in
+those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
+the notification channels in grafana can be imported. The secrets must be created before
+`helm install` so that the notifiers init container can list the secrets.
+
+Secrets are recommended over configmaps for this use case because alert notification channels usually contain
+private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
+
+Example notifier config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels):
+
+```yaml
+notifiers:
+  - name: notification-channel-1
+    type: slack
+    uid: notifier1
+    # either
+    org_id: 2
+    # or
+    org_name: Main Org.
+    is_default: true
+    send_reminder: true
+    frequency: 1h
+    disable_resolve_message: false
+    # See `Supported Settings` section for settings supported for each
+    # alert notification type.
+    settings:
+      recipient: 'XXX'
+      token: 'xoxb'
+      uploadImage: true
+      url: https://slack.com
+
+delete_notifiers:
+  - name: notification-channel-1
+    uid: notifier1
+    org_id: 2
+  - name: notification-channel-2
+    # default org_id: 1
+```
+
+## How to serve Grafana with a path prefix (/grafana)
+
+In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml.
+
+```yaml
+ingress:
+  enabled: true
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    nginx.ingress.kubernetes.io/rewrite-target: /$1
+    nginx.ingress.kubernetes.io/use-regex: "true"
+
+  path: /grafana/?(.*)
+  hosts:
+    - k8s.example.dev
+
+grafana.ini:
+  server:
+    root_url: http://localhost:3000/grafana # this host can be localhost
+```
+
+## How to securely reference secrets in grafana.ini
+
+This example uses Grafana's [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets.
+
+In grafana.ini:
+
+```yaml
+grafana.ini:
+  [auth.generic_oauth]
+  enabled = true
+  client_id = $__file{/etc/secrets/auth_generic_oauth/client_id}
+  client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret}
+```
+
+Existing secret, or created along with helm:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: auth-generic-oauth-secret
+type: Opaque
+stringData:
+  client_id:
+  client_secret:
+```
+
+Include in the `extraSecretMounts` configuration flag:
+
+```yaml
+- extraSecretMounts:
+  - name: auth-generic-oauth-secret-mount
+    secretName: auth-generic-oauth-secret
+    defaultMode: 0440
+    mountPath: /etc/secrets/auth_generic_oauth
+    readOnly: true
+```
+
+### extraSecretMounts using a Container Storage Interface (CSI) provider
+
+This example uses a CSI driver, e.g.
retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure).
+
+```yaml
+- extraSecretMounts:
+  - name: secrets-store-inline
+    mountPath: /run/secrets
+    readOnly: true
+    csi:
+      driver: secrets-store.csi.k8s.io
+      readOnly: true
+      volumeAttributes:
+        secretProviderClass: "my-provider"
+      nodePublishSecretRef:
+        name: akv-creds
+```
+
+## Image Renderer Plug-In
+
+This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/docs/remote_rendering_using_docker.md).
+
+```yaml
+imageRenderer:
+  enabled: true
+```
+
+### Image Renderer NetworkPolicy
+
+By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance.
diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/NOTES.txt b/charts/k10/k10/4.5.900/charts/grafana/templates/NOTES.txt
new file mode 100644
index 000000000..ca7d88e3d
--- /dev/null
+++ b/charts/k10/k10/4.5.900/charts/grafana/templates/NOTES.txt
@@ -0,0 +1,54 @@
+1. Get your '{{ .Values.adminUser }}' user password by running:
+
+   kubectl get secret --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
+
+2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster:
+
+   {{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}.svc.cluster.local
+{{ if .Values.ingress.enabled }}
+   If you bind Grafana to port 80, please update the values in values.yaml and reinstall:
+   ```
+   securityContext:
+     runAsUser: 0
+     runAsGroup: 0
+     fsGroup: 0
+
+   command:
+   - "setcap"
+   - "'cap_net_bind_service=+ep'"
+   - "/usr/sbin/grafana-server &&"
+   - "sh"
+   - "/run.sh"
+   ```
+   For details, refer to https://grafana.com/docs/installation/configuration/#http-port.
+   Otherwise Grafana will crash on startup.
+
+   From outside the cluster, the server URL(s) are:
+{{- range .Values.ingress.hosts }}
+   http://{{ . }}
+{{- end }}
+{{ else }}
+   Get the Grafana URL to visit by running these commands in the same shell:
+{{ if contains "NodePort" .Values.service.type -}}
+   export NODE_PORT=$(kubectl get --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
+   export NODE_IP=$(kubectl get nodes --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+   echo http://$NODE_IP:$NODE_PORT
+{{ else if contains "LoadBalancer" .Values.service.type -}}
+   NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc --namespace {{ template "grafana.namespace" . }} -w {{ template "grafana.fullname" . }}'
+   export SERVICE_IP=$(kubectl get svc --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+   http://$SERVICE_IP:{{ .Values.service.port -}}
+{{ else if contains "ClusterIP" .Values.service.type }}
+   export POD_NAME=$(kubectl get pods --namespace {{ template "grafana.namespace" . }} -l "app={{ template "grafana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+   kubectl --namespace {{ template "grafana.namespace" . }} port-forward $POD_NAME 3000
+{{- end }}
+{{- end }}
+
+3.
Login with the password from step 1 and the username: {{ .Values.adminUser }} + +{{- if not .Values.global.persistence.enabled }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Grafana pod is terminated. ##### +################################################################################# +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/_definitions.tpl b/charts/k10/k10/4.5.900/charts/grafana/templates/_definitions.tpl new file mode 100644 index 000000000..f726b9b11 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/_definitions.tpl @@ -0,0 +1,3 @@ +{{/* Autogenerated, do NOT modify */}} +{{- define "k10.grafanaImageTag" -}}8.1.8{{- end -}} +{{- define "k10.grafanaInitContainerImageTag" -}}8.5-230{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/_helpers.tpl b/charts/k10/k10/4.5.900/charts/grafana/templates/_helpers.tpl new file mode 100644 index 000000000..aea79b673 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/_helpers.tpl @@ -0,0 +1,235 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "grafana.serviceAccountNameTest" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} +{{- else -}} + {{ default "default" .Values.serviceAccount.nameTest }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "grafana.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "grafana.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.selectorLabels" . 
}} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.extraLabels }} +{{ toYaml .Values.extraLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "grafana.selectorLabels" -}} +app: {{ include "grafana.name" . }} +release: {{ .Release.Name }} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "grafana.imageRenderer.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.imageRenderer.selectorLabels" . }} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels ImageRenderer +*/}} +{{- define "grafana.imageRenderer.selectorLabels" -}} +app: {{ include "grafana.name" . }}-image-renderer +release: {{ .Release.Name }} +{{- end -}} + +{{/* +Looks if there's an existing secret and reuse its password. If not it generates +new password and use it. +*/}} +{{- define "grafana.password" -}} +{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) -}} + {{- if $secret -}} + {{- index $secret "data" "admin-password" -}} + {{- else -}} + {{- (randAlphaNum 40) | b64enc | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "grafana.rbac.apiVersion" -}} + {{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} + {{- print "rbac.authorization.k8s.io/v1" -}} + {{- else -}} + {{- print "rbac.authorization.k8s.io/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "grafana.ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return if ingress is stable. +*/}} +{{- define "grafana.ingress.isStable" -}} + {{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "grafana.ingress.supportsIngressClassName" -}} + {{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}} +{{- end -}} + +{{/* +Return if ingress supports pathType. +*/}} +{{- define "grafana.ingress.supportsPathType" -}} + {{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}} +{{- end -}} + +{{/* +Figure out the grafana image tag +based on the value of global.upstreamCertifiedImages +*/}} +{{- define "get.grafanaImageTag"}} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s" (include "k10.grafanaImageTag" .) }} +{{- else }} +{{- printf "%s" (include "k10.grafanaImageTag" .) 
}}
+{{- end }}
+{{- end }}
+
+{{- define "get.grafanaImageRepo" }}
+{{- if .Values.global.upstreamCertifiedImages }}
+{{- printf "%s/%s/grafana" .Values.k10image.registry .Values.k10image.repository }}
+{{- else }}
+{{- print .Values.image.repository }}
+{{- end }}
+{{- end }}
+
+{{/*
+Figure out the Grafana server image based on
+the value of airgapped.repository
+*/}}
+{{- define "get.grafanaServerimage" }}
+{{- if not .Values.global.rhMarketPlace }}
+{{- if .Values.global.airgapped.repository }}
+{{- printf "%s/grafana:%s" .Values.global.airgapped.repository (include "get.grafanaImageTag" .) }}
+{{- else }}
+{{- printf "%s:%s" (include "get.grafanaImageRepo" .) (include "get.grafanaImageTag" .) }}
+{{- end }}
+{{- else }}
+{{- printf "%s" .Values.global.images.grafana }}
+{{- end -}}
+{{- end }}
+
+{{/*
+Figure out the grafana init container image tag
+based on the value of global.airgapped.repository
+*/}}
+{{- define "get.grafanaInitContainerImageTag"}}
+{{- if .Values.global.airgapped.repository }}
+{{- printf "k10-%s" (include "k10.grafanaInitContainerImageTag" .) }}
+{{- else }}
+{{- printf "%s" (include "k10.grafanaInitContainerImageTag" .) }}
+{{- end }}
+{{- end }}
+
+{{- define "get.grafanaInitContainerImageRepo" }}
+{{- if .Values.global.upstreamCertifiedImages }}
+{{- printf "%s/%s/ubi-minimal" .Values.k10image.registry .Values.k10image.repository }}
+{{- else }}
+{{- print .Values.ubi.image.repository }}
+{{- end }}
+{{- end }}
+
+{{/*
+Figure out the init container image based on
+the value of airgapped.repository
+*/}}
+{{- define "get.grafanaInitContainerImage" }}
+{{- if not .Values.global.rhMarketPlace }}
+{{- if .Values.global.airgapped.repository }}
+{{- printf "%s/ubi-minimal:%s" .Values.global.airgapped.repository (include "get.grafanaInitContainerImageTag" .) }}
+{{- else }}
+{{- printf "%s:%s" (include "get.grafanaInitContainerImageRepo" .) (include "get.grafanaInitContainerImageTag" .) }}
+{{- end }}
+{{- else }}
+{{- printf "%s:%s" (include "get.grafanaInitContainerImageRepo" .) (include "get.grafanaInitContainerImageTag" .) }}
+{{- end }}
+{{- end }}
diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/_pod.tpl b/charts/k10/k10/4.5.900/charts/grafana/templates/_pod.tpl
new file mode 100644
index 000000000..46cee7d64
--- /dev/null
+++ b/charts/k10/k10/4.5.900/charts/grafana/templates/_pod.tpl
@@ -0,0 +1,509 @@
+
+{{- define "grafana.pod" -}}
+{{- if .Values.schedulerName }}
+schedulerName: "{{ .Values.schedulerName }}"
+{{- end }}
+serviceAccountName: {{ template "grafana.serviceAccountName" . }}
+automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }}
+{{- if .Values.securityContext }}
+securityContext:
+{{ toYaml .Values.securityContext | indent 2 }}
+{{- end }}
+{{- if .Values.hostAliases }}
+hostAliases:
+{{ toYaml .Values.hostAliases | indent 2 }}
+{{- end }}
+{{- if .Values.priorityClassName }}
+priorityClassName: {{ .Values.priorityClassName }}
+{{- end }}
+{{- if ( or .Values.global.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.sidecar.notifiers.enabled .Values.extraInitContainers) }}
+initContainers:
+{{- end }}
+{{- if ( and .Values.global.persistence.enabled .Values.initChownData.enabled ) }}
+  - name: init-chown-data
+    image: "{{ include "get.grafanaInitContainerImage" . 
}}" + imagePullPolicy: {{ .Values.ubi.image.pullPolicy }} + securityContext: + runAsNonRoot: false + runAsUser: 0 + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }}", "/var/lib/grafana"] + resources: +{{ toYaml .Values.initChownData.resources | indent 6 }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + {{- if .Values.downloadDashboardsImage.sha }} + image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" + {{- else }} + image: "{{ include "get.grafanaInitContainerImage" . }}" + {{- end }} + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ] + resources: +{{ toYaml .Values.downloadDashboards.resources | indent 6 }} + env: +{{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} +{{- if .Values.downloadDashboards.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl .Values.downloadDashboards.envFromSecret . }} +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: {{ template "grafana.name" . }}-sc-datasources + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- if .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.datasources.labelValue }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.datasources.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: {{ template "grafana.name" . 
}}-sc-notifiers + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.notifiers.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- if .Values.extraInitContainers }} +{{ toYaml .Values.extraInitContainers | indent 2 }} +{{- end }} +{{- if (or .Values.global.imagePullSecret .Values.image.pullSecrets) }} +imagePullSecrets: +{{- if .Values.global.imagePullSecret }} + - name: {{ .Values.global.imagePullSecret }} +{{- end }} +{{- if .Values.image.pullSecrets }} +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end }} +enableServiceLinks: {{ .Values.enableServiceLinks }} +containers: +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ template "grafana.name" . }}-sc-dashboard + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + {{- if .Values.sidecar.dashboards.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.dashboards.labelValue }} + {{- end }} + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }}" + - name: RESOURCE + value: {{ quote .Values.sidecar.dashboards.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.dashboards.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.sidecar.dashboards.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ .Values.sidecar.dashboards.folderAnnotation }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{- end}} + - name: {{ .Chart.Name }} + {{- if .Values.image.sha }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}" + {{- else }} + image: "{{ include "get.grafanaServerimage" . }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . }} + {{- end }} + {{- end}} +{{- if .Values.containerSecurityContext }} + securityContext: +{{- toYaml .Values.containerSecurityContext | nindent 6 }} +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- if .Values.dashboards }} +{{- range $provider, $dashboards := .Values.dashboards }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} +{{- if .Values.dashboardsConfigMaps }} +{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . 
}}" +{{- end }} +{{- end }} +{{/* Mounting default datasources in pod as yaml */}} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" + subPath: datasources.yaml +{{- if .Values.notifiers }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml" + subPath: notifiers.yaml +{{- end }} +{{- if .Values.dashboardProviders }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" + subPath: dashboardproviders.yaml +{{- end }} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{ if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml +{{- end}} +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + subPath: {{ .subPath | default "" }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + containerPort: {{ .Values.service.port }} + protocol: TCP + - name: {{ .Values.podPortName }} + containerPort: 3000 + protocol: TCP + env: + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ template "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{ if .Values.imageRenderer.enabled }} + - name: GF_RENDERING_SERVER_URL + value: http://{{ template "grafana.fullname" . }}-image-renderer.{{ template "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render + - name: GF_RENDERING_CALLBACK_URL + value: http://{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} + {{ end }} + - name: GF_PATHS_DATA + value: {{ (get .Values "grafana.ini").paths.data }} + - name: GF_PATHS_LOGS + value: {{ (get .Values "grafana.ini").paths.logs }} + - name: GF_PATHS_PLUGINS + value: {{ (get .Values "grafana.ini").paths.plugins }} + - name: GF_PATHS_PROVISIONING + value: {{ (get .Values "grafana.ini").paths.provisioning }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: +{{ toYaml $value | indent 10 }} + {{- end }} +{{- range $key, $value := .Values.env }} + - name: "{{ tpl $key $ }}" + value: "{{ tpl (print $value) $ }}" +{{- end }} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl .Values.envFromSecret . }} + {{- end }} + {{- if .Values.envRenderSecret }} + envFrom: + - secretRef: + name: {{ template "grafana.fullname" . }}-env + {{- end }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 6 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 6 }} + resources: +{{ toYaml .Values.resources | indent 6 }} +{{- with .Values.extraContainers }} +{{ tpl . $ | indent 2 }} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: +{{ toYaml . | indent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: +{{ toYaml . | indent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: +{{ toYaml . | indent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ template "grafana.fullname" . }} +{{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} +{{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{ $root := . }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ template "grafana.fullname" . }} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} +{{- if and .Values.global.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }} +{{- else if and .Values.global.persistence.enabled (eq .Values.persistence.type "statefulset") }} +# nothing +{{- else }} + - name: storage +{{- if .Values.persistence.inMemory.enabled }} + emptyDir: + medium: Memory +{{- if .Values.persistence.inMemory.sizeLimit }} + sizeLimit: {{ .Values.persistence.inMemory.sizeLimit }} +{{- end -}} +{{- else }} + emptyDir: {} +{{- end -}} +{{- end -}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: {} +{{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ template "grafana.fullname" . 
}}-config-dashboards +{{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: {} +{{- end -}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + emptyDir: {} +{{- end -}} +{{- range .Values.extraSecretMounts }} +{{- if .secretName }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} +{{- else if .projected }} + - name: {{ .name }} + projected: {{- toYaml .projected | nindent 6 }} +{{- else if .csi }} + - name: {{ .name }} + csi: {{- toYaml .csi | nindent 6 }} +{{- end }} +{{- end }} +{{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + {{- if .existingClaim }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} + {{- else if .hostPath }} + hostPath: + path: {{ .hostPath }} + {{- else }} + emptyDir: {} + {{- end }} +{{- end }} +{{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} +{{- end -}} +{{- if .Values.extraContainerVolumes }} +{{ toYaml .Values.extraContainerVolumes | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/clusterrole.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/clusterrole.yaml new file mode 100644 index 000000000..6d2aa55c9 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/clusterrole.yaml @@ -0,0 +1,27 @@ +{{- if .Values.enabled }} +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . }}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }} +rules: +{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end}} +{{- with .Values.rbac.extraClusterRoleRules }} +{{ toYaml . | indent 0 }} +{{- end}} +{{- else }} +rules: [] +{{- end}} +{{- end}} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/clusterrolebinding.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..5e50cd7fe --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/clusterrolebinding.yaml @@ -0,0 +1,26 @@ +{{- if .Values.enabled }} +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "grafana.fullname" . }}-clusterrolebinding + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +roleRef: + kind: ClusterRole +{{- if (not .Values.rbac.useExistingRole) }} + name: {{ template "grafana.fullname" . 
}}-clusterrole +{{- else }} + name: {{ .Values.rbac.useExistingRole }} +{{- end }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/configmap-dashboard-provider.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/configmap-dashboard-provider.yaml new file mode 100644 index 000000000..c3dcc0810 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,31 @@ +{{- if .Values.enabled }} +{{- if .Values.sidecar.dashboards.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . }}-config-dashboards + namespace: {{ template "grafana.namespace" . }} +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: '{{ .Values.sidecar.dashboards.provider.name }}' + orgId: {{ .Values.sidecar.dashboards.provider.orgid }} + {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + folder: '{{ .Values.sidecar.dashboards.provider.folder }}' + {{- end}} + type: {{ .Values.sidecar.dashboards.provider.type }} + disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} + allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} + updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} + options: + foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} +{{- end}} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/configmap.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/configmap.yaml new file mode 100644 index 000000000..6bbfaeb52 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/configmap.yaml @@ -0,0 +1,99 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +data: + # Adding default prometheus datasource for grafana + datasources.yaml: | + apiVersion: 1 + datasources: + - access: proxy + editable: false + isDefault: true + name: Prometheus + type: prometheus + url: http://{{ .Values.prometheusName | trimSuffix "/" }}-exp/{{ .Values.prometheusPrefixURL | trimPrefix "/"}} + jsonData: + timeInterval: '1m' +{{- if .Values.plugins }} + plugins: {{ join "," .Values.plugins }} +{{- end }} + grafana.ini: | +{{- range $key, $value := index .Values "grafana.ini" }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} +{{- end }} + [server] + root_url=/{{ include "k10.ingressPath" . | trimSuffix "/"}}/grafana + serve_from_sub_path=true + +{{- if .Values.datasources }} +{{ $root := . 
}} + {{- range $key, $value := .Values.datasources }} + {{ $key }}: | +{{ tpl (toYaml $value | indent 4) $root }} + {{- end -}} +{{- end -}} + +{{- if .Values.notifiers }} + {{- range $key, $value := .Values.notifiers }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -skf \ + --connect-timeout 60 \ + --max-time 60 \ + {{- if not $value.b64content }} + -H "Accept: application/json" \ + {{- if $value.token }} + -H "Authorization: token {{ $value.token }}" \ + {{- end }} + -H "Content-Type: application/json;charset=UTF-8" \ + {{ end }} + {{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \ + > "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + {{- end -}} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/dashboards-json-configmap.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/dashboards-json-configmap.yaml new file mode 100644 index 000000000..232cd5a5e --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/dashboards-json-configmap.yaml @@ -0,0 +1,37 @@ +{{- if .Values.enabled }} +{{- if .Values.dashboards }} +{{ $files := .Files }} +{{- range $provider, $dashboards := .Values.dashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }} + namespace: {{ template "grafana.namespace" $ }} + labels: + {{- include "grafana.labels" $ | nindent 4 }} + dashboard-provider: {{ $provider }} +{{- if $dashboards }} +data: +{{- $dashboardFound := false }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} +{{- $dashboardFound = true }} +{{ print $key | indent 2 }}.json: +{{- if hasKey $value "json" }} + |- +{{ $value.json | indent 6 }} +{{- end }} +{{- if hasKey $value "file" }} +{{ toYaml ( $files.Get $value.file ) | indent 4}} +{{- end }} +{{- end }} +{{- end }} +{{- if not $dashboardFound }} + {} +{{- end }} +{{- end }} +--- +{{- end }} + +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/deployment.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/deployment.yaml new file mode 100644 index 000000000..21395889a --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/deployment.yaml @@ -0,0 +1,52 @@ +{{- if .Values.enabled }} +{{ if (or (not .Values.global.persistence.enabled) (eq .Values.persistence.type "pvc")) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . 
}} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- with .Values.deploymentStrategy }} + strategy: +{{ toYaml . | trim | indent 4 }} +{{- end }} + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} +{{- with .Values.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} +{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} +{{- if .Values.envRenderSecret }} + checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} +{{- end }} +{{- with .Values.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/headless-service.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/headless-service.yaml new file mode 100644 index 000000000..4715281ab --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/headless-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.enabled }} +{{- if and .Values.global.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }}-headless + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + clusterIP: None + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + type: ClusterIP +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/hpa.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/hpa.yaml new file mode 100644 index 000000000..b4e610c6c --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/hpa.yaml @@ -0,0 +1,22 @@ +{{- if .Values.enabled }} +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app: {{ template "grafana.name" . }} + helm.sh/chart: {{ template "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "grafana.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: +{{ toYaml .Values.autoscaling.metrics | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-deployment.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-deployment.yaml new file mode 100644 index 000000000..5fed1a5f1 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-deployment.yaml @@ -0,0 +1,117 @@ +{{- if .Values.enabled }} +{{ if .Values.imageRenderer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} +{{- if .Values.imageRenderer.labels }} +{{ toYaml .Values.imageRenderer.labels | indent 4 }} +{{- end }} +{{- with .Values.imageRenderer.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.imageRenderer.replicas }} + revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} +{{- with .Values.imageRenderer.deploymentStrategy }} + strategy: +{{ toYaml . | trim | indent 4 }} +{{- end }} + template: + metadata: + labels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} +{{- with .Values.imageRenderer.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- with .Values.imageRenderer.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + + {{- if .Values.imageRenderer.schedulerName }} + schedulerName: "{{ .Values.imageRenderer.schedulerName }}" + {{- end }} + {{- if .Values.imageRenderer.serviceAccountName }} + serviceAccountName: "{{ .Values.imageRenderer.serviceAccountName }}" + {{- end }} + {{- if .Values.imageRenderer.securityContext }} + securityContext: + {{- toYaml .Values.imageRenderer.securityContext | nindent 8 }} + {{- end }} + {{- if .Values.imageRenderer.hostAliases }} + hostAliases: + {{- toYaml .Values.imageRenderer.hostAliases | nindent 8 }} + {{- end }} + {{- if .Values.imageRenderer.priorityClassName }} + priorityClassName: {{ .Values.imageRenderer.priorityClassName }} + {{- end }} + {{- if .Values.imageRenderer.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.imageRenderer.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ .Chart.Name }}-image-renderer + {{- if .Values.imageRenderer.image.sha }} + image: "{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" + {{- else }} + image: "{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} + {{- if .Values.imageRenderer.command }} + command: + {{- range .Values.imageRenderer.command }} + - {{ . 
}} + {{- end }} + {{- end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + containerPort: {{ .Values.imageRenderer.service.port }} + protocol: TCP + env: + - name: HTTP_PORT + value: {{ .Values.imageRenderer.service.port | quote }} + {{- range $key, $value := .Values.imageRenderer.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + securityContext: + capabilities: + drop: ['all'] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: image-renderer-tmpfs + {{- with .Values.imageRenderer.resources }} + resources: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.imageRenderer.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.imageRenderer.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.imageRenderer.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: image-renderer-tmpfs + emptyDir: {} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-network-policy.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-network-policy.yaml new file mode 100644 index 000000000..3730e7eba --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-network-policy.yaml @@ -0,0 +1,78 @@ +{{- if .Values.enabled }} +{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitIngress) }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer-ingress + namespace: {{ template "grafana.namespace" . }} + annotations: + comment: Limit image-renderer ingress traffic from grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- if .Values.imageRenderer.podLabels }} + {{ toYaml .Values.imageRenderer.podLabels | nindent 6 }} + {{- end }} + + policyTypes: + - Ingress + ingress: + - ports: + - port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + from: + - namespaceSelector: + matchLabels: + name: {{ template "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- if .Values.podLabels }} + {{ toYaml .Values.podLabels | nindent 14 }} + {{- end }} +{{ end }} + +{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitEgress) }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer-egress + namespace: {{ template "grafana.namespace" . }} + annotations: + comment: Limit image-renderer egress traffic to grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- if .Values.imageRenderer.podLabels }} + {{ toYaml .Values.imageRenderer.podLabels | nindent 6 }} + {{- end }} + + policyTypes: + - Egress + egress: + # allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # talk only to grafana + - ports: + - port: {{ .Values.service.port }} + protocol: TCP + to: + - namespaceSelector: + matchLabels: + name: {{ template "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . 
| nindent 14 }} + {{- if .Values.podLabels }} + {{ toYaml .Values.podLabels | nindent 14 }} + {{- end }} +{{ end }} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-service.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-service.yaml new file mode 100644 index 000000000..530931327 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/image-renderer-service.yaml @@ -0,0 +1,32 @@ +{{- if .Values.enabled }} +{{ if .Values.imageRenderer.enabled }} +{{ if .Values.imageRenderer.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} +{{- if .Values.imageRenderer.service.labels }} +{{ toYaml .Values.imageRenderer.service.labels | indent 4 }} +{{- end }} +{{- with .Values.imageRenderer.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: ClusterIP + {{- if .Values.imageRenderer.service.clusterIP }} + clusterIP: {{ .Values.imageRenderer.service.clusterIP }} + {{end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + targetPort: {{ .Values.imageRenderer.service.targetPort }} + selector: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} +{{ end }} +{{ end }} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/ingress.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/ingress.yaml new file mode 100644 index 000000000..80dbc798b --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/ingress.yaml @@ -0,0 +1,80 @@ +{{- if .Values.enabled }} +{{- if .Values.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} +{{- $fullName := include "grafana.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $ingressPathType := .Values.ingress.pathType -}} +{{- $extraPaths := .Values.ingress.extraPaths -}} +apiVersion: {{ include "grafana.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} + {{- if .Values.ingress.annotations }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end -}} +{{- if .Values.ingress.tls }} + tls: +{{ tpl (toYaml .Values.ingress.tls) $ | indent 4 }} +{{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range .Values.ingress.hosts }} + - host: {{ tpl . 
$}} + http: + paths: +{{- if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + - backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if $ingressPath }} + path: {{ $ingressPath }} + {{- end }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + {{- end -}} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/networkpolicy.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/networkpolicy.yaml new file mode 100644 index 000000000..591ac7286 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/networkpolicy.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +{{ if .Values.service.enabled}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "grafana.name" . }}-network-policy + namespace: {{ template "grafana.namespace" . }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + app: {{ template "grafana.name" . }} + ingress: + - { } + egress: + - { } +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/poddisruptionbudget.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/poddisruptionbudget.yaml new file mode 100644 index 000000000..c1ee81e61 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/poddisruptionbudget.yaml @@ -0,0 +1,24 @@ +{{- if .Values.enabled }} +{{- if .Values.podDisruptionBudget }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} +spec: +{{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/podsecuritypolicy.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..0f4e58942 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/podsecuritypolicy.yaml @@ -0,0 +1,51 @@ +{{- if .Values.enabled }} +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "grafana.fullname" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + {{- if .Values.rbac.pspUseAppArmor }} + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + {{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + # Default set from Docker, with DAC_OVERRIDE and CHOWN + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'csi' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/pvc.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/pvc.yaml new file mode 100644 index 000000000..4389846c7 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/pvc.yaml @@ -0,0 +1,33 @@ +{{- if .Values.enabled }} +{{- if and .Values.global.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.persistence.finalizers }} + finalizers: +{{ toYaml . | indent 4 }} + {{- end }} +spec: + accessModes: + - {{ .Values.global.persistence.accessMode }} + resources: + requests: + storage: {{ default .Values.global.persistence.size .Values.global.persistence.grafana.size | quote }} + {{- if .Values.global.persistence.storageClass }} + storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: +{{ toYaml . | indent 6 }} + {{- end }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/role.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/role.yaml new file mode 100644 index 000000000..ab67f1d5b --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/role.yaml @@ -0,0 +1,34 @@ +{{- if .Values.enabled }} +{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} +apiVersion: {{ template "grafana.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }} +rules: +{{- if .Values.rbac.pspEnabled }} +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "grafana.fullname" . 
}}] +{{- end }} +{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }} +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} +{{- with .Values.rbac.extraRoleRules }} +{{ toYaml . | indent 0 }} +{{- end}} +{{- else }} +rules: [] +{{- end }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/rolebinding.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/rolebinding.yaml new file mode 100644 index 000000000..bd0bd5dea --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/rolebinding.yaml @@ -0,0 +1,27 @@ +{{- if .Values.enabled }} +{{- if .Values.rbac.create -}} +apiVersion: {{ template "grafana.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not .Values.rbac.useExistingRole) }} + name: {{ template "grafana.fullname" . }} +{{- else }} + name: {{ .Values.rbac.useExistingRole }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end -}} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/secret-env.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/secret-env.yaml new file mode 100644 index 000000000..be272234c --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/secret-env.yaml @@ -0,0 +1,16 @@ +{{- if .Values.enabled }} +{{- if .Values.envRenderSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }}-env + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +type: Opaque +data: +{{- range $key, $val := .Values.envRenderSecret }} + {{ $key }}: {{ $val | b64enc | quote }} +{{- end -}} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/secret.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/secret.yaml new file mode 100644 index 000000000..1bcd865d5 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/secret.yaml @@ -0,0 +1,28 @@ +{{- if .Values.enabled }} +{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +type: Opaque +data: + {{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ template "grafana.password" . 
}} + {{- end }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} + {{- end }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/service.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/service.yaml new file mode 100644 index 000000000..5f21759c9 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/service.yaml @@ -0,0 +1,58 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} + annotations: + getambassador.io/config: | + --- + apiVersion: ambassador/v1 + kind: Mapping + name: grafana-server-mapping + prefix: /{{- include "k10.ingressPath" . | trimSuffix "/" }}/grafana/ + rewrite: / + service: {{ template "grafana.fullname" .}}:{{ .Values.service.port }} + timeout_ms: 15000 + +spec: +{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} + {{- end -}} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} +{{- if .Values.service.externalIPs }} + externalIPs: +{{ toYaml .Values.service.externalIPs | indent 4 }} +{{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort }} +{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{.Values.service.nodePort}} +{{ end }} + {{- if .Values.extraExposePorts }} + {{- tpl (toYaml .Values.extraExposePorts) . | indent 4 }} + {{- end }} + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/serviceaccount.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/serviceaccount.yaml new file mode 100644 index 000000000..4d178e1b5 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.enabled }} +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/servicemonitor.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/servicemonitor.yaml new file mode 100644 index 000000000..cbe9890d8 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.enabled }} +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "grafana.fullname" . 
}} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- if .Values.serviceMonitor.labels }} + {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- end }} +spec: + endpoints: + - interval: {{ .Values.serviceMonitor.interval }} + {{- if .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} + {{- end }} + honorLabels: true + port: {{ .Values.service.portName }} + path: {{ .Values.serviceMonitor.path }} + scheme: {{ .Values.serviceMonitor.scheme }} + {{- if .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml .Values.serviceMonitor.tlsConfig | nindent 6 }} + {{- end }} + {{- if .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.900/charts/grafana/templates/statefulset.yaml b/charts/k10/k10/4.5.900/charts/grafana/templates/statefulset.yaml new file mode 100644 index 000000000..86f04c1a5 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/grafana/templates/statefulset.yaml @@ -0,0 +1,55 @@ +{{- if .Values.enabled }} +{{- if and .Values.global.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + serviceName: {{ template "grafana.fullname" . }}-headless + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} +{{- with .Values.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} +{{- with .Values.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - {{ .Values.global.persistence.accessMode }} + storageClassName: {{ .Values.global.persistence.storageClass }} + resources: + requests: + storage: {{ .Values.global.persistence.size }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: +{{ toYaml . 
| indent 10 }}
+      {{- end }}
+{{- end }}
+{{- end}}
diff --git a/charts/k10/k10/4.5.900/charts/grafana/values.yaml b/charts/k10/k10/4.5.900/charts/grafana/values.yaml
new file mode 100644
index 000000000..792113e38
--- /dev/null
+++ b/charts/k10/k10/4.5.900/charts/grafana/values.yaml
@@ -0,0 +1,2701 @@
+# Value to control whether grafana is installed
+enabled: true
+
+# Values for prometheus datasource
+prometheusName: prometheus-server
+prometheusPrefixURL: /k10/prometheus
+
+# General purpose image for init container
+ubi:
+  image:
+    repository: registry.access.redhat.com/ubi8/ubi-minimal
+    tag: 8.5-230
+    pullPolicy: IfNotPresent
+
+k10image:
+  registry: gcr.io
+  repository: kasten-images
+
+rbac:
+  create: true
+  ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
+  # useExistingRole: name-of-some-(cluster)role
+  pspEnabled: true
+  pspUseAppArmor: true
+  namespaced: false
+  extraRoleRules: []
+  # - apiGroups: []
+  #   resources: []
+  #   verbs: []
+  extraClusterRoleRules: []
+  # - apiGroups: []
+  #   resources: []
+  #   verbs: []
+serviceAccount:
+  create: true
+  name:
+  nameTest:
+# annotations:
+#   eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
+  autoMount: true
+
+replicas: 1
+
+## Create HorizontalPodAutoscaler object for deployment type
+#
+autoscaling:
+  enabled: false
+#  minReplicas: 1
+#  maxReplicas: 10
+#  metrics:
+#  - type: Resource
+#    resource:
+#      name: cpu
+#      targetAverageUtilization: 60
+#  - type: Resource
+#    resource:
+#      name: memory
+#      targetAverageUtilization: 60
+
+## See `kubectl explain poddisruptionbudget.spec` for more
+## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+podDisruptionBudget: {}
+#  minAvailable: 1
+#  maxUnavailable: 1
+
+## See `kubectl explain deployment.spec.strategy` for more
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+deploymentStrategy:
+  type: Recreate
+
+readinessProbe:
+  httpGet:
+    path: /api/health
+    port: 3000
+
+livenessProbe:
+  httpGet:
+    path: /api/health
+    port: 3000
+  initialDelaySeconds: 60
+  timeoutSeconds: 30
+  failureThreshold: 10
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName: "default-scheduler"
+
+image:
+  repository: grafana/grafana
+  tag: 8.1.0
+  sha: ""
+  pullPolicy: IfNotPresent
+
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+testFramework:
+  enabled: false
+  image: "bats/bats"
+  tag: "v1.1.0"
+  imagePullPolicy: IfNotPresent
+  securityContext: {}
+
+securityContext:
+  runAsUser: 472
+  runAsGroup: 472
+  fsGroup: 472
+
+containerSecurityContext:
+  {}
+
+extraConfigmapMounts: []
+  # - name: certs-configmap
+  #   mountPath: /etc/grafana/ssl/
+  #   subPath: certificates.crt # (optional)
+  #   configMap: certs-configmap
+  #   readOnly: true
+
+
+extraEmptyDirMounts: []
+  # - name: provisioning-notifiers
+  #   mountPath: /etc/grafana/provisioning/notifiers
+
+
+# Apply extra labels to common labels.
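+# A hedged example of what could go here (the label below is illustrative, not a chart default):
+#   extraLabels:
+#     team: storage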
+extraLabels: {}
+
+## Assign a PriorityClassName to pods if set
+# priorityClassName:
+
+downloadDashboardsImage:
+  repository: curlimages/curl
+  tag: 7.73.0
+  sha: ""
+  pullPolicy: IfNotPresent
+
+downloadDashboards:
+  env: {}
+  envFromSecret: ""
+  resources: {}
+
+## Pod Annotations
+# podAnnotations: {}
+
+## Pod Labels
+# podLabels: {}
+
+podPortName: grafana
+
+## Deployment annotations
+# annotations: {}
+
+## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service),
+## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
+## ref: http://kubernetes.io/docs/user-guide/services/
+##
+
+service:
+  enabled: true
+  type: ClusterIP
+  port: 80
+  targetPort: 3000
+    # targetPort: 4181 To be used with a proxy extraContainer
+  annotations: {}
+  labels: {}
+  portName: service
+
+serviceMonitor:
+  ## If true, a ServiceMonitor CRD is created for a prometheus operator
+  ## https://github.com/coreos/prometheus-operator
+  ##
+  enabled: false
+  path: /metrics
+  # namespace: monitoring (defaults to use the namespace this chart is deployed to)
+  labels: {}
+  interval: 1m
+  scheme: http
+  tlsConfig: {}
+  scrapeTimeout: 30s
+  relabelings: []
+
+extraExposePorts: []
+  # - name: keycloak
+  #   port: 8080
+  #   targetPort: 8080
+  #   type: ClusterIP
+
+# overrides pod.spec.hostAliases in the grafana deployment's pods
+hostAliases: []
+  # - ip: "1.2.3.4"
+  #   hostnames:
+  #     - "my.host.com"
+
+ingress:
+  enabled: false
+  # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
+  # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
+  # ingressClassName: nginx
+  # Values can be templated
+  annotations: {}
+  # kubernetes.io/ingress.class: nginx
+  # kubernetes.io/tls-acme: "true"
+  labels: {}
+  path: /
+
+  # pathType is only for k8s >= 1.18
+  pathType: Prefix
+
+  hosts:
+    - chart-example.local
+  ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
+  extraPaths: []
+  # - path: /*
+  #   backend:
+  #     serviceName: ssl-redirect
+  #     servicePort: use-annotation
+  ## Or for k8s > 1.19
+  # - path: /*
+  #   pathType: Prefix
+  #   backend:
+  #     service:
+  #       name: ssl-redirect
+  #       port:
+  #         name: use-annotation
+
+
+  tls: []
+  # - secretName: chart-example-tls
+  #   hosts:
+  #     - chart-example.local
+
+resources: {}
+#  limits:
+#    cpu: 100m
+#    memory: 128Mi
+#  requests:
+#    cpu: 100m
+#    memory: 128Mi
+
+## Node labels for pod assignment
+## ref: https://kubernetes.io/docs/user-guide/node-selection/
+#
+nodeSelector: {}
+
+## Tolerations for pod assignment
+## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Affinity for pod assignment
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
+
+extraInitContainers: []
+
+## Enable and specify containers in extraContainers. 
This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Volumes that can be used in init containers that will not be mounted to deployment pods +extraContainerVolumes: [] +# - name: volume-from-secret +# secret: +# secretName: secret-to-mount +# - name: empty-dir-volume +# emptyDir: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: true + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 5Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + # subPath: "" + # existingClaim: + + ## If persistence is not enabled, this allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the prometheus-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## +# image: +# repository: busybox +# tag: "1.31.1" +# sha: "" +# pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Extra environment variables that will be pass onto deployment pods +## +## to provide grafana with access to CloudWatch on AWS EKS: +## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) +## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the +## same oidc eks provider as noted before (same as the existing line) +## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name +## +## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", +## +## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess +## 4. 
use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name)
+##
+## env:
+##   AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
+##   AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
+##   AWS_REGION: us-east-1
+##
+## 5. uncomment the EKS section in extraSecretMounts: below
+## 6. uncomment the annotation section in the serviceAccount: above
+##    make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn
+
+env: {}
+
+## "valueFrom" environment variable references that will be added to deployment pods
+## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core
+## Renders in container spec as:
+##   env:
+##     ...
+##     - name: <key>
+##       valueFrom:
+##         <value rendered as YAML>
+envValueFrom: {}
+
+## The name of a secret in the same kubernetes namespace which contains values to be added to the environment
+## This can be useful for auth tokens, etc. Value is templated.
+envFromSecret: ""
+
+## Sensitive environment variables that will be rendered as new secret object
+## This can be useful for auth tokens, etc
+envRenderSecret: {}
+
+# Inject Kubernetes services as environment variables.
+# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables
+enableServiceLinks: true
+
+## Additional grafana server secret mounts
+# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
+extraSecretMounts: []
+  # - name: secret-files
+  #   mountPath: /etc/secrets
+  #   secretName: grafana-secret-files
+  #   readOnly: true
+  #   subPath: ""
+  #
+  # for AWS EKS (cloudwatch) use the following (see also instruction in env: above)
+  # - name: aws-iam-token
+  #   mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
+  #   readOnly: true
+  #   projected:
+  #     defaultMode: 420
+  #     sources:
+  #       - serviceAccountToken:
+  #           audience: sts.amazonaws.com
+  #           expirationSeconds: 86400
+  #           path: token
+  #
+  # for CSI e.g. Azure Key Vault use the following
+  # - name: secrets-store-inline
+  #   mountPath: /run/secrets
+  #   readOnly: true
+  #   csi:
+  #     driver: secrets-store.csi.k8s.io
+  #     readOnly: true
+  #     volumeAttributes:
+  #       secretProviderClass: "akv-grafana-spc"
+  #     nodePublishSecretRef:       # Only required when using service principal mode
+  #       name: grafana-akv-creds   # Only required when using service principal mode
+
+## Additional grafana server volume mounts
+# Defines additional volume mounts.
+extraVolumeMounts: []
+  # - name: extra-volume-0
+  #   mountPath: /mnt/volume0
+  #   readOnly: true
+  #   existingClaim: volume-claim
+  # - name: extra-volume-1
+  #   mountPath: /mnt/volume1
+  #   readOnly: true
+  #   hostPath: /usr/shared/
+
+## Pass the plugins you want installed as a list. 
+## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +#datasources: +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: prometheus-server-exp/k10/prometheus +# access: proxy +# isDefault: true +# - name: CloudWatch +# type: cloudwatch +# access: proxy +# uid: cloudwatch +# editable: false +# jsonData: +# authType: default +# defaultRegion: us-east-1 + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: false + options: + path: /var/lib/grafana/dashboards + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. +## +dashboards: + default: + default: + json: | + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 1, + "iteration": 1634946056135, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 18, + "panels": [], + "title": "Applications", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "yellow", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 0, + "y": 1 + }, + "id": 24, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_backup_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Backups Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + 
"color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 5, + "y": 1 + }, + "id": 33, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_backup_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Backups Failed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 8, + "y": 1 + }, + "id": 34, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_backup_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Backups Skipped", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 13, + "y": 1 + }, + "id": 35, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Restores Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 18, + "y": 1 + }, + "id": 36, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + 
"fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Restores Failed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 21, + "y": 1 + }, + "id": 23, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Restores Skipped", + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 16, + "panels": [], + "title": "Cluster", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "yellow", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 0, + "y": 9 + }, + "id": 10, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_backup_cluster_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Backups Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 5, + "y": 9 + }, + "id": 19, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": 
"sum(round(increase(action_backup_cluster_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Backups Failed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 8, + "y": 9 + }, + "id": 28, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_backup_cluster_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Backups Skipped", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 13, + "y": 9 + }, + "id": 21, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_cluster_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Restores Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 18, + "y": 9 + }, + "id": 22, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_cluster_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Restores Failed", + "type": "stat" + 
}, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 21, + "y": 9 + }, + "id": 25, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_cluster_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Restores Skipped", + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 31, + "panels": [], + "title": "Backup Exports", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 17 + }, + "id": 38, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_export_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Exports Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 5, + "y": 17 + }, + "id": 29, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_export_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Exports Failed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } 
+ ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 8, + "y": 17 + }, + "id": 20, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_export_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Exports Skipped", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 13, + "y": 17 + }, + "id": 27, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_import_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Imports Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 18, + "y": 17 + }, + "id": 39, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_import_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Imports Failed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 21, + "y": 17 + }, + "id": 37, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + 
"reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_import_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Imports Skipped", + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 23 + }, + "id": 14, + "panels": [], + "title": "System", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + }, + "unit": "runs" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 0, + "y": 24 + }, + "id": 12, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_run_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "format": "time_series", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "title": "Policy Runs", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "yellow", + "value": 1 + } + ] + }, + "unit": "runs" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 3, + "y": 24 + }, + "id": 40, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_run_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "format": "time_series", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "title": "Policy Runs Skipped", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#ccccdc", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 6, + "y": 24 + }, + "id": 6, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", 
+ "targets": [ + { + "exemplar": true, + "expr": "catalog_persistent_volume_disk_space_used_bytes{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Catalog Volume Used", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 70 + }, + { + "color": "orange", + "value": 80 + }, + { + "color": "red", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 9, + "y": 24 + }, + "id": 2, + "options": { + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": true, + "expr": "100-catalog_persistent_volume_free_space_percent{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Catalog Volume Used Space", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#ccccdc", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 12, + "y": 24 + }, + "id": 8, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": true, + "expr": "jobs_persistent_volume_disk_space_used_bytes{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Jobs Volume Used", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 70 + }, + { + "color": "orange", + "value": 80 + }, + { + "color": "red", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 15, + "y": 24 + }, + "id": 4, + "options": { + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": true, + "expr": "100-jobs_persistent_volume_free_space_percent{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Jobs Volume Used Space", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#ccccdc", + "value": null + } 
+ ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 18, + "y": 24 + }, + "id": 7, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": true, + "expr": "logging_persistent_volume_disk_space_used_bytes{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Logging Volume Used", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 70 + }, + { + "color": "orange", + "value": 80 + }, + { + "color": "red", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 21, + "y": 24 + }, + "id": 3, + "options": { + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.0", + "targets": [ + { + "exemplar": true, + "expr": "100-logging_persistent_volume_free_space_percent{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Logging Volume Used Space", + "type": "gauge" + } + ], + "schemaVersion": 30, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "description": null, + "error": null, + "hide": 2, + "label": "Cluster", + "name": "cluster", + "query": "", + "skipUrlSync": false, + "type": "constant" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "K10 Dashboard", + "uid": "8Ebb3xS7k", + "version": 1 + } + + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + +## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. 
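+## A hypothetical pairing (names are illustrative): dashboards for the `default` provider
+## declared in dashboardProviders above, supplied by an external ConfigMap:
+##
+##   dashboardsConfigMaps:
+##     default: my-dashboards-configmap
+##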
+## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + dashboards: + default_home_dashboard_path: /var/lib/grafana/dashboards/default/default.json +## grafana Authentication can be enabled with the following values on grafana.ini +# server: +# # The full public facing url you use in browser, used for redirects and emails +## domain: +# root_url: /k10/grafana +# serve_from_sub_path: true + + auth: + disable_login_form: true + disable_signout_menu: true + + auth.basic: + enabled: false + + auth.anonymous: + enabled: true + org_name: Main Org. + org_role: Admin + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. 
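+  # A hypothetical secret layout (the name is illustrative; the data keys must match
+  # userKey/passwordKey below):
+  #   apiVersion: v1
+  #   kind: Secret
+  #   metadata:
+  #     name: grafana-smtp-creds
+  #   stringData:
+  #     user: alerts@example.com
+  #     password: changeme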
+  existingSecret: ""
+  userKey: "user"
+  passwordKey: "password"
+
+## Sidecars that collect ConfigMaps with a specified label and store the included files in the respective folders
+## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
+sidecar:
+  image:
+    repository: quay.io/kiwigrid/k8s-sidecar
+    tag: 1.12.2
+    sha: ""
+  imagePullPolicy: IfNotPresent
+  resources: {}
+#   limits:
+#     cpu: 100m
+#     memory: 100Mi
+#   requests:
+#     cpu: 50m
+#     memory: 50Mi
+  # skipTlsVerify Set to true to skip tls verification for kube api calls
+  # skipTlsVerify: true
+  enableUniqueFilenames: false
+  dashboards:
+    enabled: false
+    SCProvider: true
+    # label that the configmaps with dashboards are marked with
+    label: grafana_dashboard
+    # value of label that the configmaps with dashboards are set to
+    labelValue: null
+    # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
+    folder: /tmp/dashboards
+    # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
+    defaultFolderName: null
+    # If specified, the sidecar will search for dashboard config-maps inside this namespace.
+    # Otherwise the namespace in which the sidecar is running will be used.
+    # It's also possible to specify ALL to search in all namespaces
+    searchNamespace: null
+    # search in configmap, secret or both
+    resource: both
+    # If specified, the sidecar will look for an annotation with this name to create folders and put dashboards in them.
+    # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
+    folderAnnotation: null
+    # provider configuration that lets grafana manage the dashboards
+    provider:
+      # name of the provider, should be unique
+      name: sidecarProvider
+      # orgid as configured in grafana
+      orgid: 1
+      # folder in which the dashboards should be imported in grafana
+      folder: ''
+      # type of the provider
+      type: file
+      # disableDelete to activate an import-only behaviour
+      disableDelete: false
+      # allow updating provisioned dashboards from the UI
+      allowUiUpdates: false
+      # allow Grafana to replicate dashboard structure from filesystem
+      foldersFromFilesStructure: false
+  datasources:
+    enabled: false
+    # label that the configmaps with datasources are marked with
+    label: grafana_datasource
+    # value of label that the configmaps with datasources are set to
+    labelValue: null
+    # If specified, the sidecar will search for datasource config-maps inside this namespace.
+    # Otherwise the namespace in which the sidecar is running will be used.
+    # It's also possible to specify ALL to search in all namespaces
+    searchNamespace: null
+    # search in configmap, secret or both
+    resource: both
+  notifiers:
+    enabled: false
+    # label that the configmaps with notifiers are marked with
+    label: grafana_notifier
+    # If specified, the sidecar will search for notifier config-maps inside this namespace.
+    # Otherwise the namespace in which the sidecar is running will be used. 
+    # It's also possible to specify ALL to search in all namespaces
+    searchNamespace: null
+    # search in configmap, secret or both
+    resource: both
+
+## Override the deployment namespace
+##
+namespaceOverride: ""
+
+## Number of old ReplicaSets to retain
+##
+revisionHistoryLimit: 10
+
+## Add a separate remote image renderer deployment/service
+imageRenderer:
+  # Enable the image-renderer deployment & service
+  enabled: false
+  replicas: 1
+  image:
+    # image-renderer Image repository
+    repository: grafana/grafana-image-renderer
+    # image-renderer Image tag
+    tag: latest
+    # image-renderer Image sha (optional)
+    sha: ""
+    # image-renderer ImagePullPolicy
+    pullPolicy: Always
+  # extra environment variables
+  env:
+    HTTP_HOST: "0.0.0.0"
+    # RENDERING_ARGS: --disable-gpu,--window-size=1280x758
+    # RENDERING_MODE: clustered
+  # image-renderer deployment serviceAccount
+  serviceAccountName: ""
+  # image-renderer deployment securityContext
+  securityContext: {}
+  # image-renderer deployment Host Aliases
+  hostAliases: []
+  # image-renderer deployment priority class
+  priorityClassName: ''
+  service:
+    # Enable the image-renderer service
+    enabled: true
+    # image-renderer service port name
+    portName: 'http'
+    # image-renderer service port used by both service and deployment
+    port: 8081
+    targetPort: 8081
+  # In case a sub_path is used this needs to be added to the image renderer callback
+  grafanaSubPath: ""
+  # name of the image-renderer port on the pod
+  podPortName: http
+  # number of image-renderer replica sets to keep
+  revisionHistoryLimit: 10
+  networkPolicy:
+    # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods
+    limitIngress: true
+    # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
+    limitEgress: false
+  resources: {}
+#   limits:
+#     cpu: 100m
+#     memory: 100Mi
+#   requests:
+#     cpu: 50m
+#     memory: 50Mi
diff --git a/charts/k10/k10/4.5.900/charts/prometheus/Chart.yaml b/charts/k10/k10/4.5.900/charts/prometheus/Chart.yaml
new file mode 100644
index 000000000..3aa2d8141
--- /dev/null
+++ b/charts/k10/k10/4.5.900/charts/prometheus/Chart.yaml
@@ -0,0 +1,30 @@
+apiVersion: v2
+appVersion: 2.26.0
+dependencies:
+- condition: kubeStateMetrics.enabled
  name: kube-state-metrics
+  repository: https://prometheus-community.github.io/helm-charts
+  version: 3.4.*
+description: Prometheus is a monitoring system and time series database.
+home: https://prometheus.io/
+icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
+maintainers:
+- email: gianrubio@gmail.com
+  name: gianrubio
+- email: zanhsieh@gmail.com
+  name: zanhsieh
+- email: miroslav.hadzhiev@gmail.com
+  name: Xtigyro
+- email: monotek23@gmail.com
+  name: monotek
+- email: naseem@transit.app
+  name: naseemkullah
+name: prometheus
+sources:
+- https://github.com/prometheus/alertmanager
+- https://github.com/prometheus/prometheus
+- https://github.com/prometheus/pushgateway
+- https://github.com/prometheus/node_exporter
+- https://github.com/kubernetes/kube-state-metrics
+type: application
+version: 14.6.0
diff --git a/charts/k10/k10/4.5.900/charts/prometheus/README.md b/charts/k10/k10/4.5.900/charts/prometheus/README.md
new file mode 100644
index 000000000..25f27f3f6
--- /dev/null
+++ b/charts/k10/k10/4.5.900/charts/prometheus/README.md
@@ -0,0 +1,224 @@
+# Prometheus
+
+[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. 
It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. + +This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.16+ +- Helm 3+ + +## Get Repo Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo add kube-state-metrics https://kubernetes.github.io/kube-state-metrics +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +# Helm +$ helm install [RELEASE_NAME] prometheus-community/prometheus +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Dependencies + +By default this chart installs additional, dependent charts: + +- [stable/kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) + +To disable the dependency during installation, set `kubeStateMetrics.enabled` to `false`. + +_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._ + +## Uninstall Chart + +```console +# Helm +$ helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +# Helm +$ helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### To 9.0 + +Version 9.0 adds a new option to enable or disable the Prometheus Server. This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. To install the server `server.enabled` must be set to `true`. + +### To 5.0 + +As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. + +Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/). + +Users of this chart will need to update their alerting rules to the new format before they can upgrade. + +### Example Migration + +Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following: + +1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: + + ```yaml + alertmanager: + enabled: false + alertmanagerFiles: + alertmanager.yml: "" + kubeStateMetrics: + enabled: false + nodeExporter: + enabled: false + pushgateway: + enabled: false + server: + extraArgs: + storage.local.retention: 720h + serverFiles: + alerts: "" + prometheus.yml: "" + rules: "" + ``` + +1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. 
In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target.
+
+   ```yaml
+   prometheus.yml:
+     ...
+     remote_read:
+     - url: http://prometheus-old/api/v1/read
+     ...
+   ```
+
+   Old data will be available when you query the new prometheus instance.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
+
+```console
+# Helm 2
+$ helm inspect values prometheus-community/prometheus
+
+# Helm 3
+$ helm show values prometheus-community/prometheus
+```
+
+You may similarly use the above configuration commands on each chart [dependency](#dependencies) to see its configuration.
+
+### Scraping Pod Metrics via Annotations
+
+This chart uses a default configuration that causes prometheus to scrape a variety of kubernetes resource types, provided they have the correct annotations. In this section we describe how to configure pods to be scraped; for information on how other resource types can be scraped you can do a `helm template` to get the kubernetes resource definitions, and then reference the prometheus configuration in the ConfigMap against the prometheus documentation for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config).
+
+In order to get prometheus to scrape pods, you must add annotations to the pods as below:
+
+```yaml
+metadata:
+  annotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/path: /metrics
+    prometheus.io/port: "8080"
+```
+
+You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. `prometheus.io/port` should be set to the port that your pod serves metrics from. Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be enclosed in double quotes.
+
+### Sharing Alerts Between Services
+
+Note that when [installing](#install-chart) or [upgrading](#upgrading-chart) you may use multiple values override files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example,
+
+```yaml
+# values.yaml
+# ...
+
+# service1-alert.yaml
+serverFiles:
+  alerts:
+    service1:
+      - alert: anAlert
+  # ...
+
+# service2-alert.yaml
+serverFiles:
+  alerts:
+    service2:
+      - alert: anAlert
+  # ...
+```
+
+```console
+helm install [RELEASE_NAME] prometheus-community/prometheus -f values.yaml -f service1-alert.yaml -f service2-alert.yaml
+```
+
+### RBAC Configuration
+
+Role and RoleBinding resources will be created automatically for the `server` service.
+
+To manually set up RBAC you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account.
+
+> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own.
+
+### ConfigMap Files
+
+AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod. 
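+
+For example, a minimal `alertmanagerFiles` override might look like the sketch below (the route and receiver are illustrative, not the chart defaults):
+
+```yaml
+alertmanagerFiles:
+  alertmanager.yml:
+    route:
+      receiver: default-receiver
+    receivers:
+      - name: default-receiver
+```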
+
+Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod.
+
+### Ingress TLS
+
+If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism.
+
+To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace:
+
+```console
+kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key
+```
+
+Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file:
+
+```yaml
+server:
+  ingress:
+    ## If true, Prometheus server Ingress will be created
+    ##
+    enabled: true
+
+    ## Prometheus server Ingress hostnames
+    ## Must be provided if Ingress is enabled
+    ##
+    hosts:
+      - prometheus.domain.com
+
+    ## Prometheus server Ingress TLS configuration
+    ## Secrets must be manually created in the namespace
+    ##
+    tls:
+      - secretName: prometheus-server-tls
+        hosts:
+          - prometheus.domain.com
+```
+
+### NetworkPolicy
+
+Enabling Network Policy for Prometheus will secure connections to Alert Manager and Kube State Metrics by only accepting connections from Prometheus Server. All inbound connections to Prometheus Server are still allowed.
+
+To enable network policy for Prometheus, install a networking plugin that implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true.
+
+If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need to manually create a NetworkPolicy that allows it.
diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/NOTES.txt b/charts/k10/k10/4.5.900/charts/prometheus/templates/NOTES.txt
new file mode 100644
index 000000000..0e8868f0b
--- /dev/null
+++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/NOTES.txt
@@ -0,0 +1,112 @@
+{{- if .Values.server.enabled -}}
+The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster:
+{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.server.ingress.enabled -}}
+From outside the cluster, the server URL(s) are:
+{{- range .Values.server.ingress.hosts }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the Prometheus server URL by running these commands in the same shell:
+{{- if contains "NodePort" .Values.server.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.server.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" .
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }}
+{{- else if contains "ClusterIP" .Values.server.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090
+{{- end }}
+{{- end }}
+
+{{- if .Values.server.persistentVolume.enabled }}
+{{- else }}
+#################################################################################
+######   WARNING: Persistence is disabled!!! You will lose your data when   #####
+######            the Server pod is terminated.                             #####
+#################################################################################
+{{- end }}
+{{- end }}
+
+{{ if .Values.alertmanager.enabled }}
+The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster:
+{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.alertmanager.ingress.enabled -}}
+From outside the cluster, the alertmanager URL(s) are:
+{{- range .Values.alertmanager.ingress.hosts }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the Alertmanager URL by running these commands in the same shell:
+{{- if contains "NodePort" .Values.alertmanager.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }}
+{{- else if contains "ClusterIP" .Values.alertmanager.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093
+{{- end }}
+{{- end }}
+
+{{- if .Values.alertmanager.persistentVolume.enabled }}
+{{- else }}
+#################################################################################
+######   WARNING: Persistence is disabled!!! You will lose your data when   #####
+######            the AlertManager pod is terminated.                       #####
+#################################################################################
+{{- end }}
+{{- end }}
+
+{{- if .Values.nodeExporter.podSecurityPolicy.enabled }}
+{{- else }}
+#################################################################################
+######   WARNING: Pod Security Policy has been moved to a global property.  #####
+######            use .Values.podSecurityPolicy.enabled with pod-based      #####
+######            annotations                                               #####
+######            (e.g.
.Values.nodeExporter.podSecurityPolicy.annotations) #####
+#################################################################################
+{{- end }}
+
+{{ if .Values.pushgateway.enabled }}
+The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster:
+{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.pushgateway.ingress.enabled -}}
+From outside the cluster, the pushgateway URL(s) are:
+{{- range .Values.pushgateway.ingress.hosts }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the PushGateway URL by running these commands in the same shell:
+{{- if contains "NodePort" .Values.pushgateway.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }}
+{{- else if contains "ClusterIP" .Values.pushgateway.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091
+{{- end }}
+{{- end }}
+{{- end }}
+
+For more information on running Prometheus, visit:
+https://prometheus.io/
diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/_definitions.tpl b/charts/k10/k10/4.5.900/charts/prometheus/templates/_definitions.tpl
new file mode 100644
index 000000000..d93364c7f
--- /dev/null
+++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/_definitions.tpl
@@ -0,0 +1,3 @@
+{{/* Autogenerated, do NOT modify */}}
+{{- define "k10.prometheusImageTag" -}}v2.26.0{{- end -}}
+{{- define "k10.prometheusConfigMapReloaderImageTag" -}}v0.5.0{{- end -}}
diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/_helpers.tpl b/charts/k10/k10/4.5.900/charts/prometheus/templates/_helpers.tpl
new file mode 100644
index 000000000..287ed192a
--- /dev/null
+++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/_helpers.tpl
@@ -0,0 +1,400 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "prometheus.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "prometheus.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create unified labels for prometheus components
+*/}}
+{{- define "prometheus.common.matchLabels" -}}
+app: {{ template "prometheus.name" .
}} +release: {{ .Release.Name }} +{{- end -}} + +{{- define "prometheus.common.metaLabels" -}} +chart: {{ template "prometheus.chart" . }} +heritage: {{ .Release.Service }} +{{- end -}} + +{{- define "prometheus.alertmanager.labels" -}} +{{ include "prometheus.alertmanager.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.alertmanager.matchLabels" -}} +component: {{ .Values.alertmanager.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.labels" -}} +{{ include "prometheus.nodeExporter.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.matchLabels" -}} +component: {{ .Values.nodeExporter.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.labels" -}} +{{ include "prometheus.pushgateway.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.matchLabels" -}} +component: {{ .Values.pushgateway.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.server.labels" -}} +{{ include "prometheus.server.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.server.matchLabels" -}} +component: {{ .Values.server.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Figure out the config based on +the value of airgapped.repository +*/}} +{{- define "get.cmreloadimage" }} +{{- if not .Values.global.rhMarketPlace }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/configmap-reload:%s" .Values.global.airgapped.repository (include "get.cmReloadImageTag" .) }} +{{- else }} +{{- printf "%s:%s" (include "get.cmReloadImageRepo" .) (include "get.cmReloadImageTag" .) }} +{{- end }} +{{- else }} +{{- printf "%s" (get .Values.global.images "configmap-reload") }} +{{- end -}} +{{- end }} + +{{/* +Figure out the config based on +the value of airgapped.repository +*/}} +{{- define "get.serverimage" }} +{{- if not .Values.global.rhMarketPlace }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/prometheus:%s" .Values.global.airgapped.repository (include "get.promImageTag" .) }} +{{- else }} +{{- printf "%s:%s" (include "get.promImageRepo" .) (include "get.promImageTag" .) }} +{{- end }} +{{- else }} +{{- printf "%s" (get .Values.global.images "prometheus") }} +{{- end -}} +{{- end }} + + +{{/* +Figure out the configmap-reload image tag +based on the value of global.upstreamCertifiedImages +*/}} +{{- define "get.cmReloadImageTag"}} +{{- if .Values.global.upstreamCertifiedImages }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s-rh-ubi" (include "k10.prometheusConfigMapReloaderImageTag" .) 
}} +{{- else }} +{{- printf "%s-rh-ubi" (include "k10.prometheusConfigMapReloaderImageTag" .) }} +{{- end }} +{{- else }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s" (include "k10.prometheusConfigMapReloaderImageTag" .) }} +{{- else }} +{{- printf "%s" (include "k10.prometheusConfigMapReloaderImageTag" .) }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Figure out the prometheus image tag +based on the value of global.upstreamCertifiedImages +*/}} +{{- define "get.promImageTag"}} +{{- if .Values.global.upstreamCertifiedImages }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s-rh-ubi" (include "k10.prometheusImageTag" .) }} +{{- else }} +{{- printf "%s-rh-ubi" (include "k10.prometheusImageTag" .) }} +{{- end }} +{{- else }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s" (include "k10.prometheusImageTag" .) }} +{{- else }} +{{- printf "%s" (include "k10.prometheusImageTag" .) }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Figure out the configmap-reload image repo +based on the value of global.upstreamCertifiedImages +*/}} +{{- define "get.cmReloadImageRepo" }} +{{- if .Values.global.upstreamCertifiedImages }} +{{- printf "%s/%s/configmap-reload" .Values.k10image.registry .Values.k10image.repository }} +{{- else }} +{{- print .Values.configmapReload.prometheus.image.repository }} +{{- end }} +{{- end }} + +{{/* +Figure out the prom image repo +based on the value of global.upstreamCertifiedImages +*/}} +{{- define "get.promImageRepo" }} +{{- if .Values.global.upstreamCertifiedImages }} +{{- printf "%s/%s/prometheus" .Values.k10image.registry .Values.k10image.repository }} +{{- else }} +{{- print .Values.server.image.repository }} +{{- end }} +{{- end }} + +{{/* +Create a fully qualified alertmanager name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} + +{{- define "prometheus.alertmanager.fullname" -}} +{{- if .Values.alertmanager.fullnameOverride -}} +{{- .Values.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified node-exporter name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.nodeExporter.fullname" -}} +{{- if .Values.nodeExporter.fullnameOverride -}} +{{- .Values.nodeExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "prometheus.server.fullname" -}} +{{- if .Values.server.fullnameOverride -}} +{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server clusterrole name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.server.clusterrolefullname" -}} +{{- if .Values.server.clusterRoleNameOverride -}} +{{- .Values.server.clusterRoleNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- if .Values.server.fullnameOverride -}} +{{- printf "%s-%s" .Release.Name .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified pushgateway name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.pushgateway.fullname" -}} +{{- if .Values.pushgateway.fullnameOverride -}} +{{- .Values.pushgateway.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Get KubeVersion removing pre-release information. +*/}} +{{- define "prometheus.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "prometheus.deployment.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "prometheus.daemonset.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "prometheus.networkPolicy.apiVersion" -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for podsecuritypolicy. +*/}} +{{- define "prometheus.podSecurityPolicy.apiVersion" -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "prometheus.kubeVersion" .)) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return if ingress is stable. +*/}} +{{- define "ingress.isStable" -}} + {{- eq (include "ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "ingress.supportsIngressClassName" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} +{{/* +Return if ingress supports pathType. +*/}} +{{- define "ingress.supportsPathType" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the alertmanager component +*/}} +{{- define "prometheus.serviceAccountName.alertmanager" -}} +{{- if .Values.serviceAccounts.alertmanager.create -}} + {{ default (include "prometheus.alertmanager.fullname" .) .Values.serviceAccounts.alertmanager.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.alertmanager.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the nodeExporter component +*/}} +{{- define "prometheus.serviceAccountName.nodeExporter" -}} +{{- if .Values.serviceAccounts.nodeExporter.create -}} + {{ default (include "prometheus.nodeExporter.fullname" .) .Values.serviceAccounts.nodeExporter.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.nodeExporter.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the pushgateway component +*/}} +{{- define "prometheus.serviceAccountName.pushgateway" -}} +{{- if .Values.serviceAccounts.pushgateway.create -}} + {{ default (include "prometheus.pushgateway.fullname" .) .Values.serviceAccounts.pushgateway.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.pushgateway.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the server component +*/}} +{{- define "prometheus.serviceAccountName.server" -}} +{{- if .Values.serviceAccounts.server.create -}} + {{ default (include "prometheus.server.fullname" .) 
.Values.serviceAccounts.server.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.server.name }} +{{- end -}} +{{- end -}} + +{{/* +Define the prometheus.namespace template if set with forceNamespace or .Release.Namespace is set +*/}} +{{- define "prometheus.namespace" -}} +{{- if .Values.forceNamespace -}} +{{ printf "namespace: %s" .Values.forceNamespace }} +{{- else -}} +{{ printf "namespace: %s" .Release.Namespace }} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/clusterrole.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/clusterrole.yaml new file mode 100644 index 000000000..c732ff4e5 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.alertmanager.useClusterRole (not .Values.alertmanager.useExistingRole) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.alertmanager.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml new file mode 100644 index 000000000..6f13e98b5 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.alertmanager.useClusterRole -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if (not .Values.alertmanager.useExistingRole) }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{- else }} + name: {{ .Values.alertmanager.useExistingRole }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/cm.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/cm.yaml new file mode 100644 index 000000000..cb09bf067 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/cm.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.alertmanager.enabled (and (empty .Values.alertmanager.configMapOverrideName) (empty .Values.alertmanager.configFromSecret)) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +data: +{{- $root := . 
-}} +{{- range $key, $value := .Values.alertmanagerFiles }} + {{- if $key | regexMatch ".*\\.ya?ml$" }} + {{ $key }}: | +{{ toYaml $value | default "{}" | indent 4 }} + {{- else }} + {{ $key }}: {{ toYaml $value | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/deploy.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/deploy.yaml new file mode 100644 index 000000000..fe6e9b9ac --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/deploy.yaml @@ -0,0 +1,161 @@ +{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.statefulSet.enabled) -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.alertmanager.deploymentAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + {{- if .Values.alertmanager.strategy }} + strategy: +{{ toYaml .Values.alertmanager.strategy | trim | indent 4 }} + {{ if eq .Values.alertmanager.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 8 }} + {{- if .Values.alertmanager.podLabels}} + {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} + {{- if .Values.alertmanager.extraInitContainers }} + initContainers: +{{ toYaml .Values.alertmanager.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/{{ .Values.alertmanager.configFileName }} + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + - --cluster.advertise-address=[$(POD_IP)]:6783 + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/-/ready + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + + {{- if .Values.configmapReload.alertmanager.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} + image: "{{ include "get.cmreloadimage" .}}" + imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9093{{ .Values.alertmanager.prefixURL }}/-/reload + resources: +{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.alertmanager.dnsConfig }} + dnsConfig: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} + {{- end }} + volumes: + - name: config-volume + {{- if empty .Values.alertmanager.configFromSecret }} + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.alertmanager.configFromSecret }} + {{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . 
}} + {{- end }} + {{- end }} + - name: storage-volume + {{- if .Values.alertmanager.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.alertmanager.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.alertmanager.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/headless-svc.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/headless-svc.yaml new file mode 100644 index 000000000..8c402c408 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/headless-svc.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.alertmanager.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.statefulSet.headless.labels }} +{{ toYaml .Values.alertmanager.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }}-headless +{{ include "prometheus.namespace" . | indent 2 }} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.alertmanager.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9093 +{{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/ingress.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/ingress.yaml new file mode 100644 index 000000000..6e856360b --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/ingress.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.alertmanager.fullname" . }} +{{- $servicePort := .Values.alertmanager.service.servicePort -}} +{{- $ingressPath := .Values.alertmanager.ingress.path -}} +{{- $ingressPathType := .Values.alertmanager.ingress.pathType -}} +{{- $extraPaths := .Values.alertmanager.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: +{{- if .Values.alertmanager.ingress.annotations }} + annotations: +{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- range $key, $value := .Values.alertmanager.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.alertmanager.ingress.ingressClassName }} + ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.alertmanager.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.alertmanager.ingress.tls }} + tls: +{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/netpol.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/netpol.yaml new file mode 100644 index 000000000..e44ade60e --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/netpol.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.alertmanager.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9093 +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/pdb.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/pdb.yaml new file mode 100644 index 000000000..41a92f364 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.alertmanager.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.alertmanager.labels" . | nindent 6 }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/psp.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/psp.yaml new file mode 100644 index 000000000..64fb13003 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/psp.yaml @@ -0,0 +1,46 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + {{- include "prometheus.alertmanager.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.alertmanager.podSecurityPolicy.annotations }} +{{ toYaml .Values.alertmanager.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.alertmanager.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/pvc.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/pvc.yaml new file mode 100644 index 000000000..28774d0e0 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/pvc.yaml @@ -0,0 +1,39 @@ +{{- if not .Values.alertmanager.statefulSet.enabled -}} +{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}} +{{- if not .Values.alertmanager.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }} +{{- if .Values.alertmanager.persistentVolume.storageClass }} + {{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" + {{- end }} +{{- else if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} +{{- end }} +{{- if .Values.alertmanager.persistentVolume.volumeBindingMode }} + volumeBindingModeName: "{{ .Values.alertmanager.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/role.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/role.yaml new file mode 100644 index 000000000..ce60eaf0a --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create (eq .Values.alertmanager.useClusterRole false) (not .Values.alertmanager.useExistingRole) -}} +{{- range $.Values.alertmanager.namespaces }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: Role +metadata: + labels: + {{- include "prometheus.alertmanager.labels" $ | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" $ }} + namespace: {{ . 
}} +rules: +{{- if $.Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.alertmanager.fullname" $ }} +{{- else }} + [] +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/rolebinding.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/rolebinding.yaml new file mode 100644 index 000000000..906d6522d --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create (eq .Values.alertmanager.useClusterRole false) -}} +{{ range $.Values.alertmanager.namespaces }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.alertmanager.labels" $ | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" $ }} + namespace: {{ . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.alertmanager" $ }} +{{ include "prometheus.namespace" $ | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not $.Values.alertmanager.useExistingRole) }} + name: {{ template "prometheus.alertmanager.fullname" $ }} +{{- else }} + name: {{ $.Values.alertmanager.useExistingRole }} +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/service.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/service.yaml new file mode 100644 index 000000000..9edc9ac65 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/service.yaml @@ -0,0 +1,53 @@ +{{- if .Values.alertmanager.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.service.annotations }} + annotations: +{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.service.labels }} +{{ toYaml .Values.alertmanager.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: +{{- if .Values.alertmanager.service.clusterIP }} + clusterIP: {{ .Values.alertmanager.service.clusterIP }} +{{- end }} +{{- if .Values.alertmanager.service.externalIPs }} + externalIPs: +{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.alertmanager.service.servicePort }} + protocol: TCP + targetPort: 9093 + {{- if .Values.alertmanager.service.nodePort }} + nodePort: {{ .Values.alertmanager.service.nodePort }} + {{- end }} +{{- if .Values.alertmanager.service.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . 
| nindent 4 }} +{{- if .Values.alertmanager.service.sessionAffinity }} + sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }} +{{- end }} + type: "{{ .Values.alertmanager.service.type }}" +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/serviceaccount.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/serviceaccount.yaml new file mode 100644 index 000000000..a5d996a85 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.alertmanager.enabled .Values.serviceAccounts.alertmanager.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{ include "prometheus.namespace" . | indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.alertmanager.annotations | indent 4 }} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/sts.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/sts.yaml new file mode 100644 index 000000000..95bbfe6c8 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/alertmanager/sts.yaml @@ -0,0 +1,187 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.alertmanager.statefulSet.annotations }} + annotations: + {{ toYaml .Values.alertmanager.statefulSet.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + {{- if .Values.alertmanager.statefulSet.labels}} + {{ toYaml .Values.alertmanager.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + serviceName: {{ template "prometheus.alertmanager.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + podManagementPolicy: {{ .Values.alertmanager.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 8 }} + {{- if .Values.alertmanager.podLabels}} + {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} +{{- end }} +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/alertmanager.yml + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - --cluster.advertise-address=[$(POD_IP)]:6783 + - --cluster.listen-address=0.0.0.0:6783 + {{- range $n := until (.Values.alertmanager.replicaCount | int) }} + - --cluster.peer={{ template "prometheus.alertmanager.fullname" $ }}-{{ $n }}.{{ template "prometheus.alertmanager.fullname" $ }}-headless:6783 + {{- end }} + {{- else }} + - --cluster.listen-address= + {{- end }} + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - containerPort: 6783 + {{- end }} + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/#/status + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.configmapReload.alertmanager.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} + image: "{{ include "get.cmreloadimage" .}}" + imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload + resources: +{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + volumes: + - name: config-volume + {{- if empty .Values.alertmanager.configFromSecret }} + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . 
}}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.alertmanager.configFromSecret }} + {{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.alertmanager.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" + {{- if .Values.alertmanager.persistentVolume.storageClass }} + {{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" + {{- end }} + {{- else if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: + {{- if .Values.alertmanager.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.alertmanager.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/daemonset.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/daemonset.yaml new file mode 100644 index 000000000..667be9f49 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/daemonset.yaml @@ -0,0 +1,146 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: {{ template "prometheus.daemonset.apiVersion" . }} +kind: DaemonSet +metadata: +{{- if .Values.nodeExporter.deploymentAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.nodeExporter.matchLabels" . | nindent 6 }} + {{- if .Values.nodeExporter.updateStrategy }} + updateStrategy: +{{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.nodeExporter.podAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 8 }} +{{- if .Values.nodeExporter.pod.labels }} +{{ toYaml .Values.nodeExporter.pod.labels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }} + {{- if .Values.nodeExporter.extraInitContainers }} + initContainers: +{{ toYaml .Values.nodeExporter.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.nodeExporter.priorityClassName }} + priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.nodeExporter.name }} + image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}" + imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}" + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + {{- if .Values.nodeExporter.hostRootfs }} + - --path.rootfs=/host/root + {{- end }} + {{- if .Values.nodeExporter.hostNetwork }} + - --web.listen-address=:{{ .Values.nodeExporter.service.hostPort }} + {{- end }} + {{- range $key, $value := .Values.nodeExporter.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + ports: + - name: metrics + {{- if .Values.nodeExporter.hostNetwork }} + containerPort: {{ .Values.nodeExporter.service.hostPort }} + {{- else }} + containerPort: 9100 + {{- end }} + hostPort: {{ .Values.nodeExporter.service.hostPort }} + resources: +{{ toYaml .Values.nodeExporter.resources | indent 12 }} + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + {{- if .Values.nodeExporter.hostRootfs }} + - name: root + mountPath: /host/root + mountPropagation: HostToContainer + readOnly: true + {{- end }} + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- if .mountPropagation }} + mountPropagation: {{ .mountPropagation }} + {{- end }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.hostNetwork }} + hostNetwork: true + {{- end }} + {{- if .Values.nodeExporter.hostPID }} + hostPID: true + {{- end }} + {{- if .Values.nodeExporter.tolerations }} + tolerations: +{{ toYaml .Values.nodeExporter.tolerations | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.nodeExporter.dnsConfig }} + dnsConfig: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.securityContext }} + securityContext: +{{ toYaml .Values.nodeExporter.securityContext | indent 8 }} + {{- end }} + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + {{- if .Values.nodeExporter.hostRootfs }} + - name: root + hostPath: + path: / + {{- end }} + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/psp.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/psp.yaml new file mode 100644 index 000000000..bd9c73bee --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/psp.yaml @@ -0,0 +1,55 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.nodeExporter.podSecurityPolicy.annotations }} +{{ toYaml .Values.nodeExporter.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'hostPath' + - 'secret' + allowedHostPaths: + - pathPrefix: /proc + readOnly: true + - pathPrefix: /sys + readOnly: true + - pathPrefix: / + readOnly: true + {{- range .Values.nodeExporter.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: {{ .Values.nodeExporter.hostNetwork }} + hostPID: {{ .Values.nodeExporter.hostPID }} + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + hostPorts: + - min: 1 + max: 65535 +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/role.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/role.yaml new file mode 100644 index 000000000..d8ef3ed90 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/role.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if or (default .Values.nodeExporter.podSecurityPolicy.enabled false) (.Values.podSecurityPolicy.enabled) }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{ include "prometheus.namespace" . | indent 2 }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "prometheus.nodeExporter.fullname" . }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/rolebinding.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/rolebinding.yaml new file mode 100644 index 000000000..06914b70a --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{ include "prometheus.namespace" . | indent 2 }} +roleRef: + kind: Role + name: {{ template "prometheus.nodeExporter.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/serviceaccount.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/serviceaccount.yaml new file mode 100644 index 000000000..0cf91afba --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.nodeExporter.enabled .Values.serviceAccounts.nodeExporter.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{ include "prometheus.namespace" . | indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.nodeExporter.annotations | indent 4 }} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/svc.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/svc.yaml new file mode 100644 index 000000000..26d1eaa21 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/node-exporter/svc.yaml @@ -0,0 +1,47 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.nodeExporter.service.annotations }} + annotations: +{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{- if .Values.nodeExporter.service.labels }} +{{ toYaml .Values.nodeExporter.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: +{{- if .Values.nodeExporter.service.clusterIP }} + clusterIP: {{ .Values.nodeExporter.service.clusterIP }} +{{- end }} +{{- if .Values.nodeExporter.service.externalIPs }} + externalIPs: +{{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: metrics + {{- if .Values.nodeExporter.hostNetwork }} + port: {{ .Values.nodeExporter.service.hostPort }} + protocol: TCP + targetPort: {{ .Values.nodeExporter.service.hostPort }} + {{- else }} + port: {{ .Values.nodeExporter.service.servicePort }} + protocol: TCP + targetPort: 9100 + {{- end }} + selector: + {{- include "prometheus.nodeExporter.matchLabels" . | nindent 4 }} + type: "{{ .Values.nodeExporter.service.type }}" +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/clusterrole.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/clusterrole.yaml new file mode 100644 index 000000000..76ecf053f --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . 
}} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.pushgateway.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml new file mode 100644 index 000000000..15770ee50 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.pushgateway.fullname" . }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/deploy.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/deploy.yaml new file mode 100644 index 000000000..ffdbfcc42 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/deploy.yaml @@ -0,0 +1,119 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.pushgateway.deploymentAnnotations }} + annotations: + {{ toYaml .Values.pushgateway.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} + replicas: {{ .Values.pushgateway.replicaCount }} + {{- if .Values.pushgateway.strategy }} + strategy: +{{ toYaml .Values.pushgateway.strategy | trim | indent 4 }} + {{ if eq .Values.pushgateway.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.pushgateway.podAnnotations }} + annotations: + {{ toYaml .Values.pushgateway.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 8 }} + {{- if .Values.pushgateway.podLabels }} + {{ toYaml .Values.pushgateway.podLabels | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }} + {{- if .Values.pushgateway.extraInitContainers }} + initContainers: +{{ toYaml .Values.pushgateway.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.pushgateway.priorityClassName }} + priorityClassName: "{{ .Values.pushgateway.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.pushgateway.name }} + image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}" + imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.pushgateway.extraArgs }} + {{- $stringvalue := toString $value }} + {{- if eq $stringvalue "true" }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + ports: + - containerPort: 9091 + livenessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/healthy + {{- else }} + path: /-/healthy + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + readinessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/ready + {{- else }} + path: /-/ready + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + resources: +{{ toYaml .Values.pushgateway.resources | indent 12 }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.pushgateway.persistentVolume.mountPath }}" + subPath: "{{ .Values.pushgateway.persistentVolume.subPath }}" + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.nodeSelector }} + nodeSelector: +{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.pushgateway.dnsConfig }} + dnsConfig: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.securityContext }} + securityContext: +{{ toYaml .Values.pushgateway.securityContext | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.tolerations }} + tolerations: +{{ toYaml .Values.pushgateway.tolerations | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.affinity }} + affinity: +{{ toYaml .Values.pushgateway.affinity | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumes: + - name: storage-volume + persistentVolumeClaim: + claimName: {{ if .Values.pushgateway.persistentVolume.existingClaim }}{{ .Values.pushgateway.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.pushgateway.fullname" . }}{{- end }} + {{- end -}} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/ingress.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/ingress.yaml new file mode 100644 index 000000000..5f176aed4 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/ingress.yaml @@ -0,0 +1,54 @@ +{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.pushgateway.fullname" . }} +{{- $servicePort := .Values.pushgateway.service.servicePort -}} +{{- $ingressPath := .Values.pushgateway.ingress.path -}} +{{- $ingressPathType := .Values.pushgateway.ingress.pathType -}} +{{- $extraPaths := .Values.pushgateway.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . 
}} +kind: Ingress +metadata: +{{- if .Values.pushgateway.ingress.annotations }} + annotations: +{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.pushgateway.ingress.ingressClassName }} + ingressClassName: {{ .Values.pushgateway.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.pushgateway.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.pushgateway.ingress.tls }} + tls: +{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/netpol.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/netpol.yaml new file mode 100644 index 000000000..c8d1fb37e --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/netpol.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.pushgateway.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9091 +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/pdb.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/pdb.yaml new file mode 100644 index 000000000..50beb486d --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.pushgateway.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.pushgateway.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.pushgateway.labels" . | nindent 6 }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/psp.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/psp.yaml new file mode 100644 index 000000000..1ca3267f8 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/psp.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . 
}} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + annotations: +{{- if .Values.pushgateway.podSecurityPolicy.annotations }} +{{ toYaml .Values.pushgateway.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'persistentVolumeClaim' + - 'secret' + allowedHostPaths: + - pathPrefix: {{ .Values.pushgateway.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/pvc.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/pvc.yaml new file mode 100644 index 000000000..908f4e2f2 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/pvc.yaml @@ -0,0 +1,37 @@ +{{- if .Values.pushgateway.persistentVolume.enabled -}} +{{- if not .Values.pushgateway.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.pushgateway.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.pushgateway.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + accessModes: +{{ toYaml .Values.pushgateway.persistentVolume.accessModes | indent 4 }} +{{- if .Values.pushgateway.persistentVolume.storageClass }} + {{- if (eq "-" .Values.pushgateway.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.pushgateway.persistentVolume.storageClass }}" + {{- end }} +{{- else if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} +{{- end }} +{{- if .Values.pushgateway.persistentVolume.volumeBindingMode }} + volumeBindingModeName: "{{ .Values.pushgateway.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.pushgateway.persistentVolume.size }}" +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/service.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/service.yaml new file mode 100644 index 000000000..f05f17c42 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.pushgateway.service.annotations }} + annotations: +{{ toYaml .Values.pushgateway.service.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +{{- if .Values.pushgateway.service.labels }} +{{ toYaml .Values.pushgateway.service.labels | indent 4}} +{{- end }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: +{{- if .Values.pushgateway.service.clusterIP }} + clusterIP: {{ .Values.pushgateway.service.clusterIP }} +{{- end }} +{{- if .Values.pushgateway.service.externalIPs }} + externalIPs: +{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.pushgateway.service.servicePort }} + protocol: TCP + targetPort: 9091 + selector: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 4 }} + type: "{{ .Values.pushgateway.service.type }}" +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/serviceaccount.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/serviceaccount.yaml new file mode 100644 index 000000000..8c0b876f3 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/pushgateway/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.pushgateway.enabled .Values.serviceAccounts.pushgateway.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{ include "prometheus.namespace" . | indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.pushgateway.annotations | indent 4 }} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/clusterrole.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/clusterrole.yaml new file mode 100644 index 000000000..539c56304 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/clusterrole.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.server.enabled .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.clusterrolefullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.server.fullname" . }} +{{- end }} + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - nodes/metrics + - services + - endpoints + - pods + - ingresses + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses/status + - ingresses + verbs: + - get + - list + - watch + - nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/clusterrolebinding.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/clusterrolebinding.yaml new file mode 100644 index 000000000..3c42e5827 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.server.enabled .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" . 
| nindent 4 }} + name: {{ template "prometheus.server.clusterrolefullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.server.clusterrolefullname" . }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/cm.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/cm.yaml new file mode 100644 index 000000000..e012694fc --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/cm.yaml @@ -0,0 +1,82 @@ +{{- if .Values.server.enabled -}} +{{- if (empty .Values.server.configMapOverrideName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +data: +{{- $root := . -}} +{{- range $key, $value := .Values.serverFiles }} + {{ $key }}: | +{{- if eq $key "prometheus.yml" }} + global: +{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} +{{- if $root.Values.server.remoteWrite }} + remote_write: +{{ $root.Values.server.remoteWrite | toYaml | indent 4 }} +{{- end }} +{{- if $root.Values.server.remoteRead }} + remote_read: +{{ $root.Values.server.remoteRead | toYaml | indent 4 }} +{{- end }} +{{- end }} +{{- if eq $key "alerts" }} +{{- if and (not (empty $value)) (empty $value.groups) }} + groups: +{{- range $ruleKey, $ruleValue := $value }} + - name: {{ $ruleKey -}}.rules + rules: +{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} +{{- else }} +{{ toYaml $value | indent 4 }} +{{- end }} +{{- else }} +{{ toYaml $value | default "{}" | indent 4 }} +{{- end }} +{{- if eq $key "prometheus.yml" -}} +{{- if $root.Values.extraScrapeConfigs }} +{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} +{{- end -}} +{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }} + alerting: +{{- if $root.Values.alertRelabelConfigs }} +{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} + alertmanagers: +{{- if $root.Values.server.alertmanagers }} +{{ toYaml $root.Values.server.alertmanagers | indent 8 }} +{{- else }} + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if $root.Values.alertmanager.prefixURL }} + path_prefix: {{ $root.Values.alertmanager.prefixURL }} + {{- end }} + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + regex: {{ $root.Release.Namespace }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app] + regex: {{ template "prometheus.name" $root }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_component] + regex: alertmanager + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe] + regex: {{ index $root.Values.alertmanager.podAnnotations "prometheus.io/probe" | default ".*" }} + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: "9093" + action: keep +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/deploy.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/deploy.yaml new file mode 100644 index 
000000000..4b9e11909 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/deploy.yaml @@ -0,0 +1,261 @@ +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.server.deploymentAnnotations }} + annotations: + {{ toYaml .Values.server.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | trim | indent 4 }} + {{ if eq .Values.server.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + image: "{{ include "get.cmreloadimage" .}}" + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }} + image: "{{ include "get.serverimage" .}}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + ports: + - containerPort: 9090 + readinessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + hostNetwork: {{ .Values.server.hostNetwork }} + {{- if .Values.server.dnsPolicy }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if (or .Values.global.imagePullSecret .Values.imagePullSecrets) }} + imagePullSecrets: + {{- if .Values.global.imagePullSecret }} + - name: {{ .Values.global.imagePullSecret }} + {{- end }} + {{- if 
.Values.imagePullSecrets }} +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} + - name: storage-volume + {{- if .Values.server.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/headless-svc.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/headless-svc.yaml new file mode 100644 index 000000000..d519f4e0e --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/headless-svc.yaml @@ -0,0 +1,37 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.statefulSet.headless.labels }} +{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }}-headless +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.server.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.statefulSet.headless.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.statefulSet.headless.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.statefulSet.headless.gRPC.nodePort }} + nodePort: {{ .Values.server.statefulSet.headless.gRPC.nodePort }} + {{- end }} + {{- end }} + + selector: + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/ingress.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/ingress.yaml new file mode 100644 index 000000000..000f39cab --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/ingress.yaml @@ -0,0 +1,59 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.server.fullname" . }} +{{- $servicePort := .Values.server.service.servicePort -}} +{{- $ingressPath := .Values.server.ingress.path -}} +{{- $ingressPathType := .Values.server.ingress.pathType -}} +{{- $extraPaths := .Values.server.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: +{{- if .Values.server.ingress.annotations }} + annotations: +{{ toYaml .Values.server.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- range $key, $value := .Values.server.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.server.ingress.ingressClassName }} + ingressClassName: {{ .Values.server.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.server.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.server.ingress.tls }} + tls: +{{ toYaml .Values.server.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/netpol.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/netpol.yaml new file mode 100644 index 000000000..c8870e9ff --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/netpol.yaml @@ -0,0 +1,18 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + ingress: + - ports: + - port: 9090 +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/pdb.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/pdb.yaml new file mode 100644 index 000000000..364cb5b49 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.server.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.server.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.server.labels" . | nindent 6 }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/psp.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/psp.yaml new file mode 100644 index 000000000..e2b885f16 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/psp.yaml @@ -0,0 +1,51 @@ +{{- if and .Values.server.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + annotations: +{{- if .Values.server.podSecurityPolicy.annotations }} +{{ toYaml .Values.server.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + allowedCapabilities: + - 'CHOWN' + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + - 'hostPath' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} + {{- range .Values.server.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/pvc.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/pvc.yaml new file mode 100644 index 000000000..cef89151b --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/pvc.yaml @@ -0,0 +1,41 @@ +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +{{- if .Values.server.persistentVolume.enabled -}} +{{- if not .Values.server.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} +{{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} +{{- else if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} +{{- end }} +{{- if .Values.server.persistentVolume.volumeBindingMode }} + volumeBindingModeName: "{{ .Values.server.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/rolebinding.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/rolebinding.yaml new file mode 100644 index 000000000..93ce3ee13 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.server.enabled .Values.rbac.create .Values.server.useExistingClusterRoleName .Values.server.namespaces -}} +{{ range $.Values.server.namespaces -}} +--- +apiVersion: {{ template "rbac.apiVersion" $ }} +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" $ | nindent 4 }} + name: {{ template "prometheus.server.fullname" $ }} + namespace: {{ . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" $ }} +{{ include "prometheus.namespace" $ | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $.Values.server.useExistingClusterRoleName }} +{{ end -}} +{{ end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/service.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/service.yaml new file mode 100644 index 000000000..68f988927 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/service.yaml @@ -0,0 +1,60 @@ +{{- if .Values.server.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.service.annotations }} + annotations: +{{ toYaml .Values.server.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.service.labels }} +{{ toYaml .Values.server.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: +{{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} +{{- end }} +{{- if .Values.server.service.externalIPs }} + externalIPs: +{{ toYaml .Values.server.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.server.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} +{{- end }} +{{- if .Values.server.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.server.service.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.service.nodePort }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + {{- if .Values.server.service.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.service.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.service.gRPC.nodePort }} + nodePort: {{ .Values.server.service.gRPC.nodePort }} + {{- end }} + {{- end }} + selector: + {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} + statefulset.kubernetes.io/pod-name: {{ template "prometheus.server.fullname" . }}-{{ .Values.server.service.statefulsetReplica.replica }} + {{- else -}} + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- if .Values.server.service.sessionAffinity }} + sessionAffinity: {{ .Values.server.service.sessionAffinity }} +{{- end }} + {{- end }} + type: "{{ .Values.server.service.type }}" +{{- end -}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/serviceaccount.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/serviceaccount.yaml new file mode 100644 index 000000000..9c0502ab7 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.serviceAccounts.server.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.server" . }} +{{ include "prometheus.namespace" . | indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/sts.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/sts.yaml new file mode 100644 index 000000000..b0e1e8bdb --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/sts.yaml @@ -0,0 +1,285 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.server.statefulSet.annotations }} + annotations: + {{ toYaml .Values.server.statefulSet.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- if .Values.server.statefulSet.labels}} + {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + serviceName: {{ template "prometheus.server.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . 
| nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + image: "{{ include "get.cmreloadimage" .}}" + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} + image: "{{ include "get.serverimage" .}}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . 
}}
+ {{- end }}
+ {{- range $key, $value := .Values.server.extraArgs }}
+ - --{{ $key }}={{ $value }}
+ {{- end }}
+ {{- if .Values.server.baseURL }}
+ - --web.external-url={{ .Values.server.baseURL }}
+ {{- end }}
+ ports:
+ - containerPort: 9090
+ readinessProbe:
+ httpGet:
+ path: {{ .Values.server.prefixURL }}/-/ready
+ port: 9090
+ initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }}
+ periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }}
+ timeoutSeconds: {{ .Values.server.readinessProbeTimeout }}
+ failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }}
+ successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }}
+ livenessProbe:
+ httpGet:
+ path: {{ .Values.server.prefixURL }}/-/healthy
+ port: 9090
+ initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }}
+ periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }}
+ timeoutSeconds: {{ .Values.server.livenessProbeTimeout }}
+ failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }}
+ successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }}
+ resources:
+{{ toYaml .Values.server.resources | indent 12 }}
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/config
+ - name: storage-volume
+ mountPath: {{ .Values.server.persistentVolume.mountPath }}
+ subPath: "{{ .Values.server.persistentVolume.subPath }}"
+ {{- range .Values.server.extraHostPathMounts }}
+ - name: {{ .name }}
+ mountPath: {{ .mountPath }}
+ subPath: {{ .subPath }}
+ readOnly: {{ .readOnly }}
+ {{- end }}
+ {{- range .Values.server.extraConfigmapMounts }}
+ - name: {{ $.Values.server.name }}-{{ .name }}
+ mountPath: {{ .mountPath }}
+ subPath: {{ .subPath }}
+ readOnly: {{ .readOnly }}
+ {{- end }}
+ {{- range .Values.server.extraSecretMounts }}
+ - name: {{ .name }}
+ mountPath: {{ .mountPath }}
+ subPath: {{ .subPath }}
+ readOnly: {{ .readOnly }}
+ {{- end }}
+ {{- if .Values.server.extraVolumeMounts }}
+ {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }}
+ {{- end }}
+ {{- if .Values.server.sidecarContainers }}
+ {{- range $name, $spec := .Values.server.sidecarContainers }}
+ - name: {{ $name }}
+ {{- if kindIs "string" $spec }}
+ {{- tpl $spec $ | nindent 10 }}
+ {{- else }}
+ {{- toYaml $spec | nindent 10 }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ hostNetwork: {{ .Values.server.hostNetwork }}
+ {{- if .Values.server.dnsPolicy }}
+ dnsPolicy: {{ .Values.server.dnsPolicy }}
+ {{- end }}
+ {{- if (or .Values.global.imagePullSecret .Values.imagePullSecrets) }}
+ imagePullSecrets:
+ {{- if .Values.global.imagePullSecret }}
+ - name: {{ .Values.global.imagePullSecret }}
+ {{- end }}
+ {{- if .Values.imagePullSecrets }}
+{{ toYaml .Values.imagePullSecrets | indent 8 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.server.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.server.nodeSelector | indent 8 }}
+ {{- end }}
+ {{- if .Values.server.hostAliases }}
+ hostAliases:
+{{ toYaml .Values.server.hostAliases | indent 8 }}
+ {{- end }}
+ {{- if .Values.server.dnsConfig }}
+ dnsConfig:
+{{ toYaml .Values.server.dnsConfig | indent 8 }}
+ {{- end }}
+ {{- if .Values.server.securityContext }}
+ securityContext:
+{{ toYaml .Values.server.securityContext | indent 8 }}
+ {{- end }}
+ {{- if .Values.server.tolerations }}
+ tolerations:
+{{ toYaml .Values.server.tolerations | indent 8 }}
+ {{- end }}
+ {{- if .Values.server.affinity }}
+ affinity:
+{{ toYaml .Values.server.affinity | indent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: {{ 
.Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} +{{- if .Values.server.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} + {{- else if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/templates/server/vpa.yaml b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/vpa.yaml new file mode 100644 index 000000000..981a9b485 --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/templates/server/vpa.yaml @@ -0,0 +1,24 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.verticalAutoscaler.enabled -}} +apiVersion: autoscaling.k8s.io/v1beta2 +kind: VerticalPodAutoscaler +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }}-vpa +{{ include "prometheus.namespace" . | indent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" +{{- if .Values.server.statefulSet.enabled }} + kind: StatefulSet +{{- else }} + kind: Deployment +{{- end }} + name: {{ template "prometheus.server.fullname" . 
}} + updatePolicy: + updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} {{/* if .Values.server.verticalAutoscaler.enabled */}} +{{- end -}} {{/* .Values.server.enabled */}} diff --git a/charts/k10/k10/4.5.900/charts/prometheus/values.yaml b/charts/k10/k10/4.5.900/charts/prometheus/values.yaml new file mode 100644 index 000000000..2c33498ec --- /dev/null +++ b/charts/k10/k10/4.5.900/charts/prometheus/values.yaml @@ -0,0 +1,1737 @@ +k10image: + registry: gcr.io + repository: kasten-images + +rbac: + create: true + +podSecurityPolicy: + enabled: false + +imagePullSecrets: +# - name: "image-pull-secret" + +## Define serviceAccount names for components. Defaults to component's fully qualified name. +## +serviceAccounts: + alertmanager: + create: true + name: + annotations: {} + nodeExporter: + create: true + name: + annotations: {} + pushgateway: + create: true + name: + annotations: {} + server: + create: true + name: + annotations: {} + +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a Role and RoleBinding in the defined namespaces ONLY + ## This makes alertmanager work - for users who do not have ClusterAdmin privs, but wants alertmanager to operate on their own namespaces, instead of clusterwide. + useClusterRole: true + + ## Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. + useExistingRole: false + + ## alertmanager container name + ## + name: alertmanager + + ## alertmanager container image + ## + image: + repository: quay.io/prometheus/alertmanager + tag: v0.21.0 + pullPolicy: IfNotPresent + + ## alertmanager priorityClassName + ## + priorityClassName: "" + + ## Additional alertmanager container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + baseURL: "http://localhost:9093" + + ## Additional alertmanager container environment variable + ## For instance to add a http_proxy + ## + extraEnv: {} + + ## Additional alertmanager Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
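+  ## For example, to pin the Alertmanager image and raise its log
+  ## verbosity via extraArgs (a sketch; the values shown are illustrative):
+  # image:
+  #   repository: quay.io/prometheus/alertmanager
+  #   tag: v0.21.0
+  # extraArgs:
+  #   log.level: debug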
+ extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: alertmanager-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config + ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configFromSecret: "" + + ## The configuration file name to be loaded to alertmanager + ## Must match the key within configuration loaded from ConfigMap/Secret + ## + configFileName: alertmanager.yml + + ingress: + ## If true, alertmanager Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## alertmanager Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## alertmanager Ingress additional labels + ## + extraLabels: {} + + ## alertmanager Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - alertmanager.domain.com + # - domain.com/alertmanager + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## alertmanager Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - alertmanager.domain.com + + ## Alertmanager Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for alertmanager scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for alertmanager pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". 
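+  ## For example, assuming a third-party scheduler named "stork" is
+  ## installed in the cluster:
+  # schedulerName: stork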
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, alertmanager will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## alertmanager data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## alertmanager data Persistent Volume Claim annotations + ## + annotations: {} + + ## alertmanager data Persistent Volume existing claim name + ## Requires alertmanager.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## alertmanager data Persistent Volume mount root path + ## + mountPath: /data + + ## alertmanager data Persistent Volume size + ## + size: 2Gi + + ## alertmanager data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## alertmanager data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of alertmanager data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + emptyDir: + ## alertmanager emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to alertmanager pods + ## + podAnnotations: {} + ## Tell prometheus to use a specific set of alertmanager pods + ## instead of all alertmanager pods found in the same namespace + ## Useful if you deploy multiple releases within the same namespace + ## + ## prometheus.io/probe: alertmanager-teamA + + ## Labels to be added to Prometheus AlertManager pods + ## + podLabels: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
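+  ## For example, a two-replica HA Alertmanager using the statefulset keys
+  ## documented below (a sketch; peer-mesh gossip needs the headless
+  ## service):
+  # replicaCount: 2
+  # statefulSet:
+  #   enabled: true
+  #   headless:
+  #     enableMeshPeer: true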
+ ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + enableMeshPeer: false + + servicePort: 80 + + ## alertmanager resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # Custom DNS configuration to be added to alertmanager pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to alertmanager pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + # enableMeshPeer : true + + ## List of IP addresses at which the alertmanager service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + # nodePort: 30000 + sessionAffinity: None + type: ClusterIP + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + alertmanager: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +kubeStateMetrics: + ## If false, kube-state-metrics sub-chart will not be installed + ## + enabled: true + +## kube-state-metrics sub-chart configurable values +## Please 
see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics +## +# kube-state-metrics: + +nodeExporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + ## If true, node-exporter pods share the host network namespace + ## + hostNetwork: true + + ## If true, node-exporter pods share the host PID namespace + ## + hostPID: true + + ## If true, node-exporter pods mounts host / at /host/root + ## + hostRootfs: true + + ## node-exporter container name + ## + name: node-exporter + + ## node-exporter container image + ## + image: + repository: quay.io/prometheus/node-exporter + tag: v1.1.2 + pullPolicy: IfNotPresent + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## node-exporter priorityClassName + ## + priorityClassName: "" + + ## Custom Update Strategy + ## + updateStrategy: + type: RollingUpdate + + ## Additional node-exporter container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional node-exporter hostPath mounts + ## + extraHostPathMounts: [] + # - name: textfile-dir + # mountPath: /srv/txt_collector + # hostPath: /var/lib/node-exporter + # readOnly: true + # mountPropagation: HostToContainer + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # configMap: certs-configmap + # readOnly: true + + ## Node tolerations for node-exporter scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for node-exporter pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to node-exporter pods + ## + podAnnotations: {} + + ## Labels to be added to node-exporter pods + ## + pod: + labels: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## node-exporter resource limits & requests + ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + + # Custom DNS configuration to be added to node-exporter pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to node-exporter pods + ## + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + # Exposed as a headless service: + 
# https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + clusterIP: None + + ## List of IP addresses at which the node-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + hostPort: 9100 + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9100 + type: ClusterIP + +server: + ## Prometheus server container name + ## + enabled: true + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a RoleBinding in the defined namespaces ONLY + ## + ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. + ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. + ## + ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. + ## + # useExistingClusterRoleName: nameofclusterrole + + ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. + # namespaces: + # - yournamespace + + name: server + + # sidecarContainers - add more containers to prometheus server + # Key/Value where Key is the sidecar `- name: ` + # Example: + # sidecarContainers: + # webserver: + # image: nginx + sidecarContainers: {} + + # sidecarTemplateValues - context to be used in template for sidecarContainers + # Example: + # sidecarTemplateValues: *your-custom-globals + # sidecarContainers: + # webserver: |- + # {{ include "webserver-container-template" . }} + # Template for `webserver-container-template` might looks like this: + # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}" + # ... + # + sidecarTemplateValues: {} + + ## Prometheus server container image + ## + image: + repository: quay.io/prometheus/prometheus + tag: v2.26.0 + pullPolicy: IfNotPresent + + ## prometheus server priorityClassName + ## + priorityClassName: "" + + ## EnableServiceLinks indicates whether information about services should be injected + ## into pod's environment variables, matching the syntax of Docker links. + ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. + ## + enableServiceLinks: true + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access prometheus + ## Maybe same with Ingress host name + baseURL: "" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. + ## + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: [] + + extraFlags: + - web.enable-lifecycle + ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as + ## deleting time series. This is disabled by default. 
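+  ## For example, to enable the admin API alongside the default lifecycle
+  ## endpoint, the list would read:
+  # extraFlags:
+  #   - web.enable-lifecycle
+  #   - web.enable-admin-api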
+ # - web.enable-admin-api + ## + ## storage.tsdb.no-lockfile flag controls DB locking + # - storage.tsdb.no-lockfile + ## + ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) + # - storage.tsdb.wal-compression + + ## Path to the configuration file on the Prometheus server container FS + configPath: /etc/config/prometheus.yml + + ### The data directory used by Prometheus to set --storage.tsdb.path + ### When empty, server.persistentVolume.mountPath is used instead + storagePath: "" + + global: + ## How frequently to scrape targets by default + ## + scrape_interval: 1m + ## How long until a scrape request times out + ## + scrape_timeout: 10s + ## How frequently to evaluate rules + ## + evaluation_interval: 1m + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write + ## + remoteWrite: [] + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read + ## + remoteRead: [] + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional Prometheus server Volume mounts + ## + extraVolumeMounts: [] + + ## Additional Prometheus server Volumes + ## + extraVolumes: [] + + ## Additional Prometheus server hostPath mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # subPath: "" + # hostPath: /etc/kubernetes/certs + # readOnly: true + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # subPath: "" + # configMap: certs-configmap + # readOnly: true + + ## Additional Prometheus server Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. + extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: prom-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress additional labels + ## + extraLabels: {} + + ## Prometheus server Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + # - domain.com/prometheus + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
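+  ## For example, the remoteWrite list documented above could forward
+  ## samples to a placeholder endpoint:
+  # remoteWrite:
+  #   - url: https://metrics.example.com/api/v1/push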
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + # strategy: + # type: Recreate + + ## hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Prometheus server data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
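+    ## For example, to request a larger volume from a named class (the
+    ## class name is a placeholder):
+    # size: 50Gi
+    # storageClass: "fast-ssd"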
+ ## + # volumeBindingMode: "" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + emptyDir: + ## Prometheus server emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + ## Labels to be added to Prometheus server pods + ## + podLabels: {} + + ## Prometheus AlertManager configuration + ## + alertmanagers: [] + + ## Specify if a Pod Security Policy for the Prometheus server must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to the deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. + ## This allows scaling replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Prometheus server headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + servicePort: 80 + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## Prometheus server readiness and liveness probe initial delay and timeout + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## + readinessProbeInitialDelay: 30 + readinessProbePeriodSeconds: 5 + readinessProbeTimeout: 4 + readinessProbeFailureThreshold: 3 + readinessProbeSuccessThreshold: 1 + livenessProbeInitialDelay: 30 + livenessProbePeriodSeconds: 15 + livenessProbeTimeout: 10 + livenessProbeFailureThreshold: 3 + livenessProbeSuccessThreshold: 1 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because the AWS-managed control plane cannot communicate with the pods' IP CIDR, so admission webhooks do not work + ## + hostNetwork: false + + # When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet + dnsPolicy: ClusterFirst + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true, a VPA object will be created for the controller (either StatefulSet or Deployment, based on the configs above) + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-server' + + # Custom DNS configuration to be added to prometheus server pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # -
ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + ## Security context to be added to server pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + sessionAffinity: None + type: ClusterIP + + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## If using a statefulSet (statefulSet.enabled=true), configure the + ## service to connect to a specific replica to have a consistent view + ## of the data. + statefulsetReplica: + enabled: false + replica: 0 + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (default if not specified is 15 days) + ## + retention: "15d" + +pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: true + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## pushgateway container name + ## + name: pushgateway + + ## pushgateway container image + ## + image: + repository: prom/pushgateway + tag: v1.3.1 + pullPolicy: IfNotPresent + + ## pushgateway priorityClassName + ## + priorityClassName: "" + + ## Additional pushgateway container arguments + ## + ## for example: persistence.file: /data/pushgateway.data + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ingress: + ## If true, pushgateway Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## pushgateway Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## pushgateway Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - pushgateway.domain.com + # - domain.com/pushgateway + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## pushgateway Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - pushgateway.domain.com + + ## Node tolerations for pushgateway scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for pushgateway pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to pushgateway pods + ## + podAnnotations: {} + + ## Labels to be added to pushgateway pods + ## + podLabels: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## pushgateway resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # Custom DNS configuration to be added to push-gateway pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to push-gateway pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + + service: + annotations: + prometheus.io/probe: pushgateway + labels: {} + clusterIP: "" + + ## List of IP addresses at which the pushgateway service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9091 + type: ClusterIP + + ## pushgateway Deployment Strategy type + # strategy: + # type: Recreate + + persistentVolume: + ## If true, pushgateway will create/use a Persistent Volume Claim + ## + enabled: false + + ## pushgateway data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## pushgateway data Persistent Volume Claim annotations + ## + annotations: {} + + ## pushgateway data Persistent Volume existing claim name + ## Requires pushgateway.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## pushgateway data Persistent Volume mount 
root path + ## + mountPath: /data + + ## pushgateway data Persistent Volume size + ## + size: 2Gi + + ## pushgateway data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## pushgateway data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of pushgateway data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + +## alertmanager ConfigMap entries +## +alertmanagerFiles: + alertmanager.yml: + global: {} + # slack_api_url: '' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Prometheus server ConfigMap entries +## +serverFiles: + + ## Alerts configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + alerting_rules.yml: {} + # groups: + # - name: Instances + # rules: + # - alert: InstanceDown + # expr: up == 0 + # for: 5m + # labels: + # severity: page + # annotations: + # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' + # summary: 'Instance {{ $labels.instance }} down' + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml + alerts: {} + + ## Records configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ + recording_rules.yml: {} + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. 
The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server for which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics + + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below.
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + - job_name: 'kubernetes-service-endpoints' + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: kubernetes_node + + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. 
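+      # For example, a Service opts in to this slow job with annotations
+      # such as (the port shown is illustrative):
+      #
+      #   metadata:
+      #     annotations:
+      #       prometheus.io/scrape-slow: "true"
+      #       prometheus.io/port: "9102"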
+ - job_name: 'kubernetes-service-endpoints-slow' + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: kubernetes_node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods' + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) 
+ target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + + # Example scrape config for pods which should be scraped slower. A useful example + # would be stackdriver-exporter, which queries an API on every scrape of the pod + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods-slow' + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + +# Adds additional scrape configs to prometheus.yml. +# Must be a string, so you have to add a | after extraScrapeConfigs: +# The example adds a prometheus-blackbox-exporter scrape config. +extraScrapeConfigs: + # - job_name: 'prometheus-blackbox-exporter' + # metrics_path: /probe + # params: + # module: [http_2xx] + # static_configs: + # - targets: + # - https://example.com + # relabel_configs: + # - source_labels: [__address__] + # target_label: __param_target + # - source_labels: [__param_target] + # target_label: instance + # - target_label: __address__ + # replacement: prometheus-blackbox-exporter:9115 + +# Adds the option to set alert_relabel_configs to avoid duplicate alerts in alertmanager, +# useful in HA Prometheus setups with different external labels but the same alerts +alertRelabelConfigs: + # alert_relabel_configs: + # - source_labels: [dc] + # regex: (.+)\d+ + # target_label: dc + +networkPolicy: + ## Enable creation of NetworkPolicy resources.
+ ## + enabled: false + +# Force namespace of namespaced resources +forceNamespace: null diff --git a/charts/k10/k10/4.5.900/config.json b/charts/k10/k10/4.5.900/config.json new file mode 100644 index 000000000..e69de29bb diff --git a/charts/k10/k10/4.5.900/eula.txt b/charts/k10/k10/4.5.900/eula.txt new file mode 100644 index 000000000..8eb11346c --- /dev/null +++ b/charts/k10/k10/4.5.900/eula.txt @@ -0,0 +1,458 @@ +KASTEN END USER LICENSE AGREEMENT + +This End User License Agreement is a binding agreement between Kasten, Inc., a +Delaware Corporation ("Kasten"), and you ("Licensee"), and establishes the terms +under which Licensee may use the Software and Documentation (as defined below), +including without limitation terms and conditions relating to license grant, +intellectual property rights, disclaimers /exclusions / limitations of warranty, +indemnity and liability, governing law and limitation periods. All components +collectively are referred to herein as the "Agreement." + +LICENSEE ACKNOWLEDGES IT HAS HAD THE OPPORTUNITY TO REVIEW THE AGREEMENT, PRIOR +TO ACCEPTANCE OF THIS AGREEMENT. LICENSEE'S ACCEPTANCE OF THIS AGREEMENT IS +EVIDENCED BY LICENSEE'S DOWNLOADING, COPYING, INSTALLING OR USING THE KASTEN +SOFTWARE. IF YOU ARE ACTING ON BEHALF OF A COMPANY, YOU REPRESENT THAT YOU ARE +AUTHORIZED TO BIND THE COMPANY. IF YOU DO NOT AGREE TO ALL TERMS OF THIS +AGREEMENT, DO NOT DOWNLOAD, COPY, INSTALL, OR USE THE SOFTWARE, AND PERMANENTLY +DELETE THE SOFTWARE. + +1. DEFINITIONS + +1.1 "Authorized Persons" means trained technical employees and contractors of +Licensee who are subject to a written agreement with Licensee that includes use +and confidentiality restrictions that are at least as protective as those set +forth in this Agreement. + +1.2 "Authorized Reseller" means a distributor or reseller, including cloud +computing platform providers, authorized by Kasten to resell licenses to the +Software through the channel through or in the territory in which Licensee is +purchasing. + +1.3 "Confidential Information" means all non-public information disclosed in +written, oral or visual form by either party to the other. Confidential +Information may include, but is not limited to, services, pricing information, +computer programs, source code, names and expertise of employees and +consultants, know-how, and other technical, business, financial and product +development information. "Confidential Information" does not include any +information that the receiving party can demonstrate by its written records (1) +was rightfully known to it without obligation of confidentiality prior to its +disclosure hereunder by the disclosing party; (2) is or becomes publicly known +through no wrongful act of the receiving party; (3) has been rightfully received +without obligation of confidentiality from a third party authorized to make such +a disclosure; or (4) is independently developed by the receiving party without +reference to confidential information disclosed hereunder. + +1.4 "Documentation" means any administration guides, installation and user +guides, and release notes that are provided by Kasten to Licensee with the +Software. + +1.5 "Intellectual Property Rights" means patents, design patents, copyrights, +trademarks, Confidential Information, know-how, trade secrets, moral rights, and +any other intellectual property rights recognized in any country or jurisdiction +in the world. 
+
+1.6 "Node" means a single physical or virtual computing machine recognizable by
+the Software as a unique device. Nodes must be owned or leased by Licensee or an
+entity controlled by, controlling or under common control with Licensee.
+
+1.7 "Edition" means a unique identifier for each distinct product that is made
+available by Kasten and that can be licensed, including summary information
+regarding any associated functionality, features, or restrictions specific to
+the Edition.
+
+1.8 "Open Source Software" means software delivered to Licensee hereunder that
+is subject to the provisions of any open source license agreement.
+
+1.9 "Purchase Agreement" means a separate commercial agreement, if applicable,
+between Kasten and the Licensee that contains the terms for the licensing of a
+specific Edition of the Software.
+
+1.10 "Software" means any and all software product Editions licensed to Licensee
+under this Agreement, all as developed by Kasten and delivered to Licensee
+hereunder. Software also includes any Updates provided by Kasten to Licensee.
+For the avoidance of doubt, the definition of Software shall exclude any
+Third-Party Software and Open Source Software.
+
+1.11 "Third-Party Software" means certain software Kasten licenses from third
+parties and provides to Licensee with the Software, which may include Open
+Source Software.
+
+1.12 "Update" means a revision of the Software that Kasten makes available to
+customers at no additional cost. The Update includes, if and when applicable and
+available, bug fix patches, maintenance release, minor release, or new major
+releases. Updates are limited only to the Software licensed by Licensee, and
+specifically exclude new product offerings, features, options or functionality
+of the Software that Kasten may choose to license separately, or for an
+additional fee.
+
+1.13 "Use" means to install, activate the processing capabilities of the
+Software, load, execute, access, employ the Software, or display information
+resulting from such capabilities.
+
+
+2. LICENSE GRANT AND RESTRICTIONS
+
+2.1 Enterprise License. Subject to Licensee's compliance with the terms and
+conditions of this Agreement (including any additional restrictions on
+Licensee's use of the Software set forth in the Purchase Agreement, if one
+exists, between Licensee and Kasten), Kasten grants to Licensee a non-exclusive,
+non-transferable (except in connection with a permitted assignment of this
+Agreement under Section 14.10 (Assignment)), non-sublicensable, limited term
+license to install and use the Software, in object code form only, solely for
+Licensee's use, unless terminated in accordance with Section 4 (Term and
+Termination).
+
+2.2 Starter License. This section shall only apply when the Licensee licenses
+Starter Edition of the Software. The license granted herein is for a maximum of
+10 Nodes and for a period of 12 months from the date of the Software release that
+embeds the specific license instance. Updating to a newer Software (minor or
+major) release will always extend the validity of the license by 12 months. If
+the Licensee wishes to upgrade to an Enterprise License instead, the Licensee
+will have to enter into a Purchase Agreement with Kasten which will supersede
+this Agreement. The Licensee is required to provide accurate email and company
+information, if representing a company, when accepting this Agreement. Under no
+circumstances will a Starter License be construed to mean that the Licensee is
+authorized to distribute the Software to any third party for any reason
+whatsoever.
+
+2.3 Evaluation License. This section shall only apply when the Licensee has
+licensed the Software for an initial evaluation period. The license granted
+herein is valid only one time, for 30 days, starting from the date of
+installation, unless otherwise explicitly designated by Kasten ("Evaluation
+Period"). Under this license the Software can only be used for evaluation
+purposes. Under no circumstances will an Evaluation License be construed to mean
+that the Licensee is authorized to distribute the Software to any third party
+for any reason whatsoever. If the Licensee wishes to upgrade to an Enterprise
+License instead, the Licensee will have to enter into a Purchase Agreement with
+Kasten which will supersede this Agreement. If the Licensee does not wish to
+upgrade to an Enterprise License at the end of the Evaluation Period the
+Licensee's rights under the Agreement shall terminate, and the Licensee shall
+delete all Kasten Software.
+
+2.4 License Restrictions. Except to the extent permitted under this Agreement,
+Licensee will not nor will Licensee allow any third party to: (i) copy, modify,
+adapt, translate or otherwise create derivative works of the Software or the
+Documentation; (ii) reverse engineer, decompile, disassemble or otherwise
+attempt to discover the source code of the Software; (iii) rent, lease, sell,
+assign or otherwise transfer rights in or to the Software or Documentation; (iv)
+remove any proprietary notices or labels from the Software or Documentation; (v)
+publicly disseminate performance information or analysis (including, without
+limitation, benchmarks) relating to the Software. Licensee will comply with all
+applicable laws and regulations in Licensee's use of and access to the Software
+and Documentation.
+
+2.5 Responsibility for Use. The Software and Documentation may be used only by
+Authorized Persons and in conformance with this Agreement. Licensee shall be
+responsible for the proper use and protection of the Software and Documentation
+and is responsible for: (i) installing, managing, operating, and physically
+controlling the Software and the results obtained from using the Software; (ii)
+using the Software within the operating environment specified in the
+Documentation; and (iii) establishing and maintaining such recovery and data
+protection and security procedures as necessary for Licensee's service and
+operation and/or as may be specified by Kasten from time to time.
+
+2.6 United States Government Users. The Software licensed under this Agreement
+is "commercial computer software" as that term is described in DFAR
+252.227-7014(a)(1). If acquired by or on behalf of a civilian agency, the U.S.
+Government acquires this commercial computer software and/or commercial computer
+software documentation subject to the terms of this Agreement as specified in
+48 C.F.R. 12.212 (Computer Software) and 12.211 (Technical Data) of the Federal
+Acquisition Regulations ("FAR") and its successors. If acquired by or on behalf
+of any agency within the Department of Defense ("DOD"), the U.S. Government
+acquires this commercial computer software and/or commercial computer software
+documentation subject to the terms of this Agreement as specified in 48 C.F.R.
+227.7202 of the DOD FAR Supplement and its successors.
+
+
+3. SUPPORT
+
+During the Term (as defined below) and subject to Licensee's compliance with the
+terms and conditions of this Agreement, Licensee may submit queries and requests
+for support using Kasten's support alias support@kasten.io and a private Slack
+channel (except Starter and Evaluation Edition Licensees). Licensee shall be
+entitled to the support service-level agreement specified in the Purchase
+Agreement (including relevant Order Forms) between the Licensee and Kasten. If
+there is no Purchase Agreement in place, support level shall default to Starter
+Edition Support as specified below. Licensee shall also be permitted to download
+and install all Updates released by Kasten during the Term and made generally
+available to users of the Software. Support is provided only for the current
+version of the Software (i.e. with all Updates and Upgrades installed) and for
+each of the previous three Updates.
+
+3.1 Starter Edition Support. If the Licensee has licensed Starter Edition of
+the Software, they will have access to the Kasten support alias, but Kasten
+cannot guarantee a service level of any sort. Should a higher level of support
+be needed, Licensee has the option to consider entering into a Purchase
+Agreement with Kasten for licensing a different Edition of the Software.
+
+
+4. TERM AND TERMINATION
+
+4.1 Term. The term of this Agreement, except for Starter and Evaluation
+Licenses, shall commence on the Effective Date and shall, unless terminated
+earlier in accordance with the provisions of Section 4.2 below, remain in force
+for the Subscription Period as set forth in the applicable Order Form(s) (the
+"Term"). The parties may extend the Term of this Agreement beyond the
+Subscription Period by executing additional Order Form(s) and Licensee's payment
+of additional licensing fees. The term of this Agreement for the Starter and
+Evaluation Licenses will coincide with the term for Starter Edition (as stated
+in section 2.2) and the term for Evaluation Period (as stated in section 2.3),
+respectively.
+
+4.2 Termination. Either party may immediately terminate this
+Agreement and the licenses granted hereunder if the other party (1) becomes
+insolvent and becomes unwilling or unable to meet its obligations under this
+Agreement, (2) files a petition in bankruptcy, (3) is subject to the filing of
+an involuntary petition for bankruptcy which is not rescinded within a period of
+forty-five (45) days, (4) fails to cure a material breach of any material term
+or condition of this Agreement within thirty (30) days of receipt of written
+notice specifying such breach, or (5) materially breaches its obligations of
+confidentiality hereunder.
+
+4.3 Effects of Termination. Upon expiration or
+termination of this Agreement for any reason, (i) any amounts owed to Kasten
+under this Agreement will be immediately due and payable; (ii) all licensed
+rights granted in this Agreement will immediately cease; and (iii) Licensee will
+promptly discontinue all use of the Software and Documentation and return to
+Kasten any Kasten Confidential Information in Licensee's possession or control.
+
+4.4 Survival. The following Sections of this Agreement will remain in effect
+following the expiration or termination of these General Terms for any reason:
+4.3 (Effects of Termination), 4.4 (Survival), 5 (Third-Party Software), 6
+(Confidentiality), 9 (Ownership), 10.2 (Third-Party Software), 10.3 (Warranty
+Disclaimer), 11 (Limitations of Liability), 12.2 (Exceptions to Kasten
+Obligation), 13 (Export) and 14 (General).
+
+
+5. THIRD PARTY AND OPEN SOURCE SOFTWARE Certain Third-Party Software or Open
+Source Software (Kasten can provide a list upon request) that may be provided
+with the Software may be subject to various other terms and conditions imposed
+by the licensors of such Third-Party Software or Open Source Software. The
+terms of Licensee's use of the Third-Party Software or Open Source Software are
+subject to and governed by the respective Third-Party Software and Open Source
+licenses, except that this Section 5 (Third-Party Software), Section 10.2 (Third
+Party Software), 10.3 (Warranty Disclaimer), Section 11 (Limitations of
+Liability), and Section 14 (General) of this Agreement also govern Licensee's
+use of the Third-Party Software. To the extent applicable to Licensee's use of
+such Third-Party Software and Open Source, Licensee agrees to comply with the
+terms and conditions contained in all such Third-Party Software and Open Source
+licenses.
+
+
+6. CONFIDENTIALITY Neither party will use any Confidential Information of the
+other party except as expressly permitted by this Agreement or as expressly
+authorized in writing by the disclosing party. The receiving party shall use
+the same degree of care to protect the disclosing party's Confidential
+Information as it uses to protect its own Confidential Information of like
+nature, but in no circumstances less than a commercially reasonable standard of
+care. The receiving party may not disclose the disclosing party's Confidential
+Information to any person or entity other than to (i) (a) Authorized Persons in
+the case the receiving party is Licensee, and (b) Kasten's employees and
+contractors in the case the receiving party is Kasten, and (ii) who need access
+to such Confidential Information solely for the purpose of fulfilling that
+party's obligations or exercising that party's rights hereunder. The foregoing
+obligations will not restrict the receiving party from disclosing Confidential
+Information of the disclosing party: (1) pursuant to the order or requirement of
+a court, administrative agency, or other governmental body, provided that the
+receiving party required to make such a disclosure gives reasonable notice to
+the disclosing party prior to such disclosure; and (2) on a confidential basis
+to its legal and financial advisors. Kasten may identify Licensee in its
+customer lists in online and print marketing materials.
+
+
+7. FEES Fees for Enterprise License shall be set forth in separate Order Form(s)
+attached to a Purchase Agreement, between the Licensee and Kasten.
+
+If Licensee has obtained the Software through an Authorized Reseller, fees for
+licensing shall be invoiced directly by the Authorized Reseller.
+
+If no Purchase Agreement exists, during the term of this Agreement, Kasten
+shall license the Starter Edition only and no other Edition of the Software
+"at no charge" to Licensee.
+
+
+8. USAGE DATA Kasten may collect, accumulate, and aggregate certain usage
+statistics in order to analyze usage of the Software, make improvements, and
+potentially develop new products. Kasten may use aggregated anonymized data for
+any purpose that Kasten, at its own discretion, may consider appropriate.
+
+
+9. OWNERSHIP As between Kasten and Licensee, all right, title and interest in
+the Software, Documentation and any other Kasten materials furnished or made
+available hereunder, all modifications and enhancements thereof, and all
+suggestions, ideas and feedback proposed by Licensee regarding the Software and
+Documentation, including all copyright rights, patent rights and other
+Intellectual Property Rights in each of the foregoing, belong to and are
+retained solely by Kasten or Kasten's licensors and providers, as applicable.
+Licensee hereby does and will irrevocably assign to Kasten all evaluations,
+ideas, feedback and suggestions made by Licensee to Kasten regarding the
+Software and Documentation (collectively, "Feedback") and all Intellectual
+Property Rights in and to the Feedback. Except as expressly provided herein, no
+licenses of any kind are granted hereunder, whether by implication, estoppel, or
+otherwise.
+
+
+10. LIMITED WARRANTY AND DISCLAIMERS
+
+10.1 Limited Warranty. Kasten warrants for a period of thirty (30) days from
+the Effective Date that the Software will materially conform to Kasten's
+then-current Documentation (the "Warranty Period") when properly installed on a
+computer for which a license is granted hereunder. Licensee's exclusive remedy
+for a breach of this Section 10.1 is that Kasten shall, at its option, use
+commercially reasonable efforts to correct or replace the Software, or refund
+all or a portion of the fees paid by Licensee pursuant to the Purchase
+Agreement. Kasten, in its sole discretion, may revise this limited warranty from
+time to time.
+
+10.2 Third-Party Software. Except as expressly set forth in this Agreement,
+Third-Party Software (including any Open Source Software) is provided on an
+"as-is" basis at the sole risk of Licensee. Notwithstanding any language to the
+contrary in this Agreement, Kasten makes no express or implied warranties of any
+kind with respect to Third-Party Software provided to Licensee and shall not be
+liable for any damages regarding the use or operation of the Third-Party
+Software furnished under this Agreement. Any and all express or implied
+warranties, if any, arising from the license of Third-Party Software shall be
+those warranties running from the third party manufacturer or licensor to
+Licensee.
+
+10.3 Warranty Disclaimer. EXCEPT FOR THE LIMITED WARRANTY PROVIDED ABOVE,
+KASTEN AND ITS SUPPLIERS MAKE NO WARRANTY OF ANY KIND, WHETHER EXPRESS, IMPLIED,
+STATUTORY OR OTHERWISE, RELATING TO THE SOFTWARE OR TO KASTEN'S MAINTENANCE,
+PROFESSIONAL OR OTHER SERVICES. KASTEN SPECIFICALLY DISCLAIMS ALL IMPLIED
+WARRANTIES OF DESIGN, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+AND NON-INFRINGEMENT. KASTEN AND ITS SUPPLIERS AND LICENSORS DO NOT WARRANT OR
+REPRESENT THAT THE SOFTWARE WILL BE FREE FROM BUGS OR THAT ITS USE WILL BE
+UNINTERRUPTED OR ERROR-FREE. THIS DISCLAIMER SHALL APPLY NOTWITHSTANDING THE
+FAILURE OF THE ESSENTIAL PURPOSE OF ANY LIMITED REMEDY PROVIDED HEREIN. EXCEPT
+AS STATED ABOVE, KASTEN AND ITS SUPPLIERS PROVIDE THE SOFTWARE ON AN "AS IS"
+BASIS. KASTEN PROVIDES NO WARRANTIES WITH RESPECT TO THIRD PARTY SOFTWARE AND
+OPEN SOURCE SOFTWARE.
+
+
+11. LIMITATIONS OF LIABILITY
+
+11.1 EXCLUSION OF CERTAIN DAMAGES. EXCEPT FOR BREACHES OF SECTION 6
+(CONFIDENTIALITY) OR SECTION 9 (OWNERSHIP), IN NO EVENT WILL EITHER PARTY BE
+LIABLE FOR ANY INDIRECT, CONSEQUENTIAL, EXEMPLARY, SPECIAL, INCIDENTAL OR
+RELIANCE DAMAGES, INCLUDING ANY LOST DATA, LOSS OF USE AND LOST PROFITS, ARISING
+FROM OR RELATING TO THIS AGREEMENT, THE SOFTWARE OR DOCUMENTATION, EVEN IF SUCH
+PARTY KNEW OR SHOULD HAVE KNOWN OF THE POSSIBILITY OF, OR COULD REASONABLY HAVE
+PREVENTED, SUCH DAMAGES.
+
+11.2 LIMITATION OF DAMAGES. EXCEPT FOR THE BREACHES OF SECTION 6
+(CONFIDENTIALITY) OR SECTION 9 (OWNERSHIP), EACH PARTY'S TOTAL CUMULATIVE
+LIABILITY ARISING FROM OR RELATED TO THIS AGREEMENT OR THE SOFTWARE,
+DOCUMENTATION, OR SERVICES PROVIDED BY KASTEN, WILL NOT EXCEED THE AMOUNT OF
+FEES PAID OR PAYABLE BY LICENSEE FOR THE SOFTWARE, DOCUMENTATION OR SERVICES
+GIVING RISE TO THE CLAIM IN THE TWELVE (12) MONTHS FOLLOWING THE EFFECTIVE DATE.
+LICENSEE AGREES THAT KASTEN'S SUPPLIERS AND LICENSORS WILL HAVE NO LIABILITY OF
+ANY KIND UNDER OR AS A RESULT OF THIS AGREEMENT. IN THE CASE OF KASTEN'S
+INDEMNIFICATION OBLIGATIONS, KASTEN'S CUMULATIVE LIABILITY UNDER THIS AGREEMENT
+SHALL BE LIMITED TO THE SUM OF THE LICENSE FEES PAID OR PAYABLE BY LICENSEE FOR
+THE SOFTWARE, DOCUMENTATION OR SERVICES GIVING RISE TO THE CLAIM IN THE TWELVE
+(12) MONTHS FOLLOWING THE EFFECTIVE DATE.
+
+11.3 THIRD PARTY SOFTWARE. NOTWITHSTANDING ANY LANGUAGE TO THE CONTRARY IN THIS
+AGREEMENT, KASTEN SHALL NOT BE LIABLE FOR ANY DAMAGES REGARDING THE USE OR
+OPERATION OF ANY THIRD-PARTY SOFTWARE FURNISHED UNDER THIS AGREEMENT.
+
+11.4 LIMITATION OF ACTIONS. IN NO EVENT MAY LICENSEE BRING ANY CAUSE OF ACTION
+RELATED TO THIS AGREEMENT MORE THAN ONE (1) YEAR AFTER THE OCCURRENCE OF THE
+EVENT GIVING RISE TO THE LIABILITY.
+
+
+12. EXPORT
+The Software, Documentation and related technical data may be subject
+to U.S. export control laws, including without limitation the U.S. Export
+Administration Act and its associated regulations, and may be subject to export
+or import regulations in other countries. Licensee shall comply with all such
+regulations and agrees to obtain all necessary licenses to export, re-export, or
+import the Software, Documentation and related technical data.
+
+
+13. GENERAL
+
+13.1 No Agency. Kasten and Licensee each acknowledge and agree that the
+relationship established by this Agreement is that of independent contractors,
+and nothing contained in this Agreement shall be construed to: (1) give either
+party the power to direct or control the day-to-day activities of the other; (2)
+deem the parties to be acting as partners, joint venturers, co-owners or
+otherwise as participants in a joint undertaking; or (3) permit either party or
+any of either party's officers, directors, employees, agents or representatives
+to create or assume any obligation on behalf of or for the account of the other
+party for any purpose whatsoever.
+
+13.2 Compliance with Laws. Each party agrees to comply with all applicable
+laws, regulations, and ordinances relating to their performance hereunder.
+Without limiting the foregoing, Licensee warrants and covenants that it will
+comply with all then current laws and regulations of the United States and other
+jurisdictions relating or applicable to Licensee's use of the Software and
+Documentation including, without limitation, those concerning Intellectual
+Property Rights, invasion of privacy, defamation, and the import and export of
+Software and Documentation.
+
+13.3 Force Majeure. Except for the duty to pay money, neither party shall be
+liable hereunder by reason of any failure or delay in the performance of its
+obligations hereunder on account of strikes, riots, fires, flood, storm,
+explosions, acts of God, war, governmental action, earthquakes, or any other
+cause which is beyond the reasonable control of such party.
+
+13.4 Governing Law; Venue and Jurisdiction. This Agreement shall be interpreted
+according to the laws of the State of California without regard to or
+application of choice-of-law rules or principles. The parties expressly agree
+that the United Nations Convention on Contracts for the International Sale of
+Goods and the Uniform Computer Information Transactions Act will not apply. Any
+legal action or proceeding arising under this Agreement will be brought
+exclusively in the federal or state courts located in Santa Clara County,
+California and the parties hereby consent to the personal jurisdiction and venue
+therein.
+
+13.5 Injunctive Relief. The parties agree that monetary damages would not be an
+adequate remedy for the breach of certain provisions of this Agreement,
+including, without limitation, all provisions concerning infringement,
+confidentiality and nondisclosure, or limitation on permitted use of the
+Software or Documentation. The parties further agree that, in the event of such
+breach, injunctive relief would be necessary to prevent irreparable injury.
+Accordingly, either party shall have the right to seek injunctive relief or
+similar equitable remedies to enforce such party's rights under the pertinent
+provisions of this Agreement, without limiting its right to pursue any other
+legal remedies available to it.
+
+13.6 Entire Agreement and Waiver. This Agreement and any exhibits hereto shall
+constitute the entire agreement and contains all terms and conditions between
+Kasten and Licensee with respect to the subject matter hereof and all prior
+agreements, representations, and statements with respect to such subject matter
+are superseded hereby. This Agreement may be changed only by written agreement
+signed by both Kasten and Licensee. No failure of either party to exercise or
+enforce any of its rights under this Agreement shall act as a waiver of
+subsequent breaches; and the waiver of any breach shall not act as a waiver of
+subsequent breaches.
+
+13.7 Severability. In the event any provision of this Agreement is held by a
+court or other tribunal of competent jurisdiction to be unenforceable, that
+provision will be enforced to the maximum extent permissible under applicable
+law and the other provisions of this Agreement will remain in full force and
+effect. The parties further agree that in the event such provision is an
+essential part of this Agreement, they will begin negotiations for a suitable
+replacement provision.
+
+13.8 Counterparts. This Agreement may be executed in any number of
+counterparts, each of which, when so executed and delivered (including by
+facsimile), shall be deemed an original, and all of which shall constitute one
+and the same agreement.
+
+13.9 Binding Effect. This Agreement shall be binding upon and shall inure to
+the benefit of the respective parties hereto, their respective successors and
+permitted assigns.
+
+13.10 Assignment. 
Neither party may, without the prior written consent of the +other party (which shall not be unreasonably withheld), assign this Agreement, +in whole or in part, either voluntarily or by operation of law, and any attempt +to do so shall be a material default of this Agreement and shall be void. +Notwithstanding the foregoing, Kasten may assign its rights and benefits and +delegate its duties and obligations under this Agreement without the consent of +Licensee in connection with a merger, reorganization or sale of all or +substantially all relevant assets of the assigning party; in each case provided +that such successor assumes the assigning party"s obligations under this +Agreement. + diff --git a/charts/k10/k10/4.5.900/files/favicon.png b/charts/k10/k10/4.5.900/files/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..fb617ce12c6949ed2dd1bec208c179644bcec0d4 GIT binary patch literal 1802 zcmY*adt8!d8-9^Q;U~4MX*yHXJYWfUAO(S@qG@7bGxIP_4N#l{pHRT^D`su#Je8*P z)jUsR^SG>~RHnI>c|c94qVrIzB^}VzOi6TLef53s^LwBBdhYvruIstqKb|*(x_>Vm zW(orU0PgQcKB$Qp?W+&b%!hTB(=-9ZJ-F8ksFRr~Gz%&{)SnR;2smi4KA;0K1i)H~ zW&mkSV8c2F09#E20B|YjW3^Q0LlsjB{)n~2`_H@#H6mfm;80#@AO(MvorH>^v192d zK@vwx00;uS1}4#YF$h6YB8!U`5Uti3cn#L3(N>6c3hyhTRcIg;;muB_Bd{n}6vm1K zLm&`@WEum1knH<@yJkhSis$h-cr=>N=cD*8D0Xrj+6jllp)t;AXJD;5qOb(C9W+Ak?F|q7pJffAA*673Y?wmX(E=lC6DR@JYxrwGA?Yh`eb0}ft~q6 zS-@*H1iGV44=Z|xFY0HMKAkSj8vYM_RRZ#kFlhRKW%GYVP4jgFbD><&8I zv^XWN3}J45EHCNcSEWmiz#JQ&b_X2DY7W>@rOIn$xI_AF1rucFE!8<*l-JhgP|zUu z+v2>;RYq7)Qz|9=2ic-oiBNT`YySLfGuu*-{BD{G zZ`#7`+d>_8F5q}{&U~MZ3}zw|pk|7l4gHa4YjiMY7q8V@-1>riyq$4w9JQ$$Ns6tq zxd-``i1XUF(Zhxs5>eTjYk5i4=>QP3)TGI-P};Tjlnsrg>Ob|`j%;5zFDWN%Z%y}> zY-2O{QzLFqHcH_=D4@)S!_(U(fBl~u1%3WwFcSoL%=~ti|K!Gg=!?<&BiS1cWo-Ue zp`<5vcckA#0?*<~V+R9zV$)}H3(GPURF7yqh^aJg<5$e=A_Gg=tKXdR^>+X-8i@05 z-W|YKG@-|>d@Rl0EXk!sWgcb=S=69-eXoeAWmDQo`1zqnC%X~N=Q=;%4Jswh+Z*A5 z_QLU|!F?nmyQ=b@PNZ>RYn00lH7DvV?U|jj4cUb+==2H!O$R1Yl8wWsOWOscs1$Bw zfc<=a`~uD1BPLFG({Y3AcZa2KGL47TpB~`fk?V@~uPwJt!HuqMf)rtIAKWE$l-m4I z-8}Vmw4&_^O#!OXE6P{?)(Ar|cJ$gmhpaFSa!;`8Pg<|Un<-(>JS+8#5^g`R6}5@= zzOv3hZ|;|;hDWxNbDT~90D zl9pZ@$4O0bmu6AVD3Q&bJ}tfmgsRiZViC;Sy?NhWUV}}+6_;d(vWq`)XZE-`>^laO zJaBz=d5^@Wk`L}45!R+%ABk{HD(>Tmhm)Y6qZt&F7+WRS-7>Rl^~aR?HcHXO0`D7- z|2?nnC98m0i_>!5t;H_a{jzpHtzcD`KaNE9%mjUU_f3WQ!tf#S@TpG-m{oq%DKX5c z`Ra+*XdEGM1-sL%7~UOCaiTAM7Ylh*66f)}w|=c*El=@R!izK=A4n%&O>e+nGDg0% tn7HN~o~pnSpxO)Prh(w&4OH~a8i>le=(TeuS4aD2@%PzJuJejc{Re;E|Hl9T literal 0 HcmV?d00001 diff --git a/charts/k10/k10/4.5.900/files/kasten-logo.svg b/charts/k10/k10/4.5.900/files/kasten-logo.svg new file mode 100644 index 000000000..0d0ef14ee --- /dev/null +++ b/charts/k10/k10/4.5.900/files/kasten-logo.svg @@ -0,0 +1,24 @@ + + + + + + diff --git a/charts/k10/k10/4.5.900/files/styles.css b/charts/k10/k10/4.5.900/files/styles.css new file mode 100644 index 000000000..2d9205711 --- /dev/null +++ b/charts/k10/k10/4.5.900/files/styles.css @@ -0,0 +1,113 @@ +.theme-body { + background-color: #efefef; + color: #333; + font-family: 'Source Sans Pro', Helvetica, sans-serif; +} + +.theme-navbar { + background-color: #fff; + box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); + color: #333; + font-size: 13px; + font-weight: 100; + height: 46px; + overflow: hidden; + padding: 0 10px; +} + +.theme-navbar__logo-wrap { + display: inline-block; + height: 100%; + overflow: hidden; + padding: 10px 15px; + width: 300px; +} + +.theme-navbar__logo { + height: 100%; + max-height: 25px; +} + +.theme-heading { + font-size: 20px; + font-weight: 500; + margin-bottom: 10px; + margin-top: 0; +} + 
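+/* Panel, button, and form styles for the themed K10 pages follow. */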
+.theme-panel { + background-color: #fff; + box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); + padding: 30px; +} + +.theme-btn-provider { + background-color: #fff; + color: #333; + min-width: 250px; +} + +.theme-btn-provider:hover { + color: #999; +} + +.theme-btn--primary { + background-color: #333; + border: none; + color: #fff; + min-width: 200px; + padding: 6px 12px; +} + +.theme-btn--primary:hover { + background-color: #666; + color: #fff; +} + +.theme-btn--success { + background-color: #2FC98E; + color: #fff; + width: 250px; +} + +.theme-btn--success:hover { + background-color: #49E3A8; +} + +.theme-form-row { + display: block; + margin: 20px auto; +} + +.theme-form-input { + border-radius: 4px; + border: 1px solid #CCC; + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); + color: #666; + display: block; + font-size: 14px; + height: 36px; + line-height: 1.42857143; + margin: auto; + padding: 6px 12px; + width: 250px; +} + +.theme-form-input:focus, +.theme-form-input:active { + border-color: #66AFE9; + outline: none; +} + +.theme-form-label { + font-size: 13px; + font-weight: 600; + margin: 4px auto; + position: relative; + text-align: left; + width: 250px; +} + +.theme-link-back { + margin-top: 4px; +} diff --git a/charts/k10/k10/4.5.900/license b/charts/k10/k10/4.5.900/license new file mode 100644 index 000000000..fb23dbb82 --- /dev/null +++ b/charts/k10/k10/4.5.900/license @@ -0,0 +1 @@ +Y3VzdG9tZXJOYW1lOiBzdGFydGVyLWxpY2Vuc2UKZGF0ZUVuZDogJzIxMDAtMDEtMDFUMDA6MDA6MDAuMDAwWicKZGF0ZVN0YXJ0OiAnMjAyMC0wMS0wMVQwMDowMDowMC4wMDBaJwpmZWF0dXJlczogbnVsbAppZDogc3RhcnRlci00ZjE4NDJjMC0wNzQ1LTQxYTUtYWFhNy1hMDFkNzQ4YjFjMzAKcHJvZHVjdDogSzEwCnJlc3RyaWN0aW9uczoKICBub2RlczogJzEwJwpzZXJ2aWNlQWNjb3VudEtleTogbnVsbAp2ZXJzaW9uOiB2MS4wLjAKc2lnbmF0dXJlOiBqT1N5NDNQZG5ZMFVCZitValhOdU1oUEFSb1J2ZkpzWElQWnhBWFNCaGpKbUwxNlNodi8vVzgyV2NMeGZJM25NZTA0TThtRU03eThPcnArQks1ekxpeFd3clpncmZSbTBEaWlELyttRjR5U3l1Rko0QW1neHV6NDhQTmdnU1VyWUM3S1FVcFYxSEJZV1ZaNm9udEJDeE1rVWtkaDVqdzZJdWMzN3lDaktIYy92bWZaenBzTVhybmxUdGhha2RjVVk0azNyVHJDa3VDcnFUMkpjM1o1amFGalZSZW1Zd1NBVXpkRldNazdQdkp3eHVFdE5rNitPV0pCVERQbnNYdldKdjdNc3NneDBJTmdtNUlJWDRVeEVhQWI4QXpTNkMyQ21XQzlhWURFTDg1aEFpeWhONXUwU0tQczA3ZXB0R1VHYmc3cWtPUVN0d0NhcDFKUURvbDVDT0E9PQo= diff --git a/charts/k10/k10/4.5.900/questions.yaml b/charts/k10/k10/4.5.900/questions.yaml new file mode 100644 index 000000000..713fcb116 --- /dev/null +++ b/charts/k10/k10/4.5.900/questions.yaml @@ -0,0 +1,295 @@ +questions: +# ======================== +# SECRETS And Configuration +# ======================== + +### AWS Configuration + +- variable: secrets.awsAccessKeyId + description: "AWS access key ID (required for AWS deployment)" + type: password + label: AWS Access Key ID + required: false + group: "AWS Configuration" + +- variable: secrets.awsSecretAccessKey + description: "AWS access key secret (required for AWS deployment)" + type: password + label: AWS Secret Access Key + required: false + group: "AWS Configuration" + +- variable: secrets.awsIamRole + description: "ARN of the AWS IAM role assumed by K10 to perform any AWS operation." 
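+  # For illustration only, a role ARN has the form
+  # arn:aws:iam::123456789012:role/k10-role (hypothetical account ID and role name).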
+ type: string + label: ARN of the AWS IAM role + required: false + group: "AWS Configuration" + +- variable: awsConfig.assumeRoleDuration + description: "Duration of a session token generated by AWS for an IAM role" + type: string + label: Role Duration + required: false + default: "" + group: "AWS Configuration" + +- variable: awsConfig.efsBackupVaultName + description: "Specifies the AWS EFS backup vault name" + type: string + label: EFS Backup Vault Name + required: false + default: "k10vault" + group: "AWS Configuration" + +### Google Cloud Configuration + +- variable: secrets.googleApiKey + description: "Required If cluster is deployed on Google Cloud" + type: multiline + label: Non-default base64 encoded GCP Service Account key file + required: false + group: "GoogleApi Configuration" + +### Azure Configuration + +- variable: secrets.azureTenantId + description: "Azure tenant ID (required for Azure deployment)" + type: string + label: Tenant ID + required: false + group: "Azure Configuration" + +- variable: secrets.azureClientId + description: "Azure Service App ID" + type: password + label: Service App ID + required: false + group: "Azure Configuration" + +- variable: secrets.azureClientSecret + description: "Azure Service App secret" + type: password + label: Service App secret + required: false + group: "Azure Configuration" + +- variable: secrets.azureResourceGroup + description: "Resource Group name that was created for the Kubernetes cluster" + type: string + label: Resource Group + required: false + group: "Azure Configuration" + +- variable: secrets.azureSubscriptionID + description: "Subscription ID in your Azure tenant" + type: string + label: Subscription ID + required: false + group: "Azure Configuration" + +- variable: secrets.azureResourceMgrEndpoint + description: "Resource management endpoint for the Azure Stack instance" + type: string + label: Resource management endpoint + required: false + group: "Azure Configuration" + +- variable: secrets.azureADEndpoint + description: "Azure Active Directory login endpoint" + type: string + label: Active Directory login endpoint + required: false + group: "Azure Configuration" + +- variable: secrets.azureADResourceID + description: "Azure Active Directory resource ID to obtain AD tokens" + type: string + label: Active Directory resource ID + required: false + group: "Azure Configuration" + +# ======================== +# Authentication +# ======================== + +- variable: auth.basicAuth.enabled + description: "Configures basic authentication for the K10 dashboard" + type: boolean + label: Enable Basic Authentication + required: false + group: "Authentication" + show_subquestion_if: true + subquestions: + - variable: auth.basicAuth.htpasswd + description: "A username and password pair separated by a colon character" + type: password + label: Authentication Details (htpasswd) + - variable: auth.basicAuth.secretName + description: "Name of an existing Secret that contains a file generated with htpasswd" + type: string + label: Secret Name + +- variable: auth.tokenAuth.enabled + description: "Configures token based authentication for the K10 dashboard" + type: boolean + label: Enable Token Based Authentication + required: false + group: "Authentication" + +- variable: auth.oidcAuth.enabled + description: "Configures Open ID Connect based authentication for the K10 dashboard" + type: boolean + label: Enable OpenID Connect Based Authentication + required: false + group: "Authentication" + show_subquestion_if: true + 
subquestions: + - variable: auth.oidcAuth.providerURL + description: "URL for the OIDC Provider" + type: string + label: OIDC Provider URL + - variable: auth.oidcAuth.redirectURL + description: "URL for the K10 gateway Provider" + type: string + label: OIDC Redirect URL + - variable: auth.oidcAuth.scopes + description: "Space separated OIDC scopes required for userinfo. Example: `profile email`" + type: string + label: OIDC scopes + - variable: auth.oidcAuth.prompt + description: "The type of prompt to be used during authentication (none, consent, login, or select_account)" + type: enum + options: + - none + - consent + - login + - select_account + default: none + label: The type of prompt to be used during authentication (none, consent, login, or select_account) + - variable: auth.oidcAuth.clientID + description: "Client ID given by the OIDC provider for K10" + type: password + label: OIDC Client ID + - variable: auth.oidcAuth.clientSecret + description: "Client secret given by the OIDC provider for K10" + type: password + label: OIDC Client Secret + - variable: auth.oidcAuth.usernameClaim + description: "The claim to be used as the username" + type: string + label: OIDC UserName Claim + - variable: auth.oidcAuth.usernamePrefix + description: "Prefix that has to be used with the username obtained from the username claim" + type: string + label: OIDC UserName Prefix + - variable: auth.oidcAuth.groupClaim + description: "Name of a custom OpenID Connect claim for specifying user groups" + type: string + label: OIDC group Claim + - variable: auth.oidcAuth.groupPrefix + description: "All groups will be prefixed with this value to prevent conflicts" + type: string + label: OIDC group Prefix + +# ======================== +# External Gateway +# ======================== + +- variable: externalGateway.create + description: "Configures an external gateway for K10 API services" + type: boolean + label: Create External Gateway + required: false + group: "External Gateway" + show_subquestion_if: true + subquestions: + - variable: externalGateway.annotations + description: "Standard annotations for the services" + type: multiline + default: "" + label: Annotation + - variable: externalGateway.fqdn.name + description: "Domain name for the K10 API services" + type: string + label: Domain Name + - variable: externalGateway.fqdn.type + description: "Supported gateway type: `route53-mapper` or `external-dns`" + type: string + label: Gateway Type route53-mapper or external-dns + - variable: externalGateway.awsSSLCertARN + description: "ARN for the AWS ACM SSL certificate used in the K10 API server" + type: multiline + label: ARN for the AWS ACM SSL certificate + +# ======================== +# Storage Management +# ======================== + +- variable: global.persistence.storageClass + label: StorageClass Name + description: "Specifies StorageClass Name to be used for PVCs" + type: string + required: false + default: "" + group: "Storage Management" + +- variable: prometheus.server.persistentVolume.storageClass + type: string + label: StorageClass Name for Prometheus PVC + description: "StorageClassName used to create Prometheus PVC. 
Setting this option overwrites global StorageClass value" + default: "" + required: false + group: "Storage Management" + +- variable: prometheus.server.persistentVolume.enabled + type: boolean + label: Enable PVC for Prometheus server + description: "If true, K10 Prometheus server will create a Persistent Volume Claim" + default: true + required: false + group: "Storage Management" + +- variable: global.persistence.enabled + type: boolean + label: Storage Enabled + description: "If true, K10 will use Persistent Volume Claim" + default: true + required: false + group: "Storage Management" + +# ======================== +# Service Account +# ======================== + +- variable: serviceAccount.name + description: "Name of a service account in the target namespace that has cluster-admin permissions. This is needed for the K10 to be able to protect cluster resources." + type: string + label: Service Account Name + required: false + group: "Service Account" + +# ======================== +# License +# ======================== + +- variable: license + description: "License string obtained from Kasten" + type: multiline + label: License String + group: "License" +- variable: eula.accept + description: "Whether to enable accept EULA before installation" + type: boolean + label: Enable accept EULA before installation + group: "License" + show_subquestion_if: true + subquestions: + - variable: eula.company + description: "Company name. Required field if EULA is accepted" + type: string + label: Company Name + - variable: eula.email + description: "Contact email. Required field if EULA is accepted" + type: string + label: Contact Email diff --git a/charts/k10/k10/4.5.900/templates/NOTES.txt b/charts/k10/k10/4.5.900/templates/NOTES.txt new file mode 100644 index 000000000..240f3062d --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/NOTES.txt @@ -0,0 +1,47 @@ +Thank you for installing Kasten’s K10 Data Management Platform! + +Documentation can be found at https://docs.kasten.io/. + +How to access the K10 Dashboard: + +{{ if .Values.ingress.create }} +You are using the system's default ingress controller. Please ask your +administrator for instructions on how to access the cluster. + +WebUI location: https://{{ default "Your ingress endpoint" .Values.ingress.host }}/{{ default .Release.Name .Values.ingress.urlPath }} +{{ end }} + +The K10 dashboard is not exposed externally. To establish a connection to it use the following `kubectl` command: + +`kubectl --namespace {{ .Release.Namespace }} port-forward service/gateway 8080:{{ .Values.service.externalPort }}` + +The Kasten dashboard will be available at: `http{{ if or (and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey) .Values.externalGateway.awsSSLCertARN }}s{{ end }}://127.0.0.1:8080/{{ .Release.Name }}/#/` + +{{ if.Values.externalGateway.create }} +{{ if .Values.externalGateway.fqdn.name }} + +The K10 Dashboard is accessible via {{ if or (and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey) .Values.externalGateway.awsSSLCertARN }}https{{ else }}http{{ end }}://{{ .Values.externalGateway.fqdn.name }}/{{ .Release.Name }}/#/ + +{{ else }} + +The K10 Dashboard is accessible via a LoadBalancer. 
Find the service's EXTERNAL IP using:
+  `kubectl get svc gateway-ext --namespace {{ .Release.Namespace }} -o wide`
+And use it in the following URL:
+  `http://SERVICE_EXTERNAL_IP/{{ .Release.Name }}/#/`
+{{ end }}
+{{ end }}
+
+{{ if and ( .Values.metering.awsManagedLicense ) ( not .Values.metering.licenseConfigSecretName ) }}
+
+The IAM Role created during installation needs to have permissions that allow
+K10 to perform operations on EBS and, if needed, EFS and S3. Please create a
+policy with the required permissions, and use the commands below to attach the
+policy to the service account.
+
+`ROLE_NAME=$(kubectl get serviceaccount {{ .Values.serviceAccount.name }} -n {{ .Release.Namespace }} -ojsonpath="{.metadata.annotations['eks\.amazonaws\.com/role-arn']}" | awk -F '/' '{ print $(NF) }')`
+`aws iam attach-role-policy --role-name "${ROLE_NAME}" --policy-arn `
+
+Refer to `https://docs.kasten.io/latest/install/aws-containers-anywhere/aws-containers-anywhere.html#attaching-permissions-for-eks-installations`
+for more information.
+
+{{ end }}
\ No newline at end of file
diff --git a/charts/k10/k10/4.5.900/templates/_definitions.tpl b/charts/k10/k10/4.5.900/templates/_definitions.tpl
new file mode 100644
index 000000000..9c91d0bdf
--- /dev/null
+++ b/charts/k10/k10/4.5.900/templates/_definitions.tpl
@@ -0,0 +1,184 @@
+{{/* Autogenerated, do NOT modify */}}
+{{- define "k10.additionalServices" -}}frontend kanister {{- end -}}
+{{- define "k10.restServices" -}}admin auth bloblifecyclemanager catalog crypto dashboardbff events executor jobs logging metering state vbrintegrationapi {{- end -}}
+{{- define "k10.services" -}}aggregatedapis config {{- end -}}
+{{- define "k10.exposedServices" -}}auth dashboardbff vbrintegrationapi {{- end -}}
+{{- define "k10.statelessServices" -}}admin aggregatedapis auth bloblifecyclemanager crypto dashboardbff events executor state vbrintegrationapi {{- end -}}
+{{- define "k10.colocatedServices" -}}admin:
+  isExposed: false
+  port: 8001
+  primary: state
+bloblifecyclemanager:
+  isExposed: true
+  port: 8001
+  primary: crypto
+events:
+  isExposed: true
+  port: 8002
+  primary: crypto
+vbrintegrationapi:
+  isExposed: true
+  port: 8001
+  primary: dashboardbff
+{{- end -}}
+{{- define "k10.colocatedServiceLookup" -}}crypto:
+- bloblifecyclemanager
+- events
+dashboardbff:
+- vbrintegrationapi
+state:
+- admin
+{{- end -}}
+{{- define "k10.aggregatedAPIs" -}}actions apps vault {{- end -}}
+{{- define "k10.configAPIs" -}}config{{- end -}}
+{{- define "k10.profiles" -}}profiles{{- end -}}
+{{- define "k10.policies" -}}policies{{- end -}}
+{{- define "k10.reportingAPIs" -}}reporting{{- end -}}
+{{- define "k10.distAPIs" -}}dist{{- end -}}
+{{- define "k10.actionsAPIs" -}}actions{{- end -}}
+{{- define "k10.backupActions" -}}backupactions{{- end -}}
+{{- define "k10.backupActionsDetails" -}}backupactions/details{{- end -}}
+{{- define "k10.reportActions" -}}reportactions{{- end -}}
+{{- define "k10.reportActionsDetails" -}}reportactions/details{{- end -}}
+{{- define "k10.restoreActions" -}}restoreactions{{- end -}}
+{{- define "k10.restoreActionsDetails" -}}restoreactions/details{{- end -}}
+{{- define "k10.importActions" -}}importactions{{- end -}}
+{{- define "k10.exportActions" -}}exportactions{{- end -}}
+{{- define "k10.exportActionsDetails" -}}exportactions/details{{- end -}}
+{{- define "k10.retireActions" -}}retireactions{{- end -}}
+{{- define "k10.runActions" -}}runactions{{- end -}}
+{{- define "k10.backupClusterActions" -}}backupclusteractions{{- end -}}
+{{- define 
"k10.backupClusterActionsDetails" -}}backupclusteractions/details{{- end -}} +{{- define "k10.restoreClusterActions" -}}restoreclusteractions{{- end -}} +{{- define "k10.restoreClusterActionsDetails" -}}restoreclusteractions/details{{- end -}} +{{- define "k10.cancelActions" -}}cancelactions{{- end -}} +{{- define "k10.appsAPIs" -}}apps{{- end -}} +{{- define "k10.restorePoints" -}}restorepoints{{- end -}} +{{- define "k10.restorePointsDetails" -}}restorepoints/details{{- end -}} +{{- define "k10.clusterRestorePoints" -}}clusterrestorepoints{{- end -}} +{{- define "k10.clusterRestorePointsDetails" -}}clusterrestorepoints/details{{- end -}} +{{- define "k10.applications" -}}applications{{- end -}} +{{- define "k10.applicationsDetails" -}}applications/details{{- end -}} +{{- define "k10.vaultAPIs" -}}vault{{- end -}} +{{- define "k10.passkey" -}}passkeys{{- end -}} +{{- define "k10.authAPIs" -}}auth{{- end -}} +{{- define "k10.defaultConcurrentSnapshotConversions" -}}3{{- end -}} +{{- define "k10.defaultConcurrentWorkloadSnapshots" -}}5{{- end -}} +{{- define "k10.defaultK10DataStoreParallelUpload" -}}8{{- end -}} +{{- define "k10.defaultK10DataStoreGeneralContentCacheSizeMB" -}}0{{- end -}} +{{- define "k10.defaultK10DataStoreGeneralMetadataCacheSizeMB" -}}500{{- end -}} +{{- define "k10.defaultK10DataStoreRestoreContentCacheSizeMB" -}}500{{- end -}} +{{- define "k10.defaultK10DataStoreRestoreMetadataCacheSizeMB" -}}500{{- end -}} +{{- define "k10.defaultK10BackupBufferFileHeadroomFactor" -}}1.1{{- end -}} +{{- define "k10.defaultK10LimiterGenericVolumeSnapshots" -}}10{{- end -}} +{{- define "k10.defaultK10LimiterGenericVolumeCopies" -}}10{{- end -}} +{{- define "k10.defaultK10LimiterGenericVolumeRestores" -}}10{{- end -}} +{{- define "k10.defaultK10LimiterCsiSnapshots" -}}10{{- end -}} +{{- define "k10.defaultK10LimiterProviderSnapshots" -}}10{{- end -}} +{{- define "k10.defaultAssumeRoleDuration" -}}60m{{- end -}} +{{- define "k10.defaultKanisterBackupTimeout" -}}45{{- end -}} +{{- define "k10.defaultKanisterRestoreTimeout" -}}600{{- end -}} +{{- define "k10.defaultKanisterDeleteTimeout" -}}45{{- end -}} +{{- define "k10.defaultKanisterHookTimeout" -}}20{{- end -}} +{{- define "k10.defaultKanisterCheckRepoTimeout" -}}20{{- end -}} +{{- define "k10.defaultKanisterStatsTimeout" -}}20{{- end -}} +{{- define "k10.defaultKanisterEFSPostRestoreTimeout" -}}45{{- end -}} +{{- define "k10.cloudProviders" -}} aws google azure {{- end -}} +{{- define "k10.serviceResources" -}} +admin-svc: + admin-svc: + requests: + cpu: 2m + memory: 160Mi +aggregatedapis-svc: + aggregatedapis-svc: + requests: + cpu: 90m + memory: 180Mi +auth-svc: + auth-svc: + requests: + cpu: 2m + memory: 30Mi +bloblifecyclemanager-svc: + bloblifecyclemanager-svc: + requests: + cpu: 10m + memory: 40Mi +catalog-svc: + catalog-svc: + requests: + cpu: 200m + memory: 780Mi + kanister-sidecar: + limits: + cpu: 1200m + memory: 800Mi + requests: + cpu: 100m + memory: 800Mi +config-svc: + config-svc: + requests: + cpu: 5m + memory: 30Mi +crypto-svc: + crypto-svc: + requests: + cpu: 1m + memory: 30Mi +dashboardbff-svc: + dashboardbff-svc: + requests: + cpu: 8m + memory: 40Mi +events-svc: + events-svc: + requests: + cpu: 3m + memory: 500Mi +executor-svc: + executor-svc: + requests: + cpu: 3m + memory: 50Mi + tools: + requests: + cpu: 1m + memory: 2Mi +frontend-svc: + frontend-svc: + requests: + cpu: 1m + memory: 40Mi +jobs-svc: + jobs-svc: + requests: + cpu: 30m + memory: 380Mi +kanister-svc: + kanister-svc: + requests: + cpu: 1m + memory: 
30Mi +logging-svc: + logging-svc: + requests: + cpu: 2m + memory: 40Mi +metering-svc: + metering-svc: + requests: + cpu: 2m + memory: 30Mi +state-svc: + state-svc: + requests: + cpu: 2m + memory: 30Mi +{{- end -}} +{{- define "k10.multiClusterVersion" -}}2{{- end -}} +{{- define "k10.ambassadorImageTag" -}}1.14.1{{- end -}} +{{- define "k10.kanisterToolsImageTag" -}}0.73.0{{- end -}} +{{- define "k10.dexImageTag" -}}v2.24.0{{- end -}} +{{- define "k10.rhAmbassadorImageTag" -}}1.13.8{{- end -}} diff --git a/charts/k10/k10/4.5.900/templates/_helpers.tpl b/charts/k10/k10/4.5.900/templates/_helpers.tpl new file mode 100644 index 000000000..9e6d189e6 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/_helpers.tpl @@ -0,0 +1,645 @@ +{{/* Check if basic auth is needed */}} +{{- define "basicauth.check" -}} + {{- if .Values.auth.basicAuth.enabled }} + {{- print true }} + {{- end -}} {{/* End of check for auth.basicAuth.enabled */}} +{{- end -}} + +{{/* +Check if trusted root CA certificate related configmap settings +have been configured +*/}} +{{- define "check.cacertconfigmap" -}} +{{- if .Values.cacertconfigmap.name -}} +{{- print true -}} +{{- else -}} +{{- print false -}} +{{- end -}} +{{- end -}} + +{{/* +Check if the auth options are implemented using Dex +*/}} +{{- define "check.dexAuth" -}} +{{- if or .Values.auth.openshift.enabled .Values.auth.ldap.enabled -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* Check the only 1 auth is specified */}} +{{- define "singleAuth.check" -}} +{{- $count := dict "count" (int 0) -}} +{{- $authList := list .Values.auth.basicAuth.enabled .Values.auth.tokenAuth.enabled .Values.auth.oidcAuth.enabled .Values.auth.openshift.enabled .Values.auth.ldap.enabled -}} +{{- range $i, $val := $authList }} +{{ if $val }} +{{ $c := add1 $count.count | set $count "count" }} +{{ if gt $count.count 1 }} +{{- fail "Multiple auth types were selected. Only one type can be enabled." }} +{{ end }} +{{ end }} +{{- end }} +{{- end -}}{{/* Check the only 1 auth is specified */}} + +{{/* Check if Auth is enabled */}} +{{- define "authEnabled.check" -}} +{{- $count := dict "count" (int 0) -}} +{{- $authList := list .Values.auth.basicAuth.enabled .Values.auth.tokenAuth.enabled .Values.auth.oidcAuth.enabled .Values.auth.openshift.enabled .Values.auth.ldap.enabled -}} +{{- range $i, $val := $authList }} +{{ if $val }} +{{ $c := add1 $count.count | set $count "count" }} +{{ end }} +{{- end }} +{{- if eq $count.count 0}} + {{- fail "Auth is required to expose access to K10." }} +{{- end }} +{{- end -}}{{/*end of check */}} + +{{/* Return ingress class name annotation */}} +{{- define "ingressClassAnnotation" -}} +{{- if .Values.ingress.class -}} +kubernetes.io/ingress.class: {{ .Values.ingress.class | quote }} +{{- end -}} +{{- end -}} + +{{/* Helm required labels */}} +{{- define "helm.labels" -}} +heritage: {{ .Release.Service }} +helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +app.kubernetes.io/name: {{ .Chart.Name }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{ include "k10.common.matchLabels" . }} +{{- end -}} + +{{- define "k10.common.matchLabels" -}} +app: {{ .Chart.Name }} +release: {{ .Release.Name }} +{{- end -}} + +{{/* Expand the name of the chart. */}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
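+(The result joins the release name to the chart name, so release "mya" with
+chart "k10" would render as "mya-k10".)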
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "serviceAccountName" -}} +{{- if and .Values.metering.awsMarketplace ( not .Values.serviceAccount.name ) -}} + {{ print "k10-metering" }} +{{- else if .Values.serviceAccount.create -}} + {{ default (include "fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the metering service account to use +*/}} +{{- define "meteringServiceAccountName" -}} +{{- if and .Values.metering.awsManagedLicense ( not .Values.serviceAccount.name ) ( not .Values.metering.serviceAccount.name ) ( not .Values.metering.licenseConfigSecretName ) -}} + {{ print "k10-metering" }} +{{- else -}} + {{ default (include "serviceAccountName" .) .Values.metering.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Prints annotations based on .Values.fqdn.type +*/}} +{{- define "dnsAnnotations" -}} +{{- if .Values.externalGateway.fqdn.name -}} +{{- if eq "route53-mapper" ( default "" .Values.externalGateway.fqdn.type) }} +domainName: {{ .Values.externalGateway.fqdn.name | quote }} +{{- end }} +{{- if eq "external-dns" (default "" .Values.externalGateway.fqdn.type) }} +external-dns.alpha.kubernetes.io/hostname: {{ .Values.externalGateway.fqdn.name | quote }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Prometheus scrape config template for k10 services +*/}} +{{- define "k10.prometheusScrape" -}} +{{- $admin_port := default 8877 .main.Values.service.gatewayAdminPort -}} +- job_name: {{ .k10service }} + metrics_path: /metrics + {{- if eq "aggregatedapis" .k10service }} + scheme: https + tls_config: + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- else }} + scheme: http + {{- end }} + static_configs: + - targets: + {{- if eq "gateway" .k10service }} + - {{ .k10service }}-admin.{{ .main.Release.Namespace }}.svc.{{ .main.Values.cluster.domainName }}:{{ $admin_port }} + {{- else if eq "aggregatedapis" .k10service }} + - {{ .k10service }}-svc.{{ .main.Release.Namespace }}.svc.{{ .main.Values.cluster.domainName }}:443 + {{- else }} + {{- $service := default .k10service (index (include "k10.colocatedServices" . | fromYaml) .k10service).primary }} + {{- $port := default .main.Values.service.externalPort (index (include "k10.colocatedServices" . | fromYaml) .k10service).port }} + - {{ $service }}-svc.{{ .main.Release.Namespace }}.svc.{{ .main.Values.cluster.domainName }}:{{ $port }} + {{- end }} + labels: + application: {{ .main.Release.Name }} + service: {{ .k10service }} +{{- end -}} + +{{/* +Expands the name of the Prometheus chart. It is equivalent to what the +"prometheus.name" template does. It is needed because the referenced values in a +template are relative to where/when the template is called from, and not where +the template is defined at. This means that the value of .Chart.Name and +.Values.nameOverride are different depending on whether the template is called +from within the Prometheus chart or the K10 chart. 
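+For example, the expression `default .Chart.Name .Values.nameOverride` would
+yield "prometheus" inside the Prometheus subchart but "k10" when evaluated from
+a K10 template, which is why the helper below hard-codes the name instead.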
+*/}} +{{- define "k10.prometheus.name" -}} +{{- default "prometheus" .Values.prometheus.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expands the name of the Prometheus service created to expose the prometheus server. +*/}} +{{- define "k10.prometheus.service.name" -}} +{{- default (printf "%s-%s-%s" .Release.Name "prometheus" .Values.prometheus.server.name) .Values.prometheus.server.fullnameOverride }} +{{- end -}} + +{{/* +Checks if EULA is accepted via cmd +Enforces eula.company and eula.email as required fields +returns configMap fields +*/}} +{{- define "k10.eula.fields" -}} +{{- if .Values.eula.accept -}} +accepted: "true" +company: {{ required "eula.company is required field if eula is accepted" .Values.eula.company }} +email: {{ required "eula.email is required field if eula is accepted" .Values.eula.email }} +{{- else -}} +accepted: "" +company: "" +email: "" +{{- end }} +{{- end -}} + +{{/* +Helper to determine the API Domain +*/}} +{{- define "apiDomain" -}} +{{- if .Values.useNamespacedAPI -}} +kio.{{- replace "-" "." .Release.Namespace -}} +{{- else -}} +kio.kasten.io +{{- end -}} +{{- end -}} + +{{/* +Get dex image, if user wants to +install certified version of upstream +images or not +*/}} +{{- define "k10.dexImage" -}} +{{- if not .Values.rhMarketPlace }} +{{- printf "%s:%s" ( include "k10.dexImageRepo" . ) (include "k10.dexTag" .) }} +{{- else }} +{{- printf "%s" (get .Values.images "dex") }} +{{- end -}} +{{- end -}} + +{{/* +Get dex image repo based on conditions +if its airgapped and red hat images are +required +*/}} +{{- define "k10.dexImageRepo" -}} +{{- if .Values.global.upstreamCertifiedImages }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/dex" .Values.global.airgapped.repository }} +{{- else }} +{{- printf "%s/%s/dex" .Values.image.registry .Values.image.repository }} +{{- end}} +{{- else }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/dex" .Values.global.airgapped.repository }} +{{- else }} +{{- printf "%s/%s/%s" .Values.dexImage.registry .Values.dexImage.repository .Values.dexImage.image }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +Get dex image tag based on conditions +if its airgapped and red hat images are +required +*/}} +{{- define "k10.dexTag" -}} +{{- if .Values.global.upstreamCertifiedImages }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s-rh-ubi" (include "k10.dexImageTag" .) }} +{{- else }} +{{- printf "%s-rh-ubi" (include "k10.dexImageTag" .) }} +{{- end}} +{{- else }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s" (include "k10.dexImageTag" .) }} +{{- else }} +{{- printf "%s" (include "k10.dexImageTag" .) }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +Get ambassador image base on whether +we or not we are installing k10 on openshift +*/}} +{{- define "k10.ambImage" -}} +{{- if not .Values.global.rhMarketPlace }} +{{- printf "%s:%s" ( include "k10.ambImageRepo" .) (include "k10.ambImageTag" .) 
+{{/*
+Get the ambassador image based on whether or not we are
+installing k10 on openshift.
+*/}}
+{{- define "k10.ambImage" -}}
+{{- if not .Values.global.rhMarketPlace }}
+{{- printf "%s:%s" ( include "k10.ambImageRepo" .) (include "k10.ambImageTag" .) }}
+{{- else }}
+{{- printf "%s" (get .Values.global.images "ambassador") }}
+{{- end -}}
+{{- end -}}
+
+{{- define "k10.ambImageRepo" -}}
+{{- if .Values.global.upstreamCertifiedImages }}
+{{- if .Values.global.airgapped.repository }}
+{{- printf "%s/ambassador" .Values.global.airgapped.repository }}
+{{- else }}
+{{- printf "%s/%s/ambassador" .Values.image.registry .Values.image.repository }}
+{{- end }}
+{{- else }}
+{{- if .Values.global.airgapped.repository }}
+{{- printf "%s/ambassador" .Values.global.airgapped.repository }}
+{{- else }}
+{{- printf "%s/%s/%s" .Values.ambassadorImage.registry .Values.ambassadorImage.repository .Values.ambassadorImage.image }}
+{{- end }}
+{{- end }}
+{{- end -}}
+
+{{- define "k10.ambImageTag" -}}
+{{- if .Values.global.upstreamCertifiedImages }}
+{{- if .Values.global.airgapped.repository }}
+{{- printf "k10-%s-rh-ubi" (include "k10.rhAmbassadorImageTag" .) }}
+{{- else }}
+{{- printf "%s-rh-ubi" (include "k10.rhAmbassadorImageTag" .) }}
+{{- end }}
+{{- else }}
+{{- if .Values.global.airgapped.repository }}
+{{- printf "k10-%s" (include "k10.ambassadorImageTag" .) }}
+{{- else }}
+{{- printf "%s" (include "k10.ambassadorImageTag" .) }}
+{{- end }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Check if AWS creds are specified
+*/}}
+{{- define "check.awscreds" -}}
+{{- if or .Values.secrets.awsAccessKeyId .Values.secrets.awsSecretAccessKey -}}
+{{- print true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Check if the kanister-tools image has k10- in its name;
+this means we need to overwrite the kanister image in the system.
+*/}}
+{{- define "overwite.kanisterToolsImage" -}}
+{{- if .Values.global.airgapped.repository -}}
+{{- print true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Figure out the kanisterToolsImage.image based on
+the value of airgapped.repository.
+The details on how these images are generated
+are in the issue below:
+https://kasten.atlassian.net/browse/K10-4036
+Using substr to remove the repo from kanisterToolsImage.
+*/}}
+{{- define "get.kanisterToolsImage" }}
+{{- if not .Values.global.rhMarketPlace }}
+{{- if .Values.global.airgapped.repository }}
+{{- printf "%s/%s:k10-%s" (.Values.global.airgapped.repository) (.Values.kanisterToolsImage.image) (include "k10.kanisterToolsImageTag" .) -}}
+{{- else }}
+{{- printf "%s/%s/%s:%s" (.Values.kanisterToolsImage.registry) (.Values.kanisterToolsImage.repository) (.Values.kanisterToolsImage.image) (include "k10.kanisterToolsImageTag" .)
-}} +{{- end }} +{{- else }} +{{- printf "%s" (get .Values.global.images "kanister-tools") -}} +{{- end }} +{{- end }} + +{{/* +Check if Google creds are specified +*/}} +{{- define "check.googlecreds" -}} +{{- if .Values.secrets.googleApiKey -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if IBM SL api key is specified +*/}} +{{- define "check.ibmslcreds" -}} +{{- if or .Values.secrets.ibmSoftLayerApiKey .Values.secrets.ibmSoftLayerApiUsername -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if Azure creds are specified +*/}} +{{- define "check.azurecreds" -}} +{{- if or (or .Values.secrets.azureTenantId .Values.secrets.azureClientId) .Values.secrets.azureClientSecret -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if Vsphere creds are specified +*/}} +{{- define "check.vspherecreds" -}} +{{- if or (or .Values.secrets.vsphereEndpoint .Values.secrets.vsphereUsername) .Values.secrets.vspherePassword -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if Vault creds are specified +*/}} +{{- define "check.vaultcreds" -}} +{{- if .Values.vault.secretName -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Checks and enforces only 1 set of cloud creds is specified +*/}} +{{- define "enforce.singlecloudcreds" -}} +{{- $count := dict "count" (int 0) -}} +{{- $main := . -}} +{{- range $ind, $cloud_provider := include "k10.cloudProviders" . | splitList " " }} +{{ if eq (include (printf "check.%screds" $cloud_provider) $main) "true" }} +{{ $c := add1 $count.count | set $count "count" }} +{{ if gt $count.count 1 }} +{{- fail "Credentials for different cloud providers were provided but only one is allowed. Please verify your .secrets.* values." }} +{{ end }} +{{ end }} +{{- end }} +{{- end -}} + +{{/* +Converts .Values.features into k10-features: map[string]: "value" +*/}} +{{- define "k10.features" -}} +{{ range $n, $v := .Values.features }} +{{ $n }}: {{ $v | quote -}} +{{ end }} +{{- end -}} + +{{/* +Returns a license base64 either from file or from values +or prints it for awsmarketplace or awsManagedLicense +*/}} +{{- define "k10.getlicense" -}} +{{- if .Values.metering.awsMarketplace -}} +{{- print "Y3VzdG9tZXJOYW1lOiBhd3MtbWFya2V0cGxhY2UKZGF0ZUVuZDogJzIxMDAtMDEtMDFUMDA6MDA6MDAuMDAwWicKZGF0ZVN0YXJ0OiAnMjAxOC0wOC0wMVQwMDowMDowMC4wMDBaJwpmZWF0dXJlczoKICBjbG91ZE1ldGVyaW5nOiBhd3MKaWQ6IGF3cy1ta3QtNWMxMDlmZDUtYWI0Yy00YTE0LWJiY2QtNTg3MGU2Yzk0MzRiCnByb2R1Y3Q6IEsxMApyZXN0cmljdGlvbnM6IG51bGwKdmVyc2lvbjogdjEuMC4wCnNpZ25hdHVyZTogY3ZEdTNTWHljaTJoSmFpazR3THMwTk9mcTNFekYxQ1pqLzRJMUZVZlBXS0JETHpuZmh2eXFFOGUvMDZxNG9PNkRoVHFSQlY3VFNJMzVkQzJ4alllaGp3cWwxNHNKT3ZyVERKZXNFWVdyMVFxZGVGVjVDd21HczhHR0VzNGNTVk5JQXVseGNTUG9oZ2x2UlRJRm0wVWpUOEtKTzlSTHVyUGxyRjlGMnpnK0RvM2UyTmVnamZ6eTVuMUZtd24xWUNlbUd4anhFaks0djB3L2lqSGlwTGQzWVBVZUh5Vm9mZHRodGV0YmhSUGJBVnVTalkrQllnRklnSW9wUlhpYnpTaEMvbCs0eTFEYzcyTDZXNWM0eUxMWFB1SVFQU3FjUWRiYnlwQ1dYYjFOT3B3aWtKMkpsR0thMldScFE4ZUFJNU9WQktqZXpuZ3FPa0lRUC91RFBtSXFBPT0K" -}} +{{- else if or ( .Values.metering.awsManagedLicense ) ( .Values.metering.licenseConfigSecretName ) -}} +{{- print 
"Y3VzdG9tZXJOYW1lOiBhd3MtdG90ZW0KZGF0ZUVuZDogJzIxMDAtMDEtMDFUMDA6MDA6MDAuMDAwWicKZGF0ZVN0YXJ0OiAnMjAyMS0wOS0wMVQwMDowMDowMC4wMDBaJwpmZWF0dXJlczoKICBleHRlcm5hbExpY2Vuc2U6IGF3cwogIHByb2R1Y3RTS1U6IGI4YzgyMWQ5LWJmNDAtNDE4ZC1iYTBiLTgxMjBiZjc3ZThmOQogIGtleUZpbmdlcnByaW50OiBhd3M6Mjk0NDA2ODkxMzExOkFXUy9NYXJrZXRwbGFjZTppc3N1ZXItZmluZ2VycHJpbnQKaWQ6IGF3cy1leHQtMWUxMTVlZjMtM2YyMC00MTJlLTgzODItMmE1NWUxMTc1OTFlCnByb2R1Y3Q6IEsxMApyZXN0cmljdGlvbnM6CiAgbm9kZXM6ICczJwp2ZXJzaW9uOiB2MS4wLjAKc2lnbmF0dXJlOiBkeEtLN3pPUXdzZFBOY2I1NExzV2hvUXNWeWZSVDNHVHZ0VkRuR1Vvb2VxSGlwYStTY25HTjZSNmdmdmtWdTRQNHh4RmV1TFZQU3k2VnJYeExOTE1RZmh2NFpBSHVrYmFNd3E5UXhGNkpGSmVXbTdzQmdtTUVpWVJ2SnFZVFcyMlNoakZEU1RWejY5c2JBTXNFMUd0VTdXKytITGk0dnhybjVhYkd6RkRHZW5iRE5tcXJQT3dSa3JIdTlHTFQ1WmZTNDFUL0hBMjNZZnlsTU54MGFlK2t5TGZvZXNuK3FKQzdld2NPWjh4eE94bFRJR3RuWDZ4UU5DTk5iYjhSMm5XbmljNVd0OElEc2VDR3lLMEVVRW9YL09jNFhsWVVra3FGQ0xPdVhuWDMxeFZNZ1NFQnVEWExFd3Y3K2RlSmcvb0pMaW9EVHEvWUNuM0lnem9VR2NTMGc9PQo=" -}} +{{- else if .Values.metering.redhatMarketplacePayg -}} +{{- print "Y3VzdG9tZXJOYW1lOiByZWRoYXQtbWFya2V0cGxhY2UKZGF0ZUVuZDogJzIxMDAtMDEtMDFUMDA6MDA6MDAuMDAwWicKZGF0ZVN0YXJ0OiAnMjAyMi0wMS0wMVQwMDowMDowMC4wMDBaJwpmZWF0dXJlczoKICBjbG91ZE1ldGVyaW5nOiByZWRoYXQKaWQ6IHJoLW1rdC01YzEwOWZkNS1hYjRjLTRhMTQtYmJjZC01ODcwZTZjOTQzNGIKcHJvZHVjdDogSzEwCnJlc3RyaWN0aW9uczogbnVsbAp2ZXJzaW9uOiB2MS4wLjAKc2lnbmF0dXJlOiBxYlQxUElMd2MwSjFjVUhwcWlLY1IzN0NhTXRqcjcydkZveXZ3NjhXWEFYbnRYZENhQzlGSEVQc1hkRjZCRnllUXZ5M1l4NU9RVDVIbVlZei9GQTEveTUraHZ4TlgvcGpHYTR3aThzL1VVSW9XbW1TdDNXWUhJWVZNU2RmeEtyeGtiNTIzUk1ZbDArK3VHaGpoODRkdCtZZ1A4N2dyNmMrRHRrZ21EU3FlQk44L09obksxUHVCUTAzNDJCL1U1ZjBwcjArVnVBcHp6bmJBUnJzUW5WYkthUHd2UVRzMUNZUUR3bFZuM210KzBHbkdQdlNmblR4UEg4RzJTK3cyRWh0SkgxL1Vrd3o2VDd2L0dHMXlZNUU0UmluNWlCSmRmODlDcmIxSUZHK2x4SHhYQ1g1RENWZjlTSTdYanZ4dE9mdXoxYzhwK3k3MERZYm9sbFU5ZmF1dlE9PQo=" -}} +{{- else -}} +{{- print (default (.Files.Get "license") .Values.license) -}} +{{- end -}} +{{- end -}} + +{{/* +Returns resource usage given a pod name and container name +*/}} +{{- define "k10.resource.request" -}} +{{- $resourceDefaultList := (include "k10.serviceResources" .main | fromYaml) }} +{{- $podName := .k10_service_pod_name }} +{{- $containerName := .k10_service_container_name }} +{{- $resourceValue := "" }} +{{- if (hasKey $resourceDefaultList $podName) }} + {{- $resourceValue = index (index $resourceDefaultList $podName) $containerName }} +{{- end }} +{{- if (hasKey .main.Values.resources $podName) }} + {{- if (hasKey (index .main.Values.resources $podName) $containerName) }} + {{- $resourceValue = index (index .main.Values.resources $podName) $containerName }} + {{- end }} +{{- end }} +{{- /* If no resource usage value was provided, do not include the resources section */}} +{{- /* This allows users to set unlimited resources by providing a service key that is empty (e.g. 
`--set resources.=`) */}} +{{- if $resourceValue }} +resources: +{{- $resourceValue | toYaml | trim | nindent 2 }} +{{- else if eq .main.Release.Namespace "default" }} +resources: + requests: + cpu: "0.01" +{{- end }} +{{- end -}} + +{{- define "kanisterToolsResources" }} +{{- if .Values.genericVolumeSnapshot.resources.requests.memory }} +KanisterToolsMemoryRequests: {{ .Values.genericVolumeSnapshot.resources.requests.memory | quote }} +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.requests.cpu }} +KanisterToolsCPURequests: {{ .Values.genericVolumeSnapshot.resources.requests.cpu | quote }} +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.limits.memory }} +KanisterToolsMemoryLimits: {{ .Values.genericVolumeSnapshot.resources.limits.memory | quote }} +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.limits.cpu }} +KanisterToolsCPULimits: {{ .Values.genericVolumeSnapshot.resources.limits.cpu | quote }} +{{- end }} +{{- end }} + +{{- define "get.kanisterPodCustomLabels" -}} +{{- if .Values.kanisterPodCustomLabels }} +KanisterPodCustomLabels: {{ .Values.kanisterPodCustomLabels | quote }} +{{- end }} +{{- end }} + +{{- define "get.kanisterPodCustomAnnotations" -}} +{{- if .Values.kanisterPodCustomAnnotations }} +KanisterPodCustomAnnotations: {{ .Values.kanisterPodCustomAnnotations | quote }} +{{- end }} +{{- end }} + +{{/* +Lookup and return only enabled colocated services +*/}} +{{- define "get.enabledColocatedSvcList" -}} +{{- $enabledColocatedSvcList := dict }} +{{- $colocatedList := include "k10.colocatedServiceLookup" . | fromYaml }} +{{- range $primary, $secondaryList := $colocatedList }} + {{- $enabledSecondarySvcList := list }} + {{- range $skip, $secondary := $secondaryList }} + {{- if or (not (hasKey $.Values.optionalColocatedServices $secondary)) ((index $.Values.optionalColocatedServices $secondary).enabled) }} + {{- $enabledSecondarySvcList = append $enabledSecondarySvcList $secondary }} + {{- end }} + {{- end }} + {{- if gt (len $enabledSecondarySvcList) 0 }} + {{- $enabledColocatedSvcList = set $enabledColocatedSvcList $primary $enabledSecondarySvcList }} + {{- end }} +{{- end }} +{{- $enabledColocatedSvcList | toYaml | trim | nindent 0}} +{{- end -}} + +{{- define "get.serviceContainersInPod" -}} +{{- $podService := .k10_service_pod }} +{{- $colocatedList := include "k10.colocatedServices" . | fromYaml }} +{{- $colocatedLookupByPod := include "get.enabledColocatedSvcList" .main | fromYaml }} +{{- $containerList := list $podService }} +{{- if hasKey $colocatedLookupByPod $podService }} + {{- $containerList = concat $containerList (index $colocatedLookupByPod $podService)}} +{{- end }} +{{- $containerList | join " " }} +{{- end -}} + +{{- define "get.statefulRestServicesInPod" -}} +{{- $statefulRestSvcsInPod := list }} +{{- $podService := .k10_service_pod }} +{{- $containerList := (dict "main" .main "k10_service_pod" $podService | include "get.serviceContainersInPod" | splitList " ") }} +{{- if .main.Values.global.persistence.enabled }} + {{- range $skip, $containerInPod := $containerList }} + {{- $isRestService := has $containerInPod (include "k10.restServices" . | splitList " ") }} + {{- $isStatelessService := has $containerInPod (include "k10.statelessServices" . 
| splitList " ") }} + {{- if and $isRestService (not $isStatelessService) }} + {{- $statefulRestSvcsInPod = append $statefulRestSvcsInPod $containerInPod }} + {{- end }} + {{- end }} +{{- end }} +{{- $statefulRestSvcsInPod | join " " }} +{{- end -}} + +{{- define "k10.ingressPath" -}} + {{- if and .Values.global.ingress.create .Values.global.route.enabled -}} + {{ fail "Either enable ingress or route"}} + {{- end -}} + {{- if .Values.global.ingress.create -}} + {{ if .Values.global.ingress.urlPath }} + {{- print .Values.global.ingress.urlPath -}} + {{ else }} + {{- print .Release.Name -}} + {{- end -}} + {{- else if .Values.global.route.enabled -}} + {{ if .Values.global.route.path }} + {{- print .Values.global.route.path -}} + {{ else }} + {{- print .Release.Name -}} + {{- end -}} + {{ else }} + {{- print .Release.Name -}} + {{- end -}} +{{- end -}} + + +{{/* +Check if encryption keys are specified +*/}} +{{- define "check.primaryKey" -}} +{{- if (or .Values.encryption.primaryKey.awsCmkKeyId .Values.encryption.primaryKey.vaultTransitKeyName) -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{- define "check.validateMonitoringProperties" -}} +{{- include "check.monitoringPrefix" . -}} +{{- include "check.monitoringFullNameOverride" . -}} +{{- end -}} + +{{- define "check.monitoringPrefix" -}} +{{- if eq .Values.prometheus.server.enabled .Values.grafana.enabled -}} +{{- if not (eq .Values.prometheus.server.prefixURL .Values.grafana.prometheusPrefixURL) -}} +{{ fail "Prometheus and Grafana prefixURL should match. Please check values of prometheus.server.prefixURL and grafana.prometheusPrefixURL" }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "check.monitoringFullNameOverride" -}} +{{- if eq .Values.prometheus.server.enabled .Values.grafana.enabled -}} +{{- if not (eq .Values.prometheus.server.fullnameOverride .Values.grafana.prometheusName) -}} +{{ fail "The Prometheus name overrides must match. Please check values of prometheus.server.fullnameOverride and grafana.prometheusName" }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "check.validateImagePullSecrets" -}} + {{/* Validate image pull secrets if a custom Docker config is provided */}} + {{- if (or .Values.secrets.dockerConfig .Values.secrets.dockerConfigPath ) -}} + {{- if (and .Values.grafana.enabled (not .Values.global.imagePullSecret) (not .Values.grafana.image.pullSecrets)) -}} + {{ fail "A custom Docker config was provided, but Grafana is not configured to use it. Please check that global.imagePullSecret is set correctly." }} + {{- end -}} + {{- if (and .Values.prometheus.server.enabled (not .Values.global.imagePullSecret) (not .Values.prometheus.imagePullSecrets)) -}} + {{ fail "A custom Docker config was provided, but Prometheus is not configured to use it. Please check that global.imagePullSecret is set correctly." }} + {{- end -}} + {{- end -}} +{{- end -}} + +{{- define "k10.imagePullSecrets" }} +{{- $imagePullSecrets := list .Values.global.imagePullSecret }}{{/* May be empty, but the compact below will handle that */}} +{{- if (or .Values.secrets.dockerConfig .Values.secrets.dockerConfigPath) }} + {{- $imagePullSecrets = concat $imagePullSecrets (list "k10-ecr") }} +{{- end }} +{{- $imagePullSecrets = $imagePullSecrets | compact | uniq }} + +{{- if $imagePullSecrets }} +imagePullSecrets: + {{- range $imagePullSecrets }} + {{/* Check if the name is not empty string */}} + - name: {{ . 
}}
+ {{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+The helper template functions below are adapted from the chart
+https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus/templates/_helpers.tpl
+*/}}
+
+{{/*
+Return the Kubernetes version
+*/}}
+{{- define "k10.kubeVersion" -}}
+ {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "ingress.apiVersion" -}}
+ {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "k10.kubeVersion" .)) -}}
+ {{- print "networking.k8s.io/v1" -}}
+ {{- else if .Capabilities.APIVersions.Has "extensions/v1beta1" -}}
+ {{- print "extensions/v1beta1" -}}
+ {{- else -}}
+ {{- print "networking.k8s.io/v1beta1" -}}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Is ingress part of the stable APIVersion.
+*/}}
+{{- define "ingress.isStable" -}}
+ {{- eq (include "ingress.apiVersion" .) "networking.k8s.io/v1" -}}
+{{- end -}}
diff --git a/charts/k10/k10/4.5.900/templates/_k10_container.tpl b/charts/k10/k10/4.5.900/templates/_k10_container.tpl
new file mode 100644
index 000000000..1fec05488
--- /dev/null
+++ b/charts/k10/k10/4.5.900/templates/_k10_container.tpl
@@ -0,0 +1,652 @@
+{{- define "k10-containers" }}
+{{- $pod := .k10_pod }}
+{{- with .main }}
+{{- $main_context := . }}
+{{- $colocatedList := include "k10.colocatedServices" . | fromYaml }}
+{{- $containerList := (dict "main" $main_context "k10_service_pod" $pod | include "get.serviceContainersInPod" | splitList " ") }}
+ containers:
+{{- range $skip, $container := $containerList }}
+ {{- $port := default $main_context.Values.service.externalPort (index $colocatedList $container).port }}
+ {{- $serviceStateful := has $container (dict "main" $main_context "k10_service_pod" $pod | include "get.statefulRestServicesInPod" | splitList " ") }}
+ {{- dict "main" $main_context "k10_pod" $pod "k10_container" $container "externalPort" $port "stateful" $serviceStateful | include "k10-container" }}
+{{- end }}
+{{- end }}{{/* with .main */}}
+{{- end }}{{/* define "k10-containers" */}}
+
+{{- define "k10-container" }}
+{{- $pod := .k10_pod }}
+{{- $service := .k10_container }}
+{{- $externalPort := .externalPort }}
+{{- with .main }}
+ - name: {{ $service }}-svc
+ {{- dict "main" . "k10_service" $service | include "serviceImage" | indent 8 }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+{{- if eq $service "aggregatedapis" }}
+ args:
+ - "--secure-port={{ .Values.service.aggregatedApiPort }}"
+ - "--cert-dir=/tmp/apiserver.local.config/certificates/"
+{{- if .Values.useNamespacedAPI }}
+ - "--k10-api-domain={{ template "apiDomain" . }}"
+{{- end }}{{/* .Values.useNamespacedAPI */}}
+{{/*
+We need this explicit conversion because installation using operator hub was failing,
+stating that the types are not the same for the equality check.
+*/}}
+{{- else if not (eq (int .Values.service.externalPort) (int $externalPort) ) }}
+ args:
+ - "--port={{ $externalPort }}"
+ - "--host=0.0.0.0"
+{{- end }}{{/* eq $service "aggregatedapis" */}}
+{{- $podName := (printf "%s-svc" $service) }}
+{{- $containerName := (printf "%s-svc" $service) }}
+{{- dict "main" .
"k10_service_pod_name" $podName "k10_service_container_name" $containerName | include "k10.resource.request" | indent 8}} + ports: +{{- if eq $service "aggregatedapis" }} + - containerPort: {{ .Values.service.aggregatedApiPort }} +{{- else }} + - containerPort: {{ $externalPort }} +{{- end }} +{{- if eq $service "logging" }} + - containerPort: 24224 + protocol: TCP + - containerPort: 24225 + protocol: TCP +{{- end }} + livenessProbe: +{{- if eq $service "aggregatedapis" }} + tcpSocket: + port: {{ .Values.service.aggregatedApiPort }} + timeoutSeconds: 5 +{{- else }} + httpGet: + path: /v0/healthz + port: {{ $externalPort }} + timeoutSeconds: 1 +{{- end }} + initialDelaySeconds: 300 +{{- if ne $service "aggregatedapis" }} + readinessProbe: + httpGet: + path: /v0/healthz + port: {{ $externalPort }} + initialDelaySeconds: 3 +{{- end }} + env: +{{- if eq (include "check.googlecreds" .) "true" }} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/var/run/secrets/kasten.io/kasten-gke-sa.json" +{{- end }} +{{- if eq (include "check.ibmslcreds" .) "true" }} + - name: IBM_SL_API_KEY + valueFrom: + secretKeyRef: + name: ibmsl-secret + key: ibm_sl_key + - name: IBM_SL_API_USERNAME + valueFrom: + secretKeyRef: + name: ibmsl-secret + key: ibm_sl_username +{{- end }} +{{- if eq (include "check.azurecreds" .) "true" }} + - name: AZURE_TENANT_ID + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_tenant_id + - name: AZURE_CLIENT_ID + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_client_id + - name: AZURE_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_client_secret +{{- if .Values.secrets.azureResourceGroup }} + - name: AZURE_RESOURCE_GROUP + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_resource_group +{{- end }} +{{- if .Values.secrets.azureSubscriptionID }} + - name: AZURE_SUBSCRIPTION_ID + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_subscription_id +{{- end }} +{{- if .Values.secrets.azureResourceMgrEndpoint }} + - name: AZURE_RESOURCE_MANAGER_ENDPOINT + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_resource_manager_endpoint +{{- end }} +{{- if .Values.secrets.azureADEndpoint }} + - name: AZURE_AD_ENDPOINT + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_ad_endpoint +{{- end }} +{{- if .Values.secrets.azureADResourceID }} + - name: AZURE_AD_RESOURCE + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_ad_resource_id +{{- end }} +{{- end }} +{{- if eq (include "check.awscreds" .) "true" }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: aws-creds + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws-creds + key: aws_secret_access_key +{{- if .Values.secrets.awsIamRole }} + - name: K10_AWS_IAM_ROLE + valueFrom: + secretKeyRef: + name: aws-creds + key: role +{{- end }} +{{- end }} +{{- if eq (include "check.vaultcreds" .) "true" }} + - name: VAULT_ADDR + value: {{ .Values.vault.address }} + - name: VAULT_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.vault.secretName }} + key: vault_token +{{- end }} +{{- if eq (include "check.vspherecreds" .) 
"true" }} + - name: VSPHERE_ENDPOINT + valueFrom: + secretKeyRef: + name: vsphere-creds + key: vsphere_endpoint + - name: VSPHERE_USERNAME + valueFrom: + secretKeyRef: + name: vsphere-creds + key: vsphere_username + - name: VSPHERE_PASSWORD + valueFrom: + secretKeyRef: + name: vsphere-creds + key: vsphere_password +{{- end }} + - name: VERSION + valueFrom: + configMapKeyRef: + name: k10-config + key: version +{{- if .Values.clusterName }} + - name: CLUSTER_NAME + valueFrom: + configMapKeyRef: + name: k10-config + key: clustername +{{- end }} +{{- if eq $service "config" }} + - name: K10_STATEFUL + value: "{{ .Values.global.persistence.enabled }}" +{{- end }} + - name: MODEL_STORE_DIR +{{- if or (eq $service "state") (not .Values.global.persistence.enabled) }} + value: "/tmp/k10store" +{{- else }} + valueFrom: + configMapKeyRef: + name: k10-config + key: modelstoredirname +{{- end }} +{{- if or (eq $service "kanister") (eq $service "executor")}} + - name: DATA_MOVER_IMAGE + value: {{ default .Chart.AppVersion .Values.image.tag | print .Values.image.registry "/" .Values.image.repository "/datamover:" }} + - name: KANISTER_POD_READY_WAIT_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterPodReadyWaitTimeout +{{- end }} + - name: LOG_LEVEL + valueFrom: + configMapKeyRef: + name: k10-config + key: loglevel +{{- if .Values.kanisterPodCustomLabels }} + - name: KANISTER_POD_CUSTOM_LABELS + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterPodCustomLabels +{{- end }} +{{- if .Values.kanisterPodCustomAnnotations }} + - name: KANISTER_POD_CUSTOM_ANNOTATIONS + valueFrom: + configMapKeyRef: + name: k10-config + key: kanisterPodCustomAnnotations +{{- end }} + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONCURRENT_SNAP_CONVERSIONS + valueFrom: + configMapKeyRef: + name: k10-config + key: concurrentSnapConversions + - name: CONCURRENT_WORKLOAD_SNAPSHOTS + valueFrom: + configMapKeyRef: + name: k10-config + key: concurrentWorkloadSnapshots + - name: K10_DATA_STORE_PARALLEL_UPLOAD + valueFrom: + configMapKeyRef: + name: k10-config + key: k10DataStoreParallelUpload + - name: K10_DATA_STORE_GENERAL_CONTENT_CACHE_SIZE_MB + valueFrom: + configMapKeyRef: + name: k10-config + key: k10DataStoreGeneralContentCacheSizeMB + - name: K10_DATA_STORE_GENERAL_METADATA_CACHE_SIZE_MB + valueFrom: + configMapKeyRef: + name: k10-config + key: k10DataStoreGeneralMetadataCacheSizeMB + - name: K10_DATA_STORE_RESTORE_CONTENT_CACHE_SIZE_MB + valueFrom: + configMapKeyRef: + name: k10-config + key: k10DataStoreRestoreContentCacheSizeMB + - name: K10_DATA_STORE_RESTORE_METADATA_CACHE_SIZE_MB + valueFrom: + configMapKeyRef: + name: k10-config + key: k10DataStoreRestoreMetadataCacheSizeMB + - name: K10_LIMITER_GENERIC_VOLUME_SNAPSHOTS + valueFrom: + configMapKeyRef: + name: k10-config + key: K10LimiterGenericVolumeSnapshots + - name: K10_LIMITER_GENERIC_VOLUME_COPIES + valueFrom: + configMapKeyRef: + name: k10-config + key: K10LimiterGenericVolumeCopies + - name: K10_LIMITER_GENERIC_VOLUME_RESTORES + valueFrom: + configMapKeyRef: + name: k10-config + key: K10LimiterGenericVolumeRestores + - name: K10_LIMITER_CSI_SNAPSHOTS + valueFrom: + configMapKeyRef: + name: k10-config + key: K10LimiterCsiSnapshots + - name: K10_LIMITER_PROVIDER_SNAPSHOTS + valueFrom: + configMapKeyRef: + name: k10-config + key: K10LimiterProviderSnapshots + - name: AWS_ASSUME_ROLE_DURATION + valueFrom: + configMapKeyRef: + name: k10-config + key: AWSAssumeRoleDuration +{{- if (eq 
$service "executor") }} + - name: KANISTER_BACKUP_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterBackupTimeout + - name: KANISTER_RESTORE_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterRestoreTimeout + - name: KANISTER_DELETE_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterDeleteTimeout + - name: KANISTER_HOOK_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterHookTimeout + - name: KANISTER_CHECKREPO_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterCheckRepoTimeout + - name: KANISTER_STATS_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterStatsTimeout + - name: KANISTER_EFSPOSTRESTORE_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterEFSPostRestoreTimeout +{{- end }} +{{- if and (eq $service "executor") (.Values.awsConfig.efsBackupVaultName) }} + - name: EFS_BACKUP_VAULT_NAME + valueFrom: + configMapKeyRef: + name: k10-config + key: efsBackupVaultName +{{- end }} +{{- if and (eq $service "executor") (.Values.vmWare.taskTimeoutMin) }} + - name: VMWARE_GOM_TIMEOUT_MIN + valueFrom: + configMapKeyRef: + name: k10-config + key: vmWareTaskTimeoutMin +{{- end }} +{{- if .Values.useNamespacedAPI }} + - name: K10_API_DOMAIN + valueFrom: + configMapKeyRef: + name: k10-config + key: apiDomain +{{- end }} +{{- if .Values.jaeger.enabled }} + - name: JAEGER_AGENT_HOST + value: {{ .Values.jaeger.agentDNS }} +{{- end }} +{{- if .Values.auth.tokenAuth.enabled }} + - name: TOKEN_AUTH + valueFrom: + secretKeyRef: + name: k10-token-auth + key: auth +{{- end }} +{{- if eq "true" (include "overwite.kanisterToolsImage" .) }} + - name: KANISTER_TOOLS + valueFrom: + configMapKeyRef: + name: k10-config + key: overwriteKanisterTools +{{- end }} +{{- if eq (include "check.cacertconfigmap" .) 
"true" }} + - name: CACERT_CONFIGMAP_NAME + value: {{ .Values.cacertconfigmap.name }} +{{- end }} + - name: K10_RELEASE_NAME + value: {{ .Release.Name }} + - name: KANISTER_FUNCTION_VERSION + valueFrom: + configMapKeyRef: + name: k10-config + key: kanisterFunctionVersion +{{- if and (eq $service "config") (.Values.injectKanisterSidecar.enabled) }} + - name: K10_MUTATING_WEBHOOK_ENABLED + value: "true" + - name: K10_MUTATING_WEBHOOK_TLS_CERT_DIR + valueFrom: + configMapKeyRef: + name: k10-config + key: K10MutatingWebhookTLSCertDir + - name: K10_MUTATING_WEBHOOK_PORT + value: {{ .Values.injectKanisterSidecar.webhookServer.port | quote }} +{{- end }} +{{- if or (eq $service "config") (eq $service "kanister") }} +{{- if .Values.genericVolumeSnapshot.resources.requests.memory }} + - name: KANISTER_TOOLS_MEMORY_REQUESTS + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterToolsMemoryRequests +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.requests.cpu }} + - name: KANISTER_TOOLS_CPU_REQUESTS + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterToolsCPURequests +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.limits.memory }} + - name: KANISTER_TOOLS_MEMORY_LIMITS + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterToolsMemoryLimits +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.limits.cpu }} + - name: KANISTER_TOOLS_CPU_LIMITS + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterToolsCPULimits +{{- end }} +{{- end }} +{{- if (list "dashboardbff" "config" "executor" | has $service) }} + {{- if .Values.prometheus.server.enabled }} + - name: K10_PROMETHEUS_HOST + value: {{ include "k10.prometheus.service.name" . }}-exp + - name: K10_PROMETHEUS_PORT + value: {{ .Values.prometheus.server.service.servicePort | quote }} + - name: K10_PROMETHEUS_BASE_URL + value: {{ .Values.prometheus.server.baseURL }} + {{- end }} + - name: K10_GRAFANA_ENABLED + value: {{ .Values.grafana.enabled | quote }} +{{- end }} +{{- if or $.stateful (or (eq (include "check.googlecreds" .) "true") (eq $service "auth" "logging")) }} + volumeMounts: +{{- else if or (or (eq (include "basicauth.check" .) "true") (or .Values.auth.oidcAuth.enabled (eq (include "check.dexAuth" .) "true"))) .Values.features }} + volumeMounts: +{{- else if and (eq $service "config") (.Values.injectKanisterSidecar.enabled) }} + volumeMounts: +{{- else if eq (include "check.cacertconfigmap" .) "true" }} + volumeMounts: +{{- end }} +{{- if $.stateful }} + - name: {{ $service }}-persistent-storage + mountPath: {{ .Values.global.persistence.mountPath | quote }} +{{- end }} +{{- if .Values.features }} + - name: k10-features + mountPath: "/mnt/k10-features" +{{- end }} +{{- if eq $service "logging" }} + - name: logging-configmap-storage + mountPath: "/mnt/conf" +{{- end }} +{{- if and (eq $service "config") (.Values.injectKanisterSidecar.enabled) }} + - name: mutating-webhook-certs + mountPath: /etc/ssl/certs/webhook + readOnly: true +{{- end }} +{{- if eq (include "basicauth.check" .) "true" }} + - name: k10-basic-auth + mountPath: "/var/run/secrets/kasten.io/k10-basic-auth" + readOnly: true +{{- end }} +{{- if (or .Values.auth.oidcAuth.enabled (eq (include "check.dexAuth" .) "true")) }} + - name: k10-oidc-auth + mountPath: "/var/run/secrets/kasten.io/k10-oidc-auth" + readOnly: true +{{- end }} +{{- if eq (include "check.googlecreds" .) "true" }} + - name: service-account + mountPath: "/var/run/secrets/kasten.io" +{{- end }} +{{- if eq (include "check.cacertconfigmap" .) 
"true" }} + - name: {{ .Values.cacertconfigmap.name }} + mountPath: "/etc/ssl/certs/custom-ca-bundle.pem" + subPath: custom-ca-bundle.pem +{{- end }} +{{- if .Values.toolsImage.enabled }} +{{- if eq $service "executor" }} + - name: tools + {{- dict "main" . "k10_service" "cephtool" | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ .Values.toolsImage.pullPolicy }} +{{- $podName := (printf "%s-svc" $service) }} +{{- dict "main" . "k10_service_pod_name" $podName "k10_service_container_name" "tools" | include "k10.resource.request" | indent 8}} +{{- end }} +{{- end }} {{/* .Values.toolsImage.enabled */}} +{{- if and (eq $service "catalog") $.stateful }} + - name: kanister-sidecar + image: {{ include "get.kanisterToolsImage" .}} + imagePullPolicy: {{ .Values.kanisterToolsImage.pullPolicy }} +{{- $podName := (printf "%s-svc" $service) }} +{{- dict "main" . "k10_service_pod_name" $podName "k10_service_container_name" "kanister-sidecar" | include "k10.resource.request" | indent 8}} + volumeMounts: + - name: {{ $service }}-persistent-storage + mountPath: {{ .Values.global.persistence.mountPath | quote }} +{{- if eq (include "check.cacertconfigmap" .) "true" }} + - name: {{ .Values.cacertconfigmap.name }} + mountPath: "/etc/ssl/certs/custom-ca-bundle.pem" + subPath: custom-ca-bundle.pem +{{- end }} +{{- end }} {{/* and (eq $service "catalog") $.stateful */}} +{{- if and ( eq $service "auth" ) ( or .Values.auth.dex.enabled (eq (include "check.dexAuth" .) "true")) }} + - name: dex + image: {{ include "k10.dexImage" . }} +{{- if .Values.auth.ldap.enabled }} + command: ["/usr/local/bin/dex", "serve", "/dex-config/config.yaml"] +{{- else }} + command: ["/usr/local/bin/dex", "serve", "/etc/dex/cfg/config.yaml"] +{{- end }} + ports: + - name: http + containerPort: 8080 + volumeMounts: +{{- if .Values.auth.ldap.enabled }} + - name: dex-config + mountPath: /dex-config + - name: k10-logos-dex + mountPath: /web/themes/custom/ +{{- else }} + - name: config + mountPath: /etc/dex/cfg +{{- end }} +{{- if eq (include "check.cacertconfigmap" .) "true" }} + - name: {{ .Values.cacertconfigmap.name }} + mountPath: "/etc/ssl/certs/custom-ca-bundle.pem" + subPath: custom-ca-bundle.pem +{{- end }} +{{- end }} {{/* end of dex check */}} +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-container" */}} + +{{- define "k10-init-container-header" }} +{{- $pod := .k10_pod }} +{{- with .main }} +{{- $main_context := . }} +{{- $containerList := (dict "main" $main_context "k10_service_pod" $pod | include "get.serviceContainersInPod" | splitList " ") }} +{{- $needsInitContainersHeader := false }} +{{- range $skip, $service := $containerList }} +{{- $serviceStateful := has $service (dict "main" $main_context "k10_service_pod" $pod | include "get.statefulRestServicesInPod" | splitList " ") }} + {{- if and ( eq $service "auth" ) $main_context.Values.auth.ldap.enabled }} + {{- $needsInitContainersHeader = true }} + {{- else if $serviceStateful }} + {{- $needsInitContainersHeader = true }} + {{- end }}{{/* initContainers header needed check */}} +{{- end }}{{/* range $skip, $service := $containerList */}} +{{- if $needsInitContainersHeader }} + initContainers: +{{- end }} +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-init-container-header" */}} + +{{- define "k10-init-container" }} +{{- $pod := .k10_pod }} +{{- with .main }} +{{- $main_context := . 
}} +{{- $containerList := (dict "main" $main_context "k10_service_pod" $pod | include "get.serviceContainersInPod" | splitList " ") }} +{{- range $skip, $service := $containerList }} +{{- $serviceStateful := has $service (dict "main" $main_context "k10_service_pod" $pod | include "get.statefulRestServicesInPod" | splitList " ") }} +{{- if and ( eq $service "auth" ) $main_context.Values.auth.ldap.enabled }} + - name: dex-init + command: + - /dex/dexconfigmerge + args: + - --config-path=/etc/dex/cfg/config.yaml + - --secret-path=/var/run/secrets/kasten.io/bind-secret/bindPW + - --new-config-path=/dex-config/config.yaml + - --secret-field=bindPW + {{- dict "main" $main_context "k10_service" $service | include "serviceImage" | indent 8 }} + volumeMounts: + - mountPath: /etc/dex/cfg + name: config + - mountPath: /dex-config + name: dex-config + - name: bind-secret + mountPath: "/var/run/secrets/kasten.io/bind-secret" + readOnly: true +{{- else if $serviceStateful }} + - name: upgrade-init + securityContext: + runAsUser: 0 + allowPrivilegeEscalation: true + {{- dict "main" $main_context "k10_service" "upgrade" | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ $main_context.Values.image.pullPolicy }} + env: + - name: MODEL_STORE_DIR + valueFrom: + configMapKeyRef: + name: k10-config + key: modelstoredirname + volumeMounts: + - name: {{ $service }}-persistent-storage + mountPath: {{ $main_context.Values.global.persistence.mountPath | quote }} +{{- if eq $service "catalog" }} + - name: schema-upgrade-check + {{- dict "main" $main_context "k10_service" $service | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ $main_context.Values.image.pullPolicy }} + env: +{{- if $main_context.Values.clusterName }} + - name: CLUSTER_NAME + valueFrom: + configMapKeyRef: + name: k10-config + key: clustername +{{- end }} + - name: INIT_CONTAINER + value: "true" + - name: K10_RELEASE_NAME + value: {{ $main_context.Release.Name }} + - name: LOG_LEVEL + valueFrom: + configMapKeyRef: + name: k10-config + key: loglevel + - name: MODEL_STORE_DIR + valueFrom: + configMapKeyRef: + name: k10-config + key: modelstoredirname + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: VERSION + valueFrom: + configMapKeyRef: + name: k10-config + key: version + volumeMounts: + - name: {{ $service }}-persistent-storage + mountPath: {{ $main_context.Values.global.persistence.mountPath | quote }} +{{- end }}{{/* eq $service "catalog" */}} +{{- end }}{{/* initContainers definitions */}} +{{- end }}{{/* range $skip, $service := $containerList */}} +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-init-container" */}} diff --git a/charts/k10/k10/4.5.900/templates/_k10_metering.tpl b/charts/k10/k10/4.5.900/templates/_k10_metering.tpl new file mode 100644 index 000000000..5f3ecc1f3 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/_k10_metering.tpl @@ -0,0 +1,261 @@ +{{/* Generate service spec */}} +{{/* because of https://github.com/GoogleCloudPlatform/marketplace-k8s-app-tools/issues/165 +we have to start using .Values.reportingSecret instead +of correct version .Values.metering.reportingSecret */}} +{{- define "k10-metering" }} +{{ $service := .k10_service }} +{{- with .main }} +{{- if $.stateful }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: {{ .Release.Namespace }} + name: {{ $service }}-pv-claim + labels: +{{ include "helm.labels" . 
| indent 4 }} + component: {{ $service }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default .Values.global.persistence.size (index .Values.global.persistence $service "size") }} +{{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} +{{- end }} +--- +{{- end }}{{/* if $.stateful */}} +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: {{ include "fullname" . }}-metering-config +data: + config: | +{{- if .Values.metering.reportingKey }} + identities: + - name: gcp + gcp: + encodedServiceAccountKey: {{ .Values.metering.reportingKey }} +{{- end }} + metrics: + - name: node_time + type: int + passthrough: {} + endpoints: + - name: on_disk +{{- if .Values.metering.reportingKey }} + - name: servicecontrol +{{- end }} + endpoints: + - name: on_disk + disk: +{{- if .Values.global.persistence.enabled }} + reportDir: /var/reports/ubbagent/reports +{{- else }} + reportDir: /tmp/reports/ubbagent/reports +{{- end }} + expireSeconds: 3600 +{{- if .Values.metering.reportingKey }} + - name: servicecontrol + servicecontrol: + identity: gcp + serviceName: kasten-k10.mp-kasten-public.appspot.com + consumerId: {{ .Values.metering.consumerId }} +{{- end }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: {{ .Release.Namespace }} + name: {{ $service }}-svc + labels: +{{ include "helm.labels" . | indent 4 }} + component: {{ $service }} +spec: + replicas: {{ $.replicas }} + strategy: + type: Recreate + selector: + matchLabels: +{{ include "k10.common.matchLabels" . | indent 6 }} + component: {{ $service }} + run: {{ $service }}-svc + template: + metadata: + annotations: + checksum/config: {{ include (print .Template.BasePath "/k10-config.yaml") . | sha256sum }} + checksum/secret: {{ include (print .Template.BasePath "/secrets.yaml") . | sha256sum }} + labels: +{{ include "helm.labels" . | indent 8 }} + component: {{ $service }} + run: {{ $service }}-svc + spec: + securityContext: +{{ toYaml .Values.services.securityContext | indent 8 }} + serviceAccountName: {{ template "meteringServiceAccountName" . }} + {{- include "k10.imagePullSecrets" . | indent 6 }} +{{- if $.stateful }} + initContainers: + - name: upgrade-init + securityContext: + runAsUser: 0 + allowPrivilegeEscalation: true + {{- dict "main" . "k10_service" "upgrade" | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: MODEL_STORE_DIR + value: /var/reports/ + volumeMounts: + - name: {{ $service }}-persistent-storage + mountPath: /var/reports/ +{{- end }} + containers: + - name: {{ $service }}-svc + {{- dict "main" . "k10_service" $service | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ .Values.image.pullPolicy }} +{{- if eq .Release.Namespace "default" }} +{{- $podName := (printf "%s-svc" $service) }} +{{- $containerName := (printf "%s-svc" $service) }} +{{- dict "main" . 
"k10_service_pod_name" $podName "k10_service_container_name" $containerName | include "k10.resource.request" | indent 8}} +{{- end }} + ports: + - containerPort: {{ .Values.service.externalPort }} + livenessProbe: + httpGet: + path: /v0/healthz + port: {{ .Values.service.externalPort }} + initialDelaySeconds: 90 + timeoutSeconds: 1 + env: + - name: VERSION + valueFrom: + configMapKeyRef: + name: k10-config + key: version +{{- if .Values.clusterName }} + - name: CLUSTER_NAME + valueFrom: + configMapKeyRef: + name: k10-config + key: clustername +{{- end }} + - name: LOG_LEVEL + valueFrom: + configMapKeyRef: + name: k10-config + key: loglevel + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{{- if .Values.useNamespacedAPI }} + - name: K10_API_DOMAIN + valueFrom: + configMapKeyRef: + name: k10-config + key: apiDomain +{{- end }} + - name: AGENT_CONFIG_FILE + value: /var/ubbagent/config.yaml + - name: AGENT_STATE_DIR +{{- if .Values.global.persistence.enabled }} + value: "/var/reports/ubbagent" +{{- else }} + value: "/tmp/reports/ubbagent" + - name: K10_REPORTING_DIR + value: "/tmp/reports/k10/syncV2" + - name: K10SYNCSTATUSDIR + value: "/tmp/reports/k10" + - name: GRACE_PERIOD_STORE + value: /tmp/reports/clustergraceperiod + - name: NODE_USAGE_STORE + value: /tmp/reports/node_usage_history +{{- end }} +{{- if eq "true" (include "overwite.kanisterToolsImage" .) }} + - name: KANISTER_TOOLS + valueFrom: + configMapKeyRef: + name: k10-config + key: overwriteKanisterTools +{{- end }} +{{- if .Values.metering.awsRegion }} + - name: AWS_REGION + value: {{ .Values.metering.awsRegion }} +{{- end }} +{{- if .Values.metering.mode }} + - name: K10REPORTMODE + value: {{ .Values.metering.mode }} +{{- end }} +{{- if .Values.metering.reportCollectionPeriod }} + - name: K10_REPORT_COLLECTION_PERIOD + value: {{ .Values.metering.reportCollectionPeriod | quote }} +{{- end }} +{{- if .Values.metering.reportPushPeriod }} + - name: K10_REPORT_PUSH_PERIOD + value: {{ .Values.metering.reportPushPeriod | quote }} +{{- end }} +{{- if .Values.metering.promoID }} + - name: K10_PROMOTION_ID + value: {{ .Values.metering.promoID }} +{{- end }} +{{- if .Values.reportingSecret }} + - name: AGENT_CONSUMER_ID + valueFrom: + secretKeyRef: + name: {{ .Values.reportingSecret }} + key: consumer-id + - name: AGENT_REPORTING_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.reportingSecret }} + key: reporting-key + - name: K10_RELEASE_NAME + value: {{ .Release.Name }} +{{- end }} +{{- if .Values.metering.licenseConfigSecretName }} + - name: AWS_WEB_IDENTITY_REFRESH_TOKEN_FILE + value: "/var/run/secrets/product-license/license_token" + - name: AWS_ROLE_ARN + valueFrom: + secretKeyRef: + name: {{ .Values.metering.licenseConfigSecretName }} + key: iam_role +{{- end }} + volumeMounts: + - name: meter-config + mountPath: /var/ubbagent +{{- if $.stateful }} + - name: {{ $service }}-persistent-storage + mountPath: /var/reports/ +{{- end }} +{{- if .Values.metering.licenseConfigSecretName }} + - name: awsmp-product-license + mountPath: "/var/run/secrets/product-license" +{{- end }} + volumes: + - name: meter-config + configMap: + name: {{ include "fullname" . 
}}-metering-config
+ items:
+ - key: config
+ path: config.yaml
+{{- if $.stateful }}
+ - name: {{ $service }}-persistent-storage
+ persistentVolumeClaim:
+ claimName: {{ $service }}-pv-claim
+{{- end }}
+{{- if .Values.metering.licenseConfigSecretName }}
+ - name: awsmp-product-license
+ secret:
+ secretName: {{ .Values.metering.licenseConfigSecretName }}
+{{- end }}
+---
+{{- end }}{{/* with .main */}}
+{{- end }}{{/* define "k10-metering" */}}
diff --git a/charts/k10/k10/4.5.900/templates/_k10_serviceimage.tpl b/charts/k10/k10/4.5.900/templates/_k10_serviceimage.tpl
new file mode 100644
index 000000000..d9e69a8a4
--- /dev/null
+++ b/charts/k10/k10/4.5.900/templates/_k10_serviceimage.tpl
@@ -0,0 +1,51 @@
+{{/*
+Helper to get the k10 service image.
+The details on how these images are generated
+are in the issue below:
+https://kasten.atlassian.net/browse/K10-4036
+Using substr to remove the repo from ambassadorImage.
+*/}}
+{{- define "serviceImage" -}}
+{{/*
+we maintain the field .Values.images so it can be overridden when
+we install the chart for the red hat marketplace. If the value is
+not specified we use the earlier flow; if it is, we use the
+value that is specified.
+*/}}
+{{- if not .main.Values.global.rhMarketPlace }}
+{{- $serviceImage := "" -}}
+{{- $tagFromDefs := "" -}}
+{{- if .main.Values.global.airgapped.repository }}
+{{- $serviceImage = default .main.Chart.AppVersion .main.Values.image.tag | print .main.Values.global.airgapped.repository "/" .k10_service ":" }}
+{{- else if contains .main.Values.image.registry .main.Values.image.repository }}
+{{- $serviceImage = default .main.Chart.AppVersion .main.Values.image.tag | print .main.Values.image.repository "/" .k10_service ":" }}
+{{- else }}
+{{- $serviceImage = default .main.Chart.AppVersion .main.Values.image.tag | print .main.Values.image.registry "/" .main.Values.image.repository "/" .k10_service ":" }}
+{{- end }}{{/* if .main.Values.global.airgapped.repository */}}
+{{- $serviceImageKey := print (replace "-" "" .k10_service) "Image" }}
+{{- if eq $serviceImageKey "ambassadorImage" }}
+{{- $tagFromDefs = (include "k10.ambassadorImageTag" .) }}
+{{- else if eq $serviceImageKey "dexImage" }}
+{{- $tagFromDefs = (include "k10.dexImageTag" .)
}}
+{{- end }}{{/* if eq $serviceImageKey "ambassadorImage" */}}
+{{- if index .main.Values $serviceImageKey }}
+{{- $service_values := index .main.Values $serviceImageKey }}
+{{- if .main.Values.global.airgapped.repository }}
+{{ $valuesImage := (splitList "/" (index $service_values "image")) }}
+{{- if $tagFromDefs }}
+image: {{ printf "%s/%s:k10-%s" .main.Values.global.airgapped.repository (index $valuesImage (sub (len $valuesImage) 1) ) $tagFromDefs -}}
+{{- end }}
+{{- else }}{{/* .main.Values.global.airgapped.repository */}}
+{{- if $tagFromDefs }}
+image: {{ printf "%s:%s" (index $service_values "image") $tagFromDefs }}
+{{- else }}
+image: {{ index $service_values "image" }}
+{{- end }}
+{{- end }}{{/* .main.Values.global.airgapped.repository */}}
+{{- else }}
+image: {{ $serviceImage }}
+{{- end -}}{{/* index .main.Values $serviceImageKey */}}
+{{- else }}
+image: {{ printf "%s" (get .main.Values.global.images .k10_service) }}
+{{- end }}{{/* if not .main.Values.global.rhMarketPlace */}}
+{{- end -}}{{/* define "serviceImage" */}}
diff --git a/charts/k10/k10/4.5.900/templates/_k10_template.tpl b/charts/k10/k10/4.5.900/templates/_k10_template.tpl
new file mode 100644
index 000000000..30a0ac977
--- /dev/null
+++ b/charts/k10/k10/4.5.900/templates/_k10_template.tpl
@@ -0,0 +1,190 @@
+{{/* Generate service spec */}}
+{{- define "k10-default" }}
+{{- $service := .k10_service }}
+{{- with .main }}
+{{- $main_context := . }}
+{{- range $skip, $statefulContainer := compact (dict "main" $main_context "k10_service_pod" $service | include "get.statefulRestServicesInPod" | splitList " ") }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ namespace: {{ $main_context.Release.Namespace }}
+ name: {{ $statefulContainer }}-pv-claim
+ labels:
+{{ include "helm.labels" $main_context | indent 4 }}
+ component: {{ $statefulContainer }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ default $main_context.Values.global.persistence.size (index $main_context.Values.global.persistence $statefulContainer "size") }}
+{{- if $main_context.Values.global.persistence.storageClass }}
+ {{- if (eq "-" $main_context.Values.global.persistence.storageClass) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: "{{ $main_context.Values.global.persistence.storageClass }}"
+ {{- end }}
+{{- end }}
+---
+{{- end }}{{/* range $statefulContainer */}}
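+{{/*
+For illustration only (hypothetical values): with global.persistence.size set
+to 20Gi and global.persistence.catalog.size set to 50Gi, the claim above would
+request 50Gi for the catalog service, while other stateful services fall back
+to the 20Gi default.
+*/}}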
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ namespace: {{ .Release.Namespace }}
+ name: {{ $service }}-svc
+ labels:
+{{ include "helm.labels" . | indent 4 }}
+ component: {{ $service }}
+spec:
+ replicas: {{ $.replicas }}
+ strategy:
+ type: Recreate
+ selector:
+ matchLabels:
+{{ include "k10.common.matchLabels" . | indent 6 }}
+ component: {{ $service }}
+ run: {{ $service }}-svc
+ template:
+ metadata:
+ annotations:
+ checksum/config: {{ include (print .Template.BasePath "/k10-config.yaml") . | sha256sum }}
+ checksum/secret: {{ include (print .Template.BasePath "/secrets.yaml") . | sha256sum }}
+{{- if .Values.auth.ldap.restartPod }}
+ rollme: {{ randAlphaNum 5 | quote }}
+{{- end}}
+ labels:
+{{ include "helm.labels" . | indent 8 }}
+ component: {{ $service }}
+ run: {{ $service }}-svc
+ spec:
+{{- if eq $service "executor" }}
+{{- if .Values.services.executor.hostNetwork }}
+ hostNetwork: true
+{{- end }}{{/* .Values.services.executor.hostNetwork */}}
+{{- end }}{{/* eq $service "executor" */}}
+{{- if eq $service "aggregatedapis" }}
+{{- if .Values.services.aggregatedapis.hostNetwork }}
+ hostNetwork: true
+{{- end }}{{/* .Values.services.aggregatedapis.hostNetwork */}}
+{{- end }}{{/* eq $service "aggregatedapis" */}}
+{{- if eq $service "dashboardbff" }}
+{{- if .Values.services.dashboardbff.hostNetwork }}
+ hostNetwork: true
+{{- end }}{{/* .Values.services.dashboardbff.hostNetwork */}}
+{{- end }}{{/* eq $service "dashboardbff" */}}
+ securityContext:
+{{ toYaml .Values.services.securityContext | indent 8 }}
+ serviceAccountName: {{ template "serviceAccountName" . }}
+ {{- include "k10.imagePullSecrets" . | indent 6 }}
+{{- /* initContainers: */}}
+{{- (dict "main" . "k10_pod" $service | include "k10-init-container-header") }}
+{{- (dict "main" . "k10_pod" $service | include "k10-init-container") }}
+{{- /* containers: */}}
+{{- (dict "main" . "k10_pod" $service | include "k10-containers") }}
+{{- /* volumes: */}}
+{{- (dict "main" . "k10_pod" $service | include "k10-deployment-volumes-header") }}
+{{- (dict "main" . "k10_pod" $service | include "k10-deployment-volumes") }}
+---
+{{- end }}{{/* with .main */}}
+{{- end }}{{/* define "k10-default" */}}
+
+{{- define "k10-deployment-volumes-header" }}
+{{- $pod := .k10_pod }}
+{{- with .main }}
+{{- $main_context := . }}
+{{- $containerList := (dict "main" $main_context "k10_service_pod" $pod | include "get.serviceContainersInPod" | splitList " ") }}
+{{- $needsVolumesHeader := false }}
+{{- range $skip, $service := $containerList }}
+ {{- $serviceStateful := has $service (dict "main" $main_context "k10_service_pod" $pod | include "get.statefulRestServicesInPod" | splitList " ") }}
+ {{- if or $serviceStateful (or (eq (include "check.googlecreds" $main_context) "true") (eq $service "auth" "logging")) }}
+ {{- $needsVolumesHeader = true }}
+ {{- else if or (or (eq (include "basicauth.check" $main_context) "true") (or $main_context.Values.auth.oidcAuth.enabled (eq (include "check.dexAuth" $main_context) "true"))) $main_context.Values.features }}
+ {{- $needsVolumesHeader = true }}
+ {{- else if and (eq $service "config") ($main_context.Values.injectKanisterSidecar.enabled) }}
+ {{- $needsVolumesHeader = true }}
+ {{- else if eq (include "check.cacertconfigmap" $main_context) "true" }}
+ {{- $needsVolumesHeader = true }}
+ {{- else if and ( eq $service "auth" ) ( or $main_context.Values.auth.dex.enabled (eq (include "check.dexAuth" $main_context) "true")) }}
+ {{- $needsVolumesHeader = true }}
+ {{- end }}{{/* volumes header needed check */}}
+{{- end }}{{/* range $skip, $service := $containerList */}}
+{{- if $needsVolumesHeader }}
+ volumes:
+{{- end }}
+{{- end }}{{/* with .main */}}
+{{- end }}{{/* define "k10-deployment-volumes-header" */}}
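+
+{{/*
+For illustration only: for a pod that contains the "config" service with
+injectKanisterSidecar.enabled set to true (an assumed configuration), the
+header above emits the "volumes:" key, and "k10-deployment-volumes" below
+adds the matching mutating-webhook-certs secret volume.
+*/}}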
"true" }} + - name: k10-basic-auth + secret: + secretName: {{ default "k10-basic-auth" .Values.auth.basicAuth.secretName }} +{{- end }} +{{- if .Values.auth.oidcAuth.enabled }} + - name: k10-oidc-auth + secret: + secretName: {{ default "k10-oidc-auth" .Values.auth.oidcAuth.secretName }} +{{- end }} +{{- if .Values.auth.openshift.enabled }} + - name: k10-oidc-auth + secret: + secretName: {{ default "k10-oidc-auth" .Values.auth.openshift.secretName }} +{{- end }} +{{- if .Values.auth.ldap.enabled }} + - name: k10-oidc-auth + secret: + secretName: {{ default "k10-oidc-auth" .Values.auth.ldap.secretName }} + - name: k10-logos-dex + configMap: + name: k10-logos-dex +{{- end }} +{{- range $skip, $statefulContainer := compact (dict "main" . "k10_service_pod" $pod | include "get.statefulRestServicesInPod" | splitList " ") }} + - name: {{ $statefulContainer }}-persistent-storage + persistentVolumeClaim: + claimName: {{ $statefulContainer }}-pv-claim +{{- end }} +{{- if eq (include "check.googlecreds" .) "true" }} + - name: service-account + secret: + secretName: google-secret +{{- end }} +{{- if eq (include "check.cacertconfigmap" .) "true" }} + - name: {{ .Values.cacertconfigmap.name }} + configMap: + name: {{ .Values.cacertconfigmap.name }} +{{- end }} +{{- $containersInThisPod := (dict "main" . "k10_service_pod" $pod | include "get.serviceContainersInPod" | splitList " ") }} +{{- if has "logging" $containersInThisPod }} + - name: logging-configmap-storage + configMap: + name: fluentbit-configmap +{{- end }} +{{- if and (has "config" $containersInThisPod) (.Values.injectKanisterSidecar.enabled) }} + - name: mutating-webhook-certs + secret: + secretName: {{ include "k10.configAPIs" . }}-certs +{{- end }} +{{- if and ( has "auth" $containersInThisPod) (or .Values.auth.dex.enabled (eq (include "check.dexAuth" .) "true")) }} + - name: config + configMap: + name: k10-dex + items: + - key: config.yaml + path: config.yaml +{{- if .Values.auth.ldap.enabled }} + - name: dex-config + emptyDir: {} + - name: bind-secret + secret: + secretName: {{ default "k10-dex" .Values.auth.ldap.bindPWSecretName }} +{{- end }} +{{- end }} +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-init-container-header" */}} diff --git a/charts/k10/k10/4.5.900/templates/api-tls-secrets.yaml b/charts/k10/k10/4.5.900/templates/api-tls-secrets.yaml new file mode 100644 index 000000000..6c863f7c6 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/api-tls-secrets.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey }} +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: ambassador-certs +type: kubernetes.io/tls +data: + tls.crt: {{ .Values.secrets.apiTlsCrt }} + tls.key: {{ .Values.secrets.apiTlsKey }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/templates/apiservice.yaml b/charts/k10/k10/4.5.900/templates/apiservice.yaml new file mode 100644 index 000000000..1811df48a --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/apiservice.yaml @@ -0,0 +1,25 @@ +{{/* Template to generate the aggregated APIService/Service objects */}} +{{- if .Values.apiservices.deployed -}} +{{- $main := . -}} +{{- $container_port := .Values.service.internalPort -}} +{{- $namespace := .Release.Namespace -}} +{{- range include "k10.aggregatedAPIs" . | splitList " " -}} +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1alpha1.{{ . 
diff --git a/charts/k10/k10/4.5.900/templates/apiservice.yaml b/charts/k10/k10/4.5.900/templates/apiservice.yaml
new file mode 100644
index 000000000..1811df48a
--- /dev/null
+++ b/charts/k10/k10/4.5.900/templates/apiservice.yaml
@@ -0,0 +1,25 @@
+{{/* Template to generate the aggregated APIService/Service objects */}}
+{{- if .Values.apiservices.deployed -}}
+{{- $main := . -}}
+{{- $container_port := .Values.service.internalPort -}}
+{{- $namespace := .Release.Namespace -}}
+{{- range include "k10.aggregatedAPIs" . | splitList " " -}}
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+ name: v1alpha1.{{ . }}.{{ template "apiDomain" $main }}
+ labels:
+ apiserver: "true"
+{{ include "helm.labels" $ | indent 4 }}
+spec:
+ version: v1alpha1
+ group: {{ . }}.{{ template "apiDomain" $main }}
+ groupPriorityMinimum: 2000
+ service:
+ namespace: {{$namespace}}
+ name: aggregatedapis-svc
+ versionPriority: 10
+ insecureSkipTLSVerify: true
+{{ end }}
+{{- end -}}
diff --git a/charts/k10/k10/4.5.900/templates/daemonsets.yaml b/charts/k10/k10/4.5.900/templates/daemonsets.yaml
new file mode 100644
index 000000000..cf2f53cea
--- /dev/null
+++ b/charts/k10/k10/4.5.900/templates/daemonsets.yaml
@@ -0,0 +1,26 @@
+{{- if .Values.metering.redhatMarketplacePayg }}
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ namespace: {{ .Release.Namespace }}
+ name: k10-rhmp-paygo
+ labels:
+{{ include "helm.labels" . | indent 4 }}
+ component: paygo
+spec:
+ selector:
+ matchLabels:
+{{ include "k10.common.matchLabels" . | indent 6 }}
+ component: paygo
+ template:
+ metadata:
+ labels:
+{{ include "helm.labels" . | indent 8 }}
+ component: paygo
+ spec:
+ containers:
+ - name: paygo
+ image: registry.access.redhat.com/ubi8/ubi-minimal:8.5-230
+ command: [ "sleep" ]
+ args: [ "36500d" ]
+{{- end -}}
diff --git a/charts/k10/k10/4.5.900/templates/deployments.yaml b/charts/k10/k10/4.5.900/templates/deployments.yaml
new file mode 100644
index 000000000..53ac1c8b0
--- /dev/null
+++ b/charts/k10/k10/4.5.900/templates/deployments.yaml
@@ -0,0 +1,30 @@
+{{/*
+Generates deployment specs for K10 services and other services such as
+"frontend" and "kanister".
+*/}}
+{{- include "singleAuth.check" . -}}
+{{- $main_context := . -}}
+{{- $stateless_services := include "k10.statelessServices" . | splitList " " -}}
+{{- $colocated_services := include "k10.colocatedServices" . | fromYaml -}}
+{{- range $skip, $k10_service := include "k10.restServices" . | splitList " " }}
+ {{ if not (hasKey $colocated_services $k10_service ) }}
+ {{/* Set $stateful for stateful services when .Values.global.persistence.enabled is true */}}
+ {{- $stateful := and $.Values.global.persistence.enabled (not (has $k10_service $stateless_services)) -}}
+ {{/* Set $replicas to .Values.executorReplicas for the executor service */}}
+ {{- $replicas := or (and (eq $k10_service "executor") $.Values.executorReplicas) 1 -}}
+ {{ $tmp_contx := dict "main" $main_context "k10_service" $k10_service "stateful" $stateful "replicas" $replicas }}
+ {{ if eq $k10_service "metering" }}
+ {{- include "k10-metering" $tmp_contx -}}
+ {{ else }}
+ {{- include "k10-default" $tmp_contx -}}
+ {{ end }}
+ {{ end }}{{/* if not (hasKey $colocated_services $k10_service ) */}}
+{{- end }}
+{{/*
+Generate deployment specs for additional services. These are stateless and have
+1 replica.
+*/}}
+{{- range $skip, $k10_service := concat (include "k10.services" . | splitList " ") (include "k10.additionalServices" . | splitList " ") }}
+ {{ $tmp_contx := dict "main" $main_context "k10_service" $k10_service "stateful" false "replicas" 1 }}
+ {{- include "k10-default" $tmp_contx -}}
+{{- end }}
diff --git a/charts/k10/k10/4.5.900/templates/fluentbit-configmap.yaml b/charts/k10/k10/4.5.900/templates/fluentbit-configmap.yaml
new file mode 100644
index 000000000..71cecb966
--- /dev/null
+++ b/charts/k10/k10/4.5.900/templates/fluentbit-configmap.yaml
@@ -0,0 +1,34 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ labels:
+{{ include "helm.labels" .
| indent 4 }} + namespace: {{ .Release.Namespace }} + name: fluentbit-configmap +data: + fluentbit.conf: | + [SERVICE] + HTTP_Server On + HTTP_Listen 0.0.0.0 + HTTP_PORT 24225 + + [INPUT] + Name tcp + Listen 0.0.0.0 + Port 24224 + + [OUTPUT] + Name stdout + Match * + + [OUTPUT] + Name file + Match * + File {{ .Values.global.persistence.mountPath }}/k10.log + logrotate.conf: | + {{ .Values.global.persistence.mountPath }}/k10.log { + create + missingok + rotate 6 + size 1G + } diff --git a/charts/k10/k10/4.5.900/templates/gateway-ext.yaml b/charts/k10/k10/4.5.900/templates/gateway-ext.yaml new file mode 100644 index 000000000..1e21d3dba --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/gateway-ext.yaml @@ -0,0 +1,33 @@ +{{/* Externally exposed service for gateway endpoint. */}} +{{- $container_port := .Values.service.internalPort -}} +{{- if .Values.externalGateway.create -}} +{{- include "authEnabled.check" . -}} +apiVersion: v1 +kind: Service +metadata: + namespace: {{ $.Release.Namespace }} + name: gateway-ext + labels: + service: gateway + {{- if eq "route53-mapper" (default " " .Values.externalGateway.fqdn.type) }} + dns: route53 + {{- end }} +{{ include "helm.labels" . | indent 4 }} + annotations: + {{- if .Values.externalGateway.annotations }} +{{ toYaml .Values.externalGateway.annotations | indent 4 }} + {{- end }} +{{ include "dnsAnnotations" . | indent 4 }} + {{- if .Values.externalGateway.awsSSLCertARN }} + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: {{ .Values.externalGateway.awsSSLCertARN }} + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https + {{- end }} +spec: + type: LoadBalancer + ports: + - name: https + port: {{ if or (and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey) .Values.externalGateway.awsSSLCertARN }}443{{ else }}80{{ end }} + targetPort: {{ $container_port }} + selector: + service: gateway +{{- end -}} diff --git a/charts/k10/k10/4.5.900/templates/gateway.yaml b/charts/k10/k10/4.5.900/templates/gateway.yaml new file mode 100644 index 000000000..4a1844981 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/gateway.yaml @@ -0,0 +1,134 @@ +{{- $container_port := .Values.service.internalPort -}} +{{- $service_port := .Values.service.externalPort -}} +{{- $admin_port := default 8877 .Values.service.gatewayAdminPort -}} +--- +apiVersion: v1 +kind: Service +metadata: + namespace: {{ $.Release.Namespace }} + labels: + service: gateway +{{ include "helm.labels" . 
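The gateway-ext Service above listens on 443 whenever TLS material (secrets.apiTlsCrt/secrets.apiTlsKey) or an AWS certificate ARN is supplied, and falls back to 80 otherwise. Illustrative values for the ARN path; the ARN below is a placeholder:

  externalGateway:
    create: true
    awsSSLCertARN: "arn:aws:acm:us-east-1:000000000000:certificate/EXAMPLE"

With these values the Service renders port 443 and gains the two service.beta.kubernetes.io/aws-load-balancer-ssl-* annotations.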
| indent 4 }} + name: gateway + annotations: + getambassador.io/config: | + --- + apiVersion: ambassador/v1 + kind: AuthService + name: authentication + auth_service: "auth-svc:8000" + path_prefix: "/v0/authz" + allowed_request_headers: + - "x-forwarded-access-token" + --- +{{- if (eq "endpoint" .Values.apigateway.serviceResolver) }} + apiVersion: getambassador.io/v1 + kind: KubernetesEndpointResolver + name: endpoint + --- +{{- end }} + apiVersion: ambassador/v1 + kind: Module + name: ambassador + config: + service_port: {{ $container_port }} +{{- if (eq "endpoint" .Values.apigateway.serviceResolver) }} + resolver: endpoint + load_balancer: + policy: round_robin +{{- end }} +{{- if and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey }} + --- + apiVersion: ambassador/v1 + kind: Module + name: tls + config: + server: + enabled: True + secret: ambassador-certs +{{- end }} +spec: + ports: + - name: http + port: {{ $service_port }} + targetPort: {{ $container_port }} + selector: + service: gateway +--- +{{- if .Values.gateway.exposeAdminPort }} +apiVersion: v1 +kind: Service +metadata: + namespace: {{ $.Release.Namespace }} + name: gateway-admin + labels: + service: gateway +{{ include "helm.labels" . | indent 4 }} +spec: + ports: + - name: metrics + port: {{ $admin_port }} + targetPort: {{ $admin_port }} + selector: + service: gateway +--- +{{- end }} +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: {{ $.Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} + component: gateway + name: gateway +spec: + replicas: 1 + selector: + matchLabels: + service: gateway + template: + metadata: + annotations: + checksum/config: {{ include (print .Template.BasePath "/k10-config.yaml") . | sha256sum }} + checksum/secret: {{ include (print .Template.BasePath "/secrets.yaml") . | sha256sum }} + labels: + service: gateway + component: gateway +{{ include "helm.labels" . | indent 8 }} + spec: + serviceAccountName: {{ template "serviceAccountName" . }} + {{- include "k10.imagePullSecrets" . | indent 6 }} + containers: + - name: ambassador + image: {{ include "k10.ambImage" . }} + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 200m + memory: 300Mi + env: + - name: AMBASSADOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: AMBASSADOR_SINGLE_NAMESPACE + value: "true" + - name: AMBASSADOR_LEGACY_MODE + value: "true" + - name: "AMBASSADOR_VERIFY_SSL_FALSE" + value: {{ .Values.gateway.insecureDisableSSLVerify | quote }} + livenessProbe: + httpGet: + path: /ambassador/v0/check_alive + port: {{ $admin_port }} + initialDelaySeconds: 30 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /ambassador/v0/check_ready + port: {{ $admin_port }} + initialDelaySeconds: 30 + periodSeconds: 3 + restartPolicy: Always diff --git a/charts/k10/k10/4.5.900/templates/grafana-scc.yaml b/charts/k10/k10/4.5.900/templates/grafana-scc.yaml new file mode 100644 index 000000000..f634498a4 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/grafana-scc.yaml @@ -0,0 +1,44 @@ +{{- if .Values.scc.create }} +{{- if .Values.grafana.enabled }} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + labels: +{{ include "helm.labels" . 
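In the gateway Deployment above, upstream TLS verification and the admin/metrics port are both plain values; an illustrative override:

  # Illustrative values consumed by gateway.yaml.
  gateway:
    insecureDisableSSLVerify: true   # sets AMBASSADOR_VERIFY_SSL_FALSE
    exposeAdminPort: true            # adds the gateway-admin Service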
| indent 4 }} + name: {{ .Release.Name }}-grafana +allowPrivilegedContainer: false +allowHostNetwork: false +allowHostDirVolumePlugin: true +priority: null +allowedCapabilities: null +allowHostPorts: true +allowHostPID: false +allowHostIPC: false +readOnlyRootFilesystem: false +requiredDropCapabilities: + - KILL + - MKNOD + - SETUID + - SETGID +defaultAddCapabilities: [] +allowedCapabilities: [] +priority: 0 +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +volumes: + - configMap + - downwardAPI + - emptyDir + - persistentVolumeClaim + - projected + - secret +users: + - system:serviceaccount:{{.Release.Namespace}}:{{.Release.Name}}-grafana +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/templates/ingress.yaml b/charts/k10/k10/4.5.900/templates/ingress.yaml new file mode 100644 index 000000000..48efc0530 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/ingress.yaml @@ -0,0 +1,46 @@ +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $service_port := .Values.service.externalPort -}} +{{ if .Values.ingress.create }} +{{ include "authEnabled.check" . }} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: {{ .Release.Name }}-ingress + annotations: +{{ include "ingressClassAnnotation" . | indent 4 }} + {{- if and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey }} + nginx.ingress.kubernetes.io/secure-backends: "true" + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + {{- end }} + {{- if .Values.ingress.annotations }} +{{ toYaml .Values.ingress.annotations | indent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls.enabled }} + tls: + - hosts: + - {{ required "ingress.host value is required for TLS configuration" .Values.ingress.host }} + secretName: {{ required "ingress.tls.secretName is required for TLS configuration" .Values.ingress.tls.secretName }} +{{- end }} + rules: + - http: + paths: + - path: /{{ default .Release.Name .Values.ingress.urlPath | trimPrefix "/" | trimSuffix "/" }}/ + pathType: {{ default "ImplementationSpecific" .Values.ingress.pathType }} + backend: + {{- if $ingressApiIsStable }} + service: + name: gateway + port: + number: {{ $service_port }} + {{- else }} + serviceName: gateway + servicePort: {{ $service_port }} + {{- end }} + {{- if .Values.ingress.host }} + host: {{ .Values.ingress.host }} + {{- end }} +{{ end }} diff --git a/charts/k10/k10/4.5.900/templates/k10-config.yaml b/charts/k10/k10/4.5.900/templates/k10-config.yaml new file mode 100644 index 000000000..2c82274f0 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/k10-config.yaml @@ -0,0 +1,228 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-config +data: + loglevel: {{ .Values.logLevel | quote }} + {{- if .Values.clusterName }} + clustername: {{ quote .Values.clusterName }} + {{- end }} + version: {{ .Chart.AppVersion }} + multiClusterVersion: {{ include "k10.multiClusterVersion" . | quote }} + modelstoredirname: "//mnt/k10state/kasten-io/" + apiDomain: {{ include "apiDomain" . }} + concurrentSnapConversions: {{ include "k10.defaultConcurrentSnapshotConversions" . | quote }} + concurrentWorkloadSnapshots: {{ include "k10.defaultConcurrentWorkloadSnapshots" . | quote }} + k10DataStoreParallelUpload: {{ include "k10.defaultK10DataStoreParallelUpload" . 
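ingress.yaml above emits the networking.k8s.io/v1 backend shape when the ingress.isStable helper evaluates true, and the legacy serviceName/servicePort form otherwise. Assuming service.externalPort is 8000 (the same literal the allow-external NetworkPolicy later in this patch opens), the two branches render roughly as:

  # stable (networking.k8s.io/v1) form:
  backend:
    service:
      name: gateway
      port:
        number: 8000
  # legacy (v1beta1) form:
  backend:
    serviceName: gateway
    servicePort: 8000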
| quote }} + k10DataStoreGeneralContentCacheSizeMB: {{ include "k10.defaultK10DataStoreGeneralContentCacheSizeMB" . | quote }} + k10DataStoreGeneralMetadataCacheSizeMB: {{ include "k10.defaultK10DataStoreGeneralMetadataCacheSizeMB" . | quote }} + k10DataStoreRestoreContentCacheSizeMB: {{ include "k10.defaultK10DataStoreRestoreContentCacheSizeMB" . | quote }} + k10DataStoreRestoreMetadataCacheSizeMB: {{ include "k10.defaultK10DataStoreRestoreMetadataCacheSizeMB" . | quote }} + K10BackupBufferFileHeadroomFactor: {{ include "k10.defaultK10BackupBufferFileHeadroomFactor" . | quote }} + AWSAssumeRoleDuration: {{ default (include "k10.defaultAssumeRoleDuration" .) .Values.awsConfig.assumeRoleDuration | quote }} + KanisterBackupTimeout: {{ default (include "k10.defaultKanisterBackupTimeout" .) .Values.kanister.backupTimeout | quote }} + KanisterRestoreTimeout: {{ default (include "k10.defaultKanisterRestoreTimeout" .) .Values.kanister.restoreTimeout | quote }} + KanisterDeleteTimeout: {{ default (include "k10.defaultKanisterDeleteTimeout" .) .Values.kanister.deleteTimeout | quote }} + KanisterHookTimeout: {{ default (include "k10.defaultKanisterHookTimeout" .) .Values.kanister.hookTimeout | quote }} + KanisterCheckRepoTimeout: {{ default (include "k10.defaultKanisterCheckRepoTimeout" .) .Values.kanister.checkRepoTimeout | quote }} + KanisterStatsTimeout: {{ default (include "k10.defaultKanisterStatsTimeout" .) .Values.kanister.statsTimeout | quote }} + KanisterEFSPostRestoreTimeout: {{ default (include "k10.defaultKanisterEFSPostRestoreTimeout" .) .Values.kanister.efsPostRestoreTimeout | quote }} + KanisterPodReadyWaitTimeout: {{ .Values.kanister.podReadyWaitTimeout | quote }} + K10MutatingWebhookTLSCertDir: "/etc/ssl/certs/webhook" + + K10LimiterGenericVolumeSnapshots: {{ default (include "k10.defaultK10LimiterGenericVolumeSnapshots" .) .Values.limiter.genericVolumeSnapshots | quote }} + K10LimiterGenericVolumeCopies: {{ default (include "k10.defaultK10LimiterGenericVolumeCopies" .) .Values.limiter.genericVolumeCopies | quote }} + K10LimiterGenericVolumeRestores: {{ default (include "k10.defaultK10LimiterGenericVolumeRestores" .) .Values.limiter.genericVolumeRestores | quote }} + K10LimiterCsiSnapshots: {{ default (include "k10.defaultK10LimiterCsiSnapshots" .) .Values.limiter.csiSnapshots | quote }} + K10LimiterProviderSnapshots: {{ default (include "k10.defaultK10LimiterProviderSnapshots" .) .Values.limiter.providerSnapshots | quote }} + + {{- if .Values.awsConfig.efsBackupVaultName }} + efsBackupVaultName: {{ quote .Values.awsConfig.efsBackupVaultName }} + {{- end }} + + {{- if .Values.vmWare.taskTimeoutMin }} + vmWareTaskTimeoutMin: {{ quote .Values.vmWare.taskTimeoutMin }} + {{- end }} + +{{- include "get.kanisterPodCustomLabels" . | indent 2}} +{{- include "get.kanisterPodCustomAnnotations" . | indent 2}} + + {{- if .Values.kanisterFunctionVersion }} + kanisterFunctionVersion: {{ .Values.kanisterFunctionVersion | quote }} + {{- else }} + kanisterFunctionVersion: {{ quote "v1.0.0-alpha" }} + {{- end }} + {{- if eq "true" (include "overwite.kanisterToolsImage" .) }} + overwriteKanisterTools: {{ include "get.kanisterToolsImage" .}} + {{- end }} +{{- include "kanisterToolsResources" . | indent 2 }} + +{{ if .Values.features }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-features +data: +{{ include "k10.features" . 
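Each Kanister timeout and limiter entry above prefers an explicit Helm value over the chart default, so a single knob can be raised without restating the rest. An illustrative override; consult the chart documentation for units and defaults:

  kanister:
    backupTimeout: 60
  limiter:
    csiSnapshots: 10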
| indent 2}} +{{ end }} +{{ if .Values.auth.dex.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-dex + namespace: {{ .Release.Namespace }} +data: + config.yaml: | + issuer: {{ .Values.auth.oidcAuth.providerURL }} + storage: + type: memory + web: + http: 0.0.0.0:8080 + logger: + level: info + format: text + connectors: + - type: oidc + id: google + name: Google + config: + issuer: {{ .Values.auth.dex.providerURL }} + clientID: {{ .Values.auth.oidcAuth.clientID }} + clientSecret: {{ .Values.auth.oidcAuth.clientSecret }} + redirectURI: {{ .Values.auth.dex.redirectURL }} + scopes: + - openid + - profile + - email + oauth2: + skipApprovalScreen: true + staticClients: + - name: 'K10' + id: {{ .Values.auth.oidcAuth.clientID }} + secret: {{ .Values.auth.oidcAuth.clientSecret }} + redirectURIs: + - {{ printf "%s/k10/auth-svc/v0/oidc/redirect" .Values.auth.oidcAuth.redirectURL }} + enablePasswordDB: true + staticPasswords: +{{ end }} +{{ if .Values.auth.openshift.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-dex + namespace: {{ .Release.Namespace }} +data: + config.yaml: | + issuer: {{ printf "%s/dex" (trimSuffix "/" .Values.auth.openshift.dashboardURL) }} + storage: + type: memory + web: + http: 0.0.0.0:8080 + logger: + level: info + format: text + connectors: + - type: openshift + id: openshift + name: OpenShift + config: + issuer: {{ .Values.auth.openshift.openshiftURL }} + clientID: {{printf "system:serviceaccount:%s:%s" .Release.Namespace .Values.auth.openshift.serviceAccount }} + clientSecret: {{ .Values.auth.openshift.clientSecret }} + redirectURI: {{ printf "%s/dex/callback" (trimSuffix "/" .Values.auth.openshift.dashboardURL) }} + insecureCA: {{ .Values.auth.openshift.insecureCA }} +{{- if and (eq (include "check.cacertconfigmap" .) "false") .Values.auth.openshift.useServiceAccountCA }} + rootCA: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt +{{- end }} + oauth2: + skipApprovalScreen: true + staticClients: + - name: 'K10' + id: kasten + secret: kastensecret + redirectURIs: + - {{ printf "%s/auth-svc/v0/oidc/redirect" (trimSuffix "/" .Values.auth.openshift.dashboardURL) }} +{{ end }} +{{ if .Values.auth.ldap.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . 
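The OpenShift connector above is driven entirely by auth.openshift.* values. An illustrative configuration; every URL, name, and secret below is a placeholder:

  auth:
    openshift:
      enabled: true
      serviceAccount: k10-dex-sa
      clientSecret: "<service-account bearer token>"
      dashboardURL: "https://k10.example.com/k10"
      openshiftURL: "https://api.openshift.example.com:6443"
      insecureCA: false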
| indent 4 }} + name: k10-dex + namespace: {{ .Release.Namespace }} +data: + config.yaml: | + issuer: {{ printf "%s/dex" (trimSuffix "/" .Values.auth.ldap.dashboardURL) }} + storage: + type: memory + web: + http: 0.0.0.0:8080 + frontend: + theme: custom + logoURL: theme/kasten-logo.svg + logger: + level: info + format: text + connectors: + - type: ldap + id: ldap + name: LDAP + config: + host: {{ .Values.auth.ldap.host }} + insecureNoSSL: {{ .Values.auth.ldap.insecureNoSSL }} + insecureSkipVerify: {{ .Values.auth.ldap.insecureSkipVerifySSL }} + startTLS: {{ .Values.auth.ldap.startTLS }} + bindDN: {{ .Values.auth.ldap.bindDN }} + bindPW: BIND_PASSWORD_PLACEHOLDER + userSearch: + baseDN: {{ .Values.auth.ldap.userSearch.baseDN }} + filter: {{ .Values.auth.ldap.userSearch.filter }} + username: {{ .Values.auth.ldap.userSearch.username }} + idAttr: {{ .Values.auth.ldap.userSearch.idAttr }} + emailAttr: {{ .Values.auth.ldap.userSearch.emailAttr }} + nameAttr: {{ .Values.auth.ldap.userSearch.nameAttr }} + preferredUsernameAttr: {{ .Values.auth.ldap.userSearch.preferredUsernameAttr }} + groupSearch: + baseDN: {{ .Values.auth.ldap.groupSearch.baseDN }} + filter: {{ .Values.auth.ldap.groupSearch.filter }} + nameAttr: {{ .Values.auth.ldap.groupSearch.nameAttr }} +{{- with .Values.auth.ldap.groupSearch.userMatchers }} + userMatchers: +{{ toYaml . | indent 10 }} +{{- end }} + oauth2: + skipApprovalScreen: true + staticClients: + - name: 'K10' + id: kasten + secret: kastensecret + redirectURIs: + - {{ printf "%s/auth-svc/v0/oidc/redirect" (trimSuffix "/" .Values.auth.ldap.dashboardURL) }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: k10-logos-dex + namespace: {{ .Release.Namespace }} +binaryData: + {{- $files := .Files }} + {{- range tuple "files/favicon.png" "files/kasten-logo.svg" "files/styles.css" }} + {{ trimPrefix "files/" . }}: |- + {{ $files.Get . | b64enc }} + {{- end }} +{{ end }} diff --git a/charts/k10/k10/4.5.900/templates/k10-eula.yaml b/charts/k10/k10/4.5.900/templates/k10-eula.yaml new file mode 100644 index 000000000..21e251d6c --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/k10-eula.yaml @@ -0,0 +1,21 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-eula +data: + text: {{ .Files.Get "eula.txt" | quote }} +--- +{{ if .Values.eula.accept }} +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-eula-info +data: +{{ include "k10.eula.fields" . | indent 2 }} +{{ end }} diff --git a/charts/k10/k10/4.5.900/templates/kopia-tls-certs.yaml b/charts/k10/k10/4.5.900/templates/kopia-tls-certs.yaml new file mode 100644 index 000000000..ac0635f51 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/kopia-tls-certs.yaml @@ -0,0 +1,33 @@ +# alternate names of the services. This renders to: [ component-svc.namespace, component-svc.namespace.svc ] +{{- $altNamesKopia := list ( printf "%s-svc.%s" "data-mover" .Release.Namespace ) ( printf "%s-svc.%s.svc" "data-mover" .Release.Namespace ) }} +# generate ca cert with 365 days of validity +{{- $caKopia := genCA ( printf "%s-svc-ca" "data-mover" ) 365 }} +# generate cert with CN="component-svc", SAN=$altNames and with 365 days of validity +{{- $certKopia := genSignedCert ( printf "%s-svc" "data-mover" ) nil $altNamesKopia 365 $caKopia }} +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: kopia-tls-cert + labels: +{{ include "helm.labels" . 
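kopia-tls-certs.yaml above relies on Sprig's genCA/genSignedCert helpers; because they run at template time, fresh key material is produced on every render (and therefore on every upgrade). A minimal sketch of the same pattern with a hypothetical "demo-svc" name:

  {{- $altNames := list "demo-svc.default" "demo-svc.default.svc" }}
  {{- $ca := genCA "demo-svc-ca" 365 }}
  {{- $cert := genSignedCert "demo-svc" nil $altNames 365 $ca }}
  tls.crt: {{ $cert.Cert | b64enc }}
  tls.key: {{ $cert.Key | b64enc }}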
| indent 4 }} +{{- if .Values.global.rhMarketPlace }} + annotations: + "helm.sh/hook": "pre-install" +{{- end }} +data: + tls.crt: {{ $certKopia.Cert | b64enc }} +--- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: kopia-tls-key + labels: +{{ include "helm.labels" . | indent 4 }} +{{- if .Values.global.rhMarketPlace }} + annotations: + "helm.sh/hook": "pre-install" +{{- end }} +data: + tls.key: {{ $certKopia.Key | b64enc }} diff --git a/charts/k10/k10/4.5.900/templates/license.yaml b/charts/k10/k10/4.5.900/templates/license.yaml new file mode 100644 index 000000000..36af4280f --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/license.yaml @@ -0,0 +1,25 @@ +{{- if not ( or ( .Values.license ) ( .Values.metering.awsMarketplace ) ( .Values.metering.awsManagedLicense ) ( .Values.metering.licenseConfigSecretName ) ( .Values.metering.redhatMarketplacePayg ) ) }} +{{- if .Files.Get "triallicense" }} +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-trial-license +type: Opaque +data: + license: {{ print (.Files.Get "triallicense") }} +{{- end }} +{{- end }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-license +type: Opaque +data: + license: {{ include "k10.getlicense" . }} diff --git a/charts/k10/k10/4.5.900/templates/mutatingwebhook.yaml b/charts/k10/k10/4.5.900/templates/mutatingwebhook.yaml new file mode 100644 index 000000000..36d7da875 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/mutatingwebhook.yaml @@ -0,0 +1,51 @@ +{{- if .Values.injectKanisterSidecar.enabled -}} +# alternate names of the services. This renders to: [ component-svc.namespace, component-svc.namespace.svc ] +{{- $altNames := list ( printf "%s-svc.%s" (include "k10.configAPIs" .) .Release.Namespace ) ( printf "%s-svc.%s.svc" (include "k10.configAPIs" .) .Release.Namespace ) }} +# generate ca cert with 365 days of validity +{{- $ca := genCA ( printf "%s-svc-ca" (include "k10.configAPIs" .) ) 365 }} +# generate cert with CN="component-svc", SAN=$altNames and with 365 days of validity +{{- $cert := genSignedCert ( printf "%s-svc" (include "k10.configAPIs" .) ) nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ include "k10.configAPIs" . }}-certs + labels: +{{ include "helm.labels" . | indent 4 }} +data: + tls.crt: {{ $cert.Cert | b64enc }} + tls.key: {{ $cert.Key | b64enc }} +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: +{{ include "helm.labels" . 
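The sidecar-injection webhook defined above can be narrowed with the namespaceSelector/objectSelector values consumed at the end of this template; illustrative values with an example label key:

  injectKanisterSidecar:
    enabled: true
    namespaceSelector:
      matchLabels:
        k10/injectKanisterSidecar: "true"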
| indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-sidecar-injector +webhooks: +- name: k10-sidecar-injector.kasten.io + admissionReviewVersions: ["v1", "v1beta1"] + failurePolicy: Ignore + sideEffects: None + clientConfig: + service: + name: config-svc + namespace: {{ .Release.Namespace }} + path: "/k10/mutate" + port: 443 + caBundle: {{ b64enc $ca.Cert }} + rules: + - operations: ["CREATE", "UPDATE"] + apiGroups: ["*"] + apiVersions: ["v1"] + resources: ["deployments", "statefulsets", "deploymentconfigs"] +{{- if .Values.injectKanisterSidecar.namespaceSelector }} + namespaceSelector: +{{ toYaml .Values.injectKanisterSidecar.namespaceSelector | indent 4 }} +{{- end }} +{{- if .Values.injectKanisterSidecar.objectSelector }} + objectSelector: +{{ toYaml .Values.injectKanisterSidecar.objectSelector | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/templates/networkpolicy.yaml b/charts/k10/k10/4.5.900/templates/networkpolicy.yaml new file mode 100644 index 000000000..2cd4dae9f --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/networkpolicy.yaml @@ -0,0 +1,192 @@ +{{- $admin_port := default 8877 .Values.service.gatewayAdminPort -}} +{{- $mutating_webhook_port := default 8080 .Values.injectKanisterSidecar.webhookServer.port -}} +{{- if .Values.networkPolicy.create }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-deny + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: {} + policyTypes: + - Ingress +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: cross-services-allow + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + ingress: + - from: + - podSelector: + matchLabels: + release: {{ .Release.Name }} + ports: + - protocol: TCP + port: {{ .Values.service.externalPort }} +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: logging-allow-internal + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + run: logging-svc + ingress: + - from: + - podSelector: + matchLabels: + release: {{ .Release.Name }} + ports: + # Logging input port + - protocol: TCP + port: 24224 + - protocol: TCP + port: 24225 +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-external + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + service: gateway + release: {{ .Release.Name }} + ingress: + - from: [] + ports: + - protocol: TCP + port: 8000 +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-all-api + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + run: aggregatedapis-svc + release: {{ .Release.Name }} + ingress: + - from: + ports: + - protocol: TCP + port: {{ .Values.service.aggregatedApiPort }} +{{- if .Values.gateway.exposeAdminPort }} +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-gateway-admin + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . 
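The policy set above follows a default-deny-plus-allowlist pattern: default-deny selects every pod in the namespace (empty podSelector) and blocks all ingress, and each later policy re-opens exactly one path. An additional allow in the same style would look roughly like this; all names, labels, and the port are hypothetical:

  kind: NetworkPolicy
  apiVersion: networking.k8s.io/v1
  metadata:
    name: example-extra-allow
  spec:
    podSelector:
      matchLabels:
        run: example-svc
    ingress:
    - from:
      - podSelector:
          matchLabels:
            release: k10
      ports:
      - protocol: TCP
        port: 8000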
| indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + service: gateway + ingress: + - from: + - podSelector: + matchLabels: + app: prometheus + component: server + release: {{ .Release.Name }} + ports: + - protocol: TCP + port: {{ $admin_port }} +{{- end -}} +{{- if .Values.injectKanisterSidecar.enabled }} +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-mutating-webhook + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + run: config-svc + ingress: + - from: + ports: + - protocol: TCP + port: {{ $mutating_webhook_port }} +{{- end -}} +{{- if or .Values.auth.dex.enabled (eq (include "check.dexAuth" .) "true") }} +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: gateway-dex-allow + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + run: auth-svc + ingress: + - from: + - podSelector: + matchLabels: + service: gateway + release: {{ .Release.Name }} + ports: + - protocol: TCP + port: 8080 +{{- end -}} +{{- $mainCtx := . }} +{{- $colocatedList := include "get.enabledColocatedSvcList" . | fromYaml }} +{{- range $primary, $secondaryList := $colocatedList }} +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ $primary }}-svc-allow-secondary-services + namespace: {{ $mainCtx.Release.Namespace }} + labels: +{{ include "helm.labels" $mainCtx | indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ $mainCtx.Release.Name }} + run: {{ $primary }}-svc + ingress: + - from: + - podSelector: + matchLabels: + release: {{ $mainCtx.Release.Name }} + ports: + {{- range $skip, $secondary := $secondaryList }} + {{- $colocConfig := index (include "k10.colocatedServices" . | fromYaml) $secondary }} + - protocol: TCP + port: {{ $colocConfig.port }} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/templates/prometheus-configmap.yaml b/charts/k10/k10/4.5.900/templates/prometheus-configmap.yaml new file mode 100644 index 000000000..55c44c96d --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/prometheus-configmap.yaml @@ -0,0 +1,70 @@ +{{ $scrape_services := (include "k10.restServices" . | splitList " " ) }} +{{- if .Values.gateway.exposeAdminPort -}} + {{- $scrape_services = append (include "k10.restServices" . | splitList " " ) "gateway" -}} +{{- end -}} + +{{- include "check.validateMonitoringProperties" .}} +{{- if .Values.prometheus.server.enabled -}} +{{- $rbac := .Values.prometheus.rbac.create -}} +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: {{ .Release.Name }}-{{ .Values.prometheus.server.configMapOverrideName }} +data: + prometheus.yml: | + global: + scrape_interval: 1m + scrape_timeout: 10s + evaluation_interval: 1m + scrape_configs: +{{- range $scrape_services -}} +{{- if or (not (hasKey $.Values.optionalColocatedServices .)) (index $.Values.optionalColocatedServices .).enabled }} +{{ $tmpcontx := dict "main" $ "k10service" . -}} +{{ include "k10.prometheusScrape" $tmpcontx | indent 6 -}} +{{- end }} +{{- end }} +{{- range include "k10.services" . | splitList " " }} +{{- if (or (ne . "aggregatedapis") ($rbac)) }} +{{ $tmpcontx := dict "main" $ "k10service" . 
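Besides the generated per-service jobs above, operator-supplied scrape jobs can be appended through prometheus.extraScrapeConfigs, which is spliced in verbatim a few lines below. An illustrative value; the job name and target are placeholders:

  prometheus:
    extraScrapeConfigs: |
      - job_name: example-app
        static_configs:
        - targets:
          - example-app.default.svc:9090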
-}} +{{ include "k10.prometheusScrape" $tmpcontx | indent 6 -}} +{{- end }} +{{- end }} +{{- range include "k10.additionalServices" . | splitList " " }} +{{- if not (eq . "frontend") }} +{{ $tmpcontx := dict "main" $ "k10service" . -}} +{{ include "k10.prometheusScrape" $tmpcontx | indent 6 -}} +{{- end }} +{{- end }} +{{- if .Values.prometheus.extraScrapeConfigs }} +{{ .Values.prometheus.extraScrapeConfigs | indent 6 }} +{{- end -}} +{{- if .Values.prometheus.scrapeCAdvisor }} + - job_name: 'kubernetes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor +{{- end}} + - job_name: prometheus + metrics_path: {{ .Values.prometheus.server.baseURL }}metrics + static_configs: + - targets: + - "localhost:9090" + labels: + app: prometheus + component: server +{{- end -}} diff --git a/charts/k10/k10/4.5.900/templates/prometheus-service.yaml b/charts/k10/k10/4.5.900/templates/prometheus-service.yaml new file mode 100644 index 000000000..846ecbbd7 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/prometheus-service.yaml @@ -0,0 +1,44 @@ +{{/* Template to generate service spec for v0 rest services */}} +{{- if .Values.prometheus.server.enabled -}} +{{- $postfix := default .Release.Name .Values.ingress.urlPath -}} +{{- $os_postfix := default .Release.Name .Values.route.path -}} +{{- $service_port := .Values.prometheus.server.service.servicePort -}} +apiVersion: v1 +kind: Service +metadata: + namespace: {{ .Release.Namespace }} + name: {{ include "k10.prometheus.service.name" . }}-exp + labels: +{{ include "helm.labels" $ | indent 4 }} + component: {{ include "k10.prometheus.service.name" . }} + run: {{ include "k10.prometheus.service.name" . }} + annotations: + getambassador.io/config: | + --- + apiVersion: ambassador/v1 + kind: Mapping + name: {{ include "k10.prometheus.service.name" . }}-mapping + {{- if .Values.prometheus.server.baseURL }} + rewrite: /{{ .Values.prometheus.server.baseURL | trimPrefix "/" | trimSuffix "/" }}/ + {{- else }} + rewrite: / + {{- end }} + {{- if .Values.route.enabled }} + prefix: /{{ $os_postfix | trimPrefix "/" | trimSuffix "/" }}/prometheus/ + {{- else }} + prefix: /{{ $postfix | trimPrefix "/" | trimSuffix "/" }}/prometheus/ + {{- end }} + service: {{ include "k10.prometheus.service.name" . }}:{{ $service_port }} + timeout_ms: 15000 + +spec: + ports: + - name: http + protocol: TCP + port: {{ $service_port }} + targetPort: 9090 + selector: + app: {{ include "k10.prometheus.name" . }} + component: {{ .Values.prometheus.server.name }} + release: {{ .Release.Name }} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/templates/rbac.yaml b/charts/k10/k10/4.5.900/templates/rbac.yaml new file mode 100644 index 000000000..f30536605 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/rbac.yaml @@ -0,0 +1,234 @@ +{{- $main := . -}} +{{- $apiDomain := include "apiDomain" . -}} + +{{- $actionsAPIs := splitList " " (include "k10.actionsAPIs" .) -}} +{{- $aggregatedAPIs := splitList " " (include "k10.aggregatedAPIs" .) -}} +{{- $appsAPIs := splitList " " (include "k10.appsAPIs" .) 
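The -exp Service above publishes Prometheus underneath the dashboard's URL path and rewrites requests to Prometheus' own base URL. Assuming a release named k10, no custom baseURL, and a placeholder upstream service name, the annotation renders roughly as:

  getambassador.io/config: |
    ---
    apiVersion: ambassador/v1
    kind: Mapping
    name: prometheus-server-mapping   # placeholder name
    rewrite: /
    prefix: /k10/prometheus/
    service: prometheus-server:80     # placeholder service and port
    timeout_ms: 15000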
-}} +{{- $authAPIs := splitList " " (include "k10.authAPIs" .) -}} +{{- $configAPIs := splitList " " (include "k10.configAPIs" .) -}} +{{- $distAPIs := splitList " " (include "k10.distAPIs" .) -}} +{{- $reportingAPIs := splitList " " (include "k10.reportingAPIs" .) -}} + +{{- if .Values.rbac.create }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Namespace }}-{{ template "serviceAccountName" . }}-cluster-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: {{ template "serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- if not ( eq (include "meteringServiceAccountName" .) (include "serviceAccountName" .) )}} +- kind: ServiceAccount + name: {{ template "meteringServiceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Name }}-admin +rules: +- apiGroups: +{{- range sortAlpha (concat $aggregatedAPIs $configAPIs $reportingAPIs) }} + - {{ . }}.{{ $apiDomain }} +{{- end }} + resources: + - "*" + verbs: + - "*" +- apiGroups: + - cr.kanister.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - "" + resources: + - namespaces + verbs: + - create + - get + - list +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Name }}-ns-admin + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - "" + - "apps" + resources: + - deployments + - pods + verbs: + - get + - list +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - update +- apiGroups: + - "batch" + resources: + - jobs + verbs: + - get +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Name }}-mc-admin +rules: +- apiGroups: +{{- range sortAlpha (concat $authAPIs $configAPIs $distAPIs) }} + - {{ . }}.{{ $apiDomain }} +{{- end }} + resources: + - "*" + verbs: + - "*" +- apiGroups: + - "" + resources: + - secrets + verbs: + - "*" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Name }}-basic +rules: +- apiGroups: +{{- range sortAlpha $actionsAPIs }} + - {{ . }}.{{ $apiDomain }} +{{- end }} + resources: + - {{ include "k10.backupActions" $main}} + - {{ include "k10.backupActionsDetails" $main}} + - {{ include "k10.restoreActions" $main}} + - {{ include "k10.restoreActionsDetails" $main}} + - {{ include "k10.exportActions" $main}} + - {{ include "k10.exportActionsDetails" $main}} + - {{ include "k10.cancelActions" $main}} + verbs: + - "*" +- apiGroups: +{{- range sortAlpha $appsAPIs }} + - {{ . }}.{{ $apiDomain }} +{{- end }} + resources: + - {{ include "k10.restorePoints" $main}} + - {{ include "k10.restorePointsDetails" $main}} + - {{ include "k10.applications" $main}} + - {{ include "k10.applicationsDetails" $main}} + verbs: + - "*" +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: +{{- range sortAlpha $configAPIs }} + - {{ . 
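Each sortAlpha range above expands into fully qualified API groups of the form <group>.<apiDomain>. Assuming the apiDomain helper resolves to kio.kasten.io (an assumption; the real value comes from the chart helpers), a single rule renders roughly as:

  # Illustrative expansion of one rule; group names are examples.
  - apiGroups:
      - actions.kio.kasten.io
      - apps.kio.kasten.io
      - config.kio.kasten.io
    resources:
      - "*"
    verbs:
      - "*"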
}}.{{ $apiDomain }} +{{- end }} + resources: + - {{ include "k10.policies" $main}} + verbs: + - "*" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Name }}-config-view +rules: +- apiGroups: +{{- range sortAlpha $configAPIs }} + - {{ . }}.{{ $apiDomain }} +{{- end }} + resources: + - {{ include "k10.profiles" $main}} + - {{ include "k10.policies" $main}} + verbs: + - get + - list +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Namespace }}-{{ template "serviceAccountName" . }}-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: k10:admins +{{- range .Values.auth.k10AdminUsers }} +- apiGroup: rbac.authorization.k8s.io + kind: User + name: {{ . }} +{{- end }} +{{- range default .Values.auth.groupAllowList .Values.auth.k10AdminGroups }} +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: {{ . }} +{{- end }} +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Namespace }}-{{ template "serviceAccountName" . }}-ns-admin + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Release.Name }}-ns-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: k10:admins +{{- range .Values.auth.k10AdminUsers }} +- apiGroup: rbac.authorization.k8s.io + kind: User + name: {{ . }} +{{- end }} +{{- range default .Values.auth.groupAllowList .Values.auth.k10AdminGroups }} +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: {{ . }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/templates/route.yaml b/charts/k10/k10/4.5.900/templates/route.yaml new file mode 100644 index 000000000..1ecd244be --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/route.yaml @@ -0,0 +1,36 @@ +{{- $route := .Values.route -}} +{{- if $route.enabled -}} +{{ include "authEnabled.check" . }} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ .Release.Name }}-route + {{- with $route.annotations }} + namespace: {{ .Release.Namespace }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: +{{ include "helm.labels" . | indent 4 }} + {{- with $route.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + host: {{ $route.host }} + path: /{{ default .Release.Name $route.path | trimPrefix "/" | trimSuffix "/" }}/ + port: + targetPort: http + to: + kind: Service + name: gateway + weight: 100 + {{- if $route.tls.enabled }} + tls: + {{- if $route.tls.insecureEdgeTerminationPolicy }} + insecureEdgeTerminationPolicy: {{ $route.tls.insecureEdgeTerminationPolicy }} + {{- end }} + {{- if $route.tls.termination }} + termination: {{ $route.tls.termination }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/k10/k10/4.5.900/templates/scc.yaml b/charts/k10/k10/4.5.900/templates/scc.yaml new file mode 100644 index 000000000..df12af4e3 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/scc.yaml @@ -0,0 +1,43 @@ +{{- if .Values.scc.create }} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + labels: +{{ include "helm.labels" . 
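Beyond the built-in k10:admins group, the bindings above extend to any identities supplied through values; illustrative names:

  auth:
    k10AdminUsers:
      - alice@example.com
    k10AdminGroups:
      - platform-team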
| indent 4 }} + name: {{ .Release.Name }}-prometheus-server +allowPrivilegedContainer: false +allowHostNetwork: false +allowHostDirVolumePlugin: true +priority: null +allowedCapabilities: null +allowHostPorts: true +allowHostPID: false +allowHostIPC: false +readOnlyRootFilesystem: false +requiredDropCapabilities: +- CHOWN +- KILL +- MKNOD +- SETUID +- SETGID +defaultAddCapabilities: [] +allowedCapabilities: [] +priority: 0 +runAsUser: + type: MustRunAsNonRoot +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +users: + - system:serviceaccount:{{.Release.Namespace}}:prometheus-server +{{- end }} diff --git a/charts/k10/k10/4.5.900/templates/secrets.yaml b/charts/k10/k10/4.5.900/templates/secrets.yaml new file mode 100644 index 000000000..80bfa6b8a --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/secrets.yaml @@ -0,0 +1,241 @@ +{{- include "enforce.singlecloudcreds" . -}} +{{- include "check.validateImagePullSecrets" . -}} +{{- if eq (include "check.awscreds" . ) "true" }} +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: aws-creds +type: Opaque +data: + aws_access_key_id: {{ required "secrets.awsAccessKeyId field is required!" .Values.secrets.awsAccessKeyId | b64enc | quote }} + aws_secret_access_key: {{ required "secrets.awsSecretAccessKey field is required!" .Values.secrets.awsSecretAccessKey | b64enc | quote }} +{{- if .Values.secrets.awsIamRole }} + role: {{ .Values.secrets.awsIamRole | trim | b64enc | quote }} +{{- end }} +{{- end }} +{{- if or .Values.secrets.dockerConfig .Values.secrets.dockerConfigPath }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-ecr +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ or .Values.secrets.dockerConfig ( .Values.secrets.dockerConfigPath | b64enc ) }} +{{- end }} +{{- if eq (include "check.googlecreds" .) "true" }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: google-secret +type: Opaque +data: + kasten-gke-sa.json: {{ .Values.secrets.googleApiKey }} +{{- end }} +{{- if eq (include "check.ibmslcreds" .) "true" }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: ibmsl-secret +type: Opaque +data: + ibm_sl_key: {{ required "secrets.ibmSoftLayerApiKey field is required!" .Values.secrets.ibmSoftLayerApiKey | b64enc | quote }} + ibm_sl_username: {{ required "secrets.ibmSoftLayerApiUsername field is required!" .Values.secrets.ibmSoftLayerApiUsername | b64enc | quote }} +{{- end }} +{{- if eq (include "check.azurecreds" .) "true" }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: azure-creds +type: Opaque +data: + azure_tenant_id: {{ required "secrets.azureTenantId field is required!" .Values.secrets.azureTenantId | b64enc | quote }} + azure_client_id: {{ required "secrets.azureClientId field is required!" .Values.secrets.azureClientId | b64enc | quote }} + azure_client_secret: {{ required "secrets.azureClientSecret field is required!" 
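The aws-creds Secret above fails the render unless both halves of the key pair are present; an illustrative values snippet with placeholder credentials:

  secrets:
    awsAccessKeyId: "AKIAEXAMPLEEXAMPLE"
    awsSecretAccessKey: "example-secret-access-key"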
.Values.secrets.azureClientSecret | b64enc | quote }} + azure_resource_group: {{ default "" .Values.secrets.azureResourceGroup | b64enc | quote }} + azure_subscription_id: {{ default "" .Values.secrets.azureSubscriptionID | b64enc | quote }} + azure_resource_manager_endpoint: {{ default "" .Values.secrets.azureResourceMgrEndpoint | b64enc | quote }} + azure_ad_endpoint: {{ default "" .Values.secrets.azureADEndpoint | b64enc | quote }} + azure_ad_resource_id: {{ default "" .Values.secrets.azureADResourceID | b64enc | quote }} +{{- end }} +{{- if eq (include "check.vspherecreds" .) "true" }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: vsphere-creds +type: Opaque +data: + vsphere_endpoint: {{ required "secrets.vsphereEndpoint field is required!" .Values.secrets.vsphereEndpoint | b64enc | quote }} + vsphere_username: {{ required "secrets.vsphereUsername field is required!" .Values.secrets.vsphereUsername | b64enc | quote }} + vsphere_password: {{ required "secrets.vspherePassword field is required!" .Values.secrets.vspherePassword | b64enc | quote }} +{{- end }} +{{- if and (eq (include "basicauth.check" .) "true") (not .Values.auth.basicAuth.secretName) }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-basic-auth + namespace: {{ .Release.Namespace }} +data: + auth: {{ required "auth.basicAuth.htpasswd field is required!" .Values.auth.basicAuth.htpasswd | b64enc | quote}} +type: Opaque +{{- end }} +{{- if .Values.auth.tokenAuth.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-token-auth + namespace: {{ .Release.Namespace }} +data: + auth: {{ "true" | b64enc | quote}} +type: Opaque +{{- end }} +{{- if and .Values.auth.oidcAuth.enabled (not .Values.auth.oidcAuth.secretName) }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-oidc-auth + namespace: {{ .Release.Namespace }} +data: + provider-url: {{ required "auth.oidcAuth.providerURL field is required!" .Values.auth.oidcAuth.providerURL | b64enc | quote }} + redirect-url: {{ required "auth.oidcAuth.redirectURL field is required!" .Values.auth.oidcAuth.redirectURL | b64enc | quote }} + client-id: {{ required "auth.oidcAuth.clientID field is required!" .Values.auth.oidcAuth.clientID | b64enc | quote }} + client-secret: {{ required "auth.oidcAuth.clientSecret field is required!" .Values.auth.oidcAuth.clientSecret | b64enc | quote }} + scopes: {{ required "auth.oidcAuth.scopes field is required!" .Values.auth.oidcAuth.scopes | b64enc | quote }} + prompt: {{ default "select_account" .Values.auth.oidcAuth.prompt | b64enc | quote }} + usernameClaim: {{ default "sub" .Values.auth.oidcAuth.usernameClaim | b64enc | quote }} + usernamePrefix: {{ default "" .Values.auth.oidcAuth.usernamePrefix | b64enc | quote }} + groupClaim: {{ default "" .Values.auth.oidcAuth.groupClaim | b64enc | quote }} + groupPrefix: {{ default "" .Values.auth.oidcAuth.groupPrefix | b64enc | quote }} +stringData: + groupAllowList: |- +{{- range $.Values.auth.groupAllowList }} + {{ . -}} +{{ end }} + logout-url: {{ default "" .Values.auth.oidcAuth.logoutURL | b64enc | quote }} +type: Opaque +{{- end }} +{{- if and .Values.auth.openshift.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . 
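The k10-oidc-auth Secret above requires the full provider handshake configuration; an illustrative values snippet with placeholder endpoints and IDs:

  auth:
    oidcAuth:
      enabled: true
      providerURL: "https://accounts.example.com"
      redirectURL: "https://k10.example.com/k10"
      clientID: "k10-client"
      clientSecret: "<client secret>"
      scopes: "groups profile email"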
| indent 4 }} + name: k10-oidc-auth + namespace: {{ .Release.Namespace }} +data: + provider-url: {{ required "auth.openshift.dashboardURL field is required!" (printf "%s/dex" (trimSuffix "/" .Values.auth.openshift.dashboardURL)) | b64enc | quote }} + {{- if .Values.route.enabled }} + redirect-url: {{ required "auth.openshift.dashboardURL field is required!" (trimSuffix "/" (trimSuffix (default .Release.Name .Values.route.path) (trimSuffix "/" .Values.auth.openshift.dashboardURL))) | b64enc | quote }} + {{- else }} + redirect-url: {{ required "auth.openshift.dashboardURL field is required!" (trimSuffix "/" (trimSuffix (default .Release.Name .Values.ingress.urlPath) (trimSuffix "/" .Values.auth.openshift.dashboardURL))) | b64enc | quote }} + {{- end }} + client-id: {{ (printf "kasten") | b64enc | quote }} + client-secret: {{ (printf "kastensecret") | b64enc | quote }} + scopes: {{ (printf "groups profile email") | b64enc | quote }} + prompt: {{ (printf "select_account") | b64enc | quote }} + usernameClaim: {{ default "email" .Values.auth.openshift.usernameClaim | b64enc | quote }} + usernamePrefix: {{ default "" .Values.auth.openshift.usernamePrefix | b64enc | quote }} + groupClaim: {{ default "groups" .Values.auth.openshift.groupClaim | b64enc | quote }} + groupPrefix: {{ default "" .Values.auth.openshift.groupPrefix | b64enc | quote }} +stringData: + groupAllowList: |- +{{- range $.Values.auth.groupAllowList }} + {{ . -}} +{{ end }} +type: Opaque +{{- end }} +{{- if and .Values.auth.ldap.enabled (not .Values.auth.ldap.secretName) }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-oidc-auth + namespace: {{ .Release.Namespace }} +data: + provider-url: {{ required "auth.ldap.dashboardURL field is required!" (printf "%s/dex" (trimSuffix "/" .Values.auth.ldap.dashboardURL)) | b64enc | quote }} + {{- if .Values.route.enabled }} + redirect-url: {{ required "auth.ldap.dashboardURL field is required!" (trimSuffix "/" (trimSuffix (default .Release.Name .Values.route.path) (trimSuffix "/" .Values.auth.ldap.dashboardURL))) | b64enc | quote }} + {{- else }} + redirect-url: {{ required "auth.ldap.dashboardURL field is required!" (trimSuffix "/" (trimSuffix (default .Release.Name .Values.ingress.urlPath) (trimSuffix "/" .Values.auth.ldap.dashboardURL))) | b64enc | quote }} + {{- end }} + client-id: {{ (printf "kasten") | b64enc | quote }} + client-secret: {{ (printf "kastensecret") | b64enc | quote }} + scopes: {{ (printf "groups profile email") | b64enc | quote }} + prompt: {{ (printf "select_account") | b64enc | quote }} + usernameClaim: {{ default "email" .Values.auth.ldap.usernameClaim | b64enc | quote }} + usernamePrefix: {{ default "" .Values.auth.ldap.usernamePrefix | b64enc | quote }} + groupClaim: {{ default "groups" .Values.auth.ldap.groupClaim | b64enc | quote }} + groupPrefix: {{ default "" .Values.auth.ldap.groupPrefix | b64enc | quote }} +stringData: + groupAllowList: |- +{{- range $.Values.auth.groupAllowList }} + {{ . -}} +{{ end }} +type: Opaque +{{- end }} +{{- if and .Values.auth.ldap.enabled (not .Values.auth.ldap.bindPWSecretName) }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-dex + namespace: {{ .Release.Namespace }} +data: + bindPW: {{ required "auth.ldap.bindPW field is required!" .Values.auth.ldap.bindPW | b64enc | quote }} +type: Opaque +{{- end }} +{{- if eq (include "check.primaryKey" . 
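The LDAP bind password can be supplied inline (rendered into the k10-dex Secret above) or through a pre-created Secret, which suppresses this block entirely; illustrative values for both routes:

  auth:
    ldap:
      bindPW: "<bind password>"          # inline route
      # bindPWSecretName: my-bind-secret # or reference an existing Secret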
) "true" }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-encryption-primary-key + namespace: {{ .Release.Namespace }} +data: + {{- if .Values.encryption.primaryKey.awsCmkKeyId }} + awscmkkeyid: {{ default "" .Values.encryption.primaryKey.awsCmkKeyId | trim | b64enc | quote }} + {{- end }} + {{- if .Values.encryption.primaryKey.vaultTransitKeyName }} + vaulttransitkeyname: {{ default "" .Values.encryption.primaryKey.vaultTransitKeyName | trim | b64enc | quote }} + vaulttransitpath: {{ default "transit" .Values.encryption.primaryKey.vaultTransitPath | trim | b64enc | quote }} + {{- end }} +type: Opaque +{{- end }} diff --git a/charts/k10/k10/4.5.900/templates/serviceaccount.yaml b/charts/k10/k10/4.5.900/templates/serviceaccount.yaml new file mode 100644 index 000000000..a7704e4e6 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/serviceaccount.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.serviceAccount.create ( not .Values.metering.awsMarketplace ) ( not .Values.metering.awsManagedLicense ) }} +kind: ServiceAccount +apiVersion: v1 +metadata: +{{- if .Values.secrets.awsIamRole }} + annotations: + eks.amazonaws.com/role-arn: {{ .Values.secrets.awsIamRole }} +{{- end }} + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ template "serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} +{{- if and (not ( eq (include "meteringServiceAccountName" .) (include "serviceAccountName" .))) ( not .Values.metering.awsManagedLicense ) .Values.metering.serviceAccount.create }} +--- +kind: ServiceAccount +apiVersion: v1 +metadata: +{{- if .Values.metering.awsMarketPlaceIamRole }} + annotations: + eks.amazonaws.com/role-arn: {{ .Values.metering.awsMarketPlaceIamRole }} +{{- end }} + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ template "meteringServiceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/k10/k10/4.5.900/templates/v0services.yaml b/charts/k10/k10/4.5.900/templates/v0services.yaml new file mode 100644 index 000000000..de881bf77 --- /dev/null +++ b/charts/k10/k10/4.5.900/templates/v0services.yaml @@ -0,0 +1,162 @@ +{{/* Template to generate service spec for v0 rest services */}} +{{- $container_port := .Values.service.internalPort -}} +{{- $service_port := .Values.service.externalPort -}} +{{- $aggregated_api_port := .Values.service.aggregatedApiPort -}} +{{- $postfix := default .Release.Name .Values.ingress.urlPath -}} +{{- $colocated_services := include "k10.colocatedServices" . | fromYaml -}} +{{- $exposed_services := include "k10.exposedServices" . | splitList " " -}} +{{- $os_postfix := default .Release.Name .Values.route.path -}} +{{- $main_context := . -}} +{{- range append (include "k10.restServices" . | splitList " ") "frontend" }} + {{ if not (hasKey $colocated_services . ) }} +apiVersion: v1 +kind: Service +metadata: + namespace: {{ $.Release.Namespace }} + name: {{ . }}-svc + labels: +{{ include "helm.labels" $ | indent 4 }} + component: {{ . }} + run: {{ . }}-svc +{{ if or (has . $exposed_services) (eq . "frontend") }} + annotations: + getambassador.io/config: | + --- + apiVersion: ambassador/v1 + kind: Mapping + name: {{ . }}-mapping + {{- if $.Values.route.enabled }} + {{- if eq . "frontend" }} + prefix: /{{ $os_postfix | trimPrefix "/" | trimSuffix "/" }}/ + {{- else }} + prefix: /{{ $os_postfix | trimPrefix "/" | trimSuffix "/" }}/{{ . }}-svc/ + {{- end }} + {{- else }} + {{- if eq . 
"frontend" }} + prefix: /{{ $postfix | trimPrefix "/" | trimSuffix "/" }}/ + {{- else }} + prefix: /{{ $postfix | trimPrefix "/" | trimSuffix "/" }}/{{ . }}-svc/ + {{- end }} + {{- end }} + rewrite: / + service: {{ . }}-svc.{{ $.Release.Namespace }}:{{ $service_port }} + timeout_ms: 30000 +{{- $colocatedList := include "get.enabledColocatedSvcList" $main_context | fromYaml }} +{{- range $skip, $secondary := index $colocatedList . }} + {{- $colocConfig := index (include "k10.colocatedServices" . | fromYaml) $secondary }} + {{- if $colocConfig.isExposed }} + --- + apiVersion: ambassador/v1 + kind: Mapping + name: {{ $secondary }}-mapping + prefix: /{{ $postfix }}/{{ $secondary }}-svc/ + rewrite: / + service: {{ $colocConfig.primary }}-svc.{{ $.Release.Namespace }}:{{ $colocConfig.port }} + timeout_ms: 30000 + {{- end }} +{{- end }} +{{- end }} +spec: + ports: + - name: http + protocol: TCP + port: {{ $service_port }} + targetPort: {{ $container_port }} +{{- $colocatedList := include "get.enabledColocatedSvcList" $main_context | fromYaml }} +{{- range $skip, $secondary := index $colocatedList . }} + {{- $colocConfig := index (include "k10.colocatedServices" . | fromYaml) $secondary }} + - name: {{ $secondary }} + protocol: TCP + port: {{ $colocConfig.port }} + targetPort: {{ $colocConfig.port }} +{{- end }} +{{- if eq . "logging" }} + - name: logging + protocol: TCP + port: 24224 + targetPort: 24224 + - name: logging-metrics + protocol: TCP + port: 24225 + targetPort: 24225 +{{- end }} + selector: + run: {{ . }}-svc +--- + {{ end }}{{/* if not (hasKey $colocated_services $k10_service ) */}} +{{ end -}}{{/* range append (include "k10.restServices" . | splitList " ") "frontend" */}} +{{- range append (include "k10.services" . | splitList " ") "kanister" }} +apiVersion: v1 +kind: Service +metadata: + namespace: {{ $.Release.Namespace }} + name: {{ . }}-svc + labels: +{{ include "helm.labels" $ | indent 4 }} + component: {{ . }} + run: {{ . }}-svc +spec: + ports: + {{- if eq . "aggregatedapis" }} + - name: http + port: 443 + protocol: TCP + targetPort: {{ $aggregated_api_port }} + {{- else }} + - name: http + protocol: TCP + port: {{ $service_port }} + targetPort: {{ $container_port }} + {{- end }} + {{- if and (eq . "config") ($.Values.injectKanisterSidecar.enabled) }} + - name: https + protocol: TCP + port: 443 + targetPort: {{ $.Values.injectKanisterSidecar.webhookServer.port }} + {{- end }} +{{- $colocatedList := include "get.enabledColocatedSvcList" $main_context | fromYaml }} +{{- range $skip, $secondary := index $colocatedList . }} + {{- $colocConfig := index (include "k10.colocatedServices" . | fromYaml) $secondary }} + - name: {{ $secondary }} + protocol: TCP + port: {{ $colocConfig.port }} + targetPort: {{ $colocConfig.port }} +{{- end }} + selector: + run: {{ . }}-svc +--- +{{ end -}} +{{- if or .Values.auth.dex.enabled (eq (include "check.dexAuth" .) 
"true") }} +apiVersion: v1 +kind: Service +metadata: + annotations: + getambassador.io/config: | + --- + apiVersion: ambassador/v1 + kind: Mapping + name: dex-mapping + {{- if $.Values.route.enabled }} + prefix: /{{ $os_postfix | trimPrefix "/" | trimSuffix "/" }}/dex/ + {{- else }} + prefix: /{{ $postfix | trimPrefix "/" | trimSuffix "/" }}/dex/ + {{- end }} + rewrite: "" + service: dex.{{ $.Release.Namespace }}:8000 + timeout_ms: 30000 + name: dex + namespace: {{ $.Release.Namespace }} + labels: +{{ include "helm.labels" $ | indent 4 }} + component: dex + run: auth-svc +spec: + ports: + - name: http + port: {{ $service_port }} + protocol: TCP + targetPort: 8080 + selector: + run: auth-svc + type: ClusterIP +{{ end -}} diff --git a/charts/k10/k10/4.5.900/triallicense b/charts/k10/k10/4.5.900/triallicense new file mode 100644 index 000000000..cfe6dd46b --- /dev/null +++ b/charts/k10/k10/4.5.900/triallicense @@ -0,0 +1 @@ +Y3VzdG9tZXJOYW1lOiB0cmlhbHN0YXJ0ZXItbGljZW5zZQpkYXRlRW5kOiAnMjEwMC0wMS0wMVQwMDowMDowMC4wMDBaJwpkYXRlU3RhcnQ6ICcyMDIwLTAxLTAxVDAwOjAwOjAwLjAwMFonCmZlYXR1cmVzOgogIHRyaWFsOiBudWxsCmlkOiB0cmlhbC0wOWY4MzE5Zi0xODBmLTRhOTAtOTE3My1kOTJiNzZmMTgzNWUKcHJvZHVjdDogSzEwCnJlc3RyaWN0aW9uczoKICBub2RlczogNTAwCnNlcnZpY2VBY2NvdW50S2V5OiBudWxsCnZlcnNpb246IHYxLjAuMApzaWduYXR1cmU6IEYxbnVLUFV5STJtbDJGMmV1VHdGOXNZRTZMVU5rQ3ZiR2tTV1ZkT0ZqdERCb1B6SjUyVWFsVkFmRjVmQUxpcm5BcVhkcERnYi9YcnpxSEYrTE0xS2pEMVdXUFd0ZUdXNFc1anBPSFN0T296Y0c5M0pUUHF5M2l6TVk3RmczZVFLYTZzWDhBdnFwOXArWXVBMWNwTENlQ2dsR2dnOTVzSUFmYmRMMTBmV2d2RmR6QUt4dUZLN2psRzVtbG1CRVF5R0hrYWdoZFIrVGxzeUNTNEFkbXVBOEZodVUwZnRBdXN0b1M3R2JKd1BuTFI3STFZY1Q4OW8wU2xRZEJ2Yjg2QzdKbm1OdnY0aHhiSUo5TTJvWGJPSnQ4ZnBNcjhNWFR6YWRMTWJzSndhZ3VBVHlNUWF2cExHNXRPb0U2ZE1uMVlFVDZLdWZiYy9NdThVRDVYYXlDYTdkZz09Cg== diff --git a/charts/k10/k10/4.5.900/values.schema.json b/charts/k10/k10/4.5.900/values.schema.json new file mode 100644 index 000000000..0437e8d1b --- /dev/null +++ b/charts/k10/k10/4.5.900/values.schema.json @@ -0,0 +1,1089 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "type": "object", + "properties": { + "serviceAccount": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + } + }, + "image": { + "type": "object", + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "image": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "pullPolicy": { + "type": "string" + } + } + }, + "scc": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "networkPolicy": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "global": { + "type": "object", + "properties": { + "airgapped": { + "type": "object", + "properties": { + "repository": { + "type": "string" + } + } + }, + "persistence": { + "type": "object", + "properties": { + "mountPath": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "storageClass": { + "type": "string" + }, + "accessMode": { + "type": "string" + }, + "size": { + "type": "string" + }, + "metering": { + "type": "object", + "properties": { + "size": { + "type": "string" + } + } + }, + "catalog": { + "type": "object", + "properties": { + "size": { + "type": "string" + } + } + }, + "jobs": { + "type": "object", + "properties": { + "size": { + "type": "string" + } + } + }, + "logging": { + "type": "object", + "properties": { + "size": { + "type": "string" + } + } + } + } + }, + "upstreamCertifiedImages": { + "type": "boolean" + }, + 
"rhMarketPlace": { + "type": "boolean" + }, + "images": { + "type": "object", + "properties": { + "aggregatedapis": { + "type": "string" + }, + "auth": { + "type": "string" + }, + "catalog": { + "type": "string" + }, + "config": { + "type": "string" + }, + "crypto": { + "type": "string" + }, + "dashboardbff": { + "type": "string" + }, + "executor": { + "type": "string" + }, + "frontend": { + "type": "string" + }, + "jobs": { + "type": "string" + }, + "kanister": { + "type": "string" + }, + "logging": { + "type": "string" + }, + "metering": { + "type": "string" + }, + "state": { + "type": "string" + }, + "ambassador": { + "type": "string" + }, + "prometheus": { + "type": "string" + }, + "configmap-reload": { + "type": "string" + }, + "dex": { + "type": "string" + }, + "kanister-tools": { + "type": "string" + }, + "upgrade": { + "type": "string" + }, + "cephtool": { + "type": "string" + }, + "datamover": { + "type": "string" + } + } + } + } + }, + "metering": { + "type": "object", + "properties": { + "reportingKey": { + "type": "string" + }, + "consumerId": { + "type": "string" + }, + "awsMarketPlaceIamRole": { + "type": "string" + }, + "awsRegion": { + "type": "string" + }, + "serviceAccount": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + } + }, + "licenseConfigSecretName": { + "type": "string" + }, + "mode": { + "type": "string" + }, + "reportCollectionPeriod": { + "type": "integer" + }, + "reportPushPeriod": { + "type": "integer" + }, + "promoID": { + "type": "string" + } + } + }, + "route": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "host": { + "type": "string" + }, + "path": { + "type": "string" + }, + "annotations": { + "type": "object" + }, + "labels": { + "type": "object" + }, + "tls": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "insecureEdgeTerminationPolicy": { + "type": "string" + }, + "termination": { + "type": "string" + } + } + } + } + }, + "toolsImage": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "pullPolicy": { + "type": "string" + } + } + }, + "ambassadorImage": { + "type": "object", + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "image": { + "type": "string" + } + } + }, + "dexImage": { + "type": "object", + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "image": { + "type": "string" + } + } + }, + "kanisterToolsImage": { + "type": "object", + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "image": { + "type": "string" + }, + "pullPolicy": { + "type": "string" + } + } + }, + "eula": { + "type": "object", + "properties": { + "accept": { + "type": "boolean" + } + } + }, + "license": { + "type": "string" + }, + "prometheus": { + "type": "object", + "properties": { + "k10image": { + "type": "object", + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + } + } + }, + "initChownData": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "rbac": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "alertmanager": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "kubeStateMetrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "networkPolicy": { 
+ "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "nodeExporter": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "pushgateway": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "scrapeCAdvisor": { + "type": "boolean" + }, + "server": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "securityContext": { + "type": "object", + "properties": { + "runAsUser": { + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsGroup": { + "type": "integer" + }, + "fsGroup": { + "type": "integer" + } + } + }, + "retention": { + "type": "string" + }, + "strategy": { + "type": "object", + "properties": { + "rollingUpdate": { + "type": "object", + "properties": { + "maxSurge": { + "type": "string" + }, + "maxUnavailable": { + "type": "string" + } + } + }, + "type": { + "type": "string" + } + } + }, + "persistentVolume": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "storageClass": { + "type": "string" + } + } + }, + "configMapOverrideName": { + "type": "string" + }, + "fullnameOverride": { + "type": "string" + }, + "baseURL": { + "type": "string" + }, + "prefixURL": { + "type": "string" + } + } + }, + "serviceAccounts": { + "type": "object", + "properties": { + "alertmanager": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "kubeStateMetrics": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "nodeExporter": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "pushgateway": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "server": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + } + } + } + } + }, + "service": { + "type": "object", + "properties": { + "externalPort": { + "type": "integer" + }, + "internalPort": { + "type": "integer" + }, + "aggregatedApiPort": { + "type": "integer" + }, + "gatewayAdminPort": { + "type": "integer" + } + } + }, + "secrets": { + "type": "object", + "properties": { + "awsAccessKeyId": { + "type": "string" + }, + "awsSecretAccessKey": { + "type": "string" + }, + "awsIamRole": { + "type": "string" + }, + "googleApiKey": { + "type": "string" + }, + "dockerConfig": { + "type": "string" + }, + "dockerConfigPath": { + "type": "string" + }, + "azureTenantId": { + "type": "string" + }, + "azureClientId": { + "type": "string" + }, + "azureClientSecret": { + "type": "string" + }, + "azureResourceGroup": { + "type": "string" + }, + "azureSubscriptionID": { + "type": "string" + }, + "azureResourceMgrEndpoint": { + "type": "string" + }, + "azureADEndpoint": { + "type": "string" + }, + "azureADResourceID": { + "type": "string" + }, + "apiTlsCrt": { + "type": "string" + }, + "apiTlsKey": { + "type": "string" + }, + "ibmSoftLayerApiKey": { + "type": "string" + }, + "ibmSoftLayerApiUsername": { + "type": "string" + }, + "vsphereEndpoint": { + "type": "string" + }, + "vsphereUsername": { + "type": "string" + }, + "vspherePassword": { + "type": "string" + } + } + }, + "clusterName": { + "type": "string" + }, + "executorReplicas": { + "type": "integer" + }, + "logLevel": { + "type": "string" + }, + "apiservices": { + "type": "object", + "properties": { + "deployed": { + "type": "boolean" + } + } + }, + "injectKanisterSidecar": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" 
+ }, + "namespaceSelector": { + "type": "object", + "properties": { + "matchLabels": { + "type": "object" + } + } + }, + "objectSelector": { + "type": "object", + "properties": { + "matchLabels": { + "type": "object" + } + } + }, + "webhookServer": { + "type": "object", + "properties": { + "port": { + "type": "integer" + } + } + } + } + }, + "kanisterPodCustomLabels": { + "type": "string" + }, + "kanisterPodCustomAnnotations": { + "type": "string" + }, + "resources": { + "type": "object" + }, + "services": { + "type": "object", + "properties": { + "executor": { + "type": "object", + "properties": { + "hostNetwork": { + "type": "boolean" + } + } + }, + "dashboardbff": { + "type": "object", + "properties": { + "hostNetwork": { + "type": "boolean" + } + } + }, + "securityContext": { + "type": "object", + "properties": { + "runAsUser": { + "type": "integer" + }, + "fsGroup": { + "type": "integer" + } + } + } + } + }, + "apigateway": { + "type": "object", + "properties": { + "serviceResolver": { + "type": "string" + } + } + }, + "limiter": { + "type": "object", + "properties": { + "genericVolumeSnapshots": { + "type": "integer" + }, + "genericVolumeCopies": { + "type": "integer" + }, + "genericVolumeRestores": { + "type": "integer" + }, + "csiSnapshots": { + "type": "integer" + }, + "providerSnapshots": { + "type": "integer" + } + } + }, + "gateway": { + "type": "object", + "properties": { + "insecureDisableSSLVerify": { + "type": "boolean" + } + } + }, + "kanisterWithKopia": { + "type": "boolean" + }, + "ingress": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "tls": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "class": { + "type": "string" + }, + "host": { + "type": "string" + }, + "urlPath": { + "type": "string" + } + } + }, + "genericVolumeSnapshot": { + "type": "object", + "properties": { + "resources": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string" + }, + "cpu": { + "type": "string" + } + } + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "string" + }, + "cpu": { + "type": "string" + } + } + } + } + } + } + } + }, + "jaeger": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "agentDNS": { + "type": "string" + } + } + }, + "cacertconfigmap": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + }, + "externalGateway": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "annotations": { + "type": "object" + }, + "fqdn": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "awsSSLCertARN": { + "type": "string" + } + } + }, + "auth": { + "type": "object", + "properties": { + "groupAllowList": { + "type": "array", + "items": { + "type": "string" + } + }, + "basicAuth": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "secretName": { + "type": "string" + }, + "htpasswd": { + "type": "string" + } + } + }, + "tokenAuth": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "oidcAuth": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "providerURL": { + "type": "string" + }, + "redirectURL": { + "type": "string" + }, + "scopes": { + "type": "string" + }, + "prompt": { + "type": "string" + }, + "clientID": { + "type": "string" + }, + "clientSecret": { + 
"type": "string" + }, + "usernameClaim": { + "type": "string" + }, + "usernamePrefix": { + "type": "string" + }, + "groupClaim": { + "type": "string" + }, + "groupPrefix": { + "type": "string" + }, + "logoutURL": { + "type": "string" + }, + "secretName": { + "type": "string" + } + } + }, + "dex": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "providerURL": { + "type": "string" + }, + "redirectURL": { + "type": "string" + } + } + }, + "openshift": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "serviceAccount": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "dashboardURL": { + "type": "string" + }, + "openshiftURL": { + "type": "string" + }, + "insecureCA": { + "type": "boolean" + }, + "useServiceAccountCA": { + "type": "boolean" + }, + "secretName": { + "type": "string" + }, + "usernameClaim": { + "type": "string" + }, + "usernamePrefix": { + "type": "string" + }, + "groupnameClaim": { + "type": "string" + }, + "groupnamePrefix": { + "type": "string" + } + } + }, + "ldap": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "restartPod": { + "type": "boolean" + }, + "dashboardURL": { + "type": "string" + }, + "host": { + "type": "string" + }, + "insecureNoSSL": { + "type": "boolean" + }, + "insecureSkipVerifySSL": { + "type": "boolean" + }, + "startTLS": { + "type": "boolean" + }, + "bindDN": { + "type": "string" + }, + "bindPW": { + "type": "string" + }, + "bindPWSecretName": { + "type": "string" + }, + "userSearch": { + "type": "object", + "properties": { + "baseDN": { + "type": "string" + }, + "filter": { + "type": "string" + }, + "username": { + "type": "string" + }, + "idAttr": { + "type": "string" + }, + "emailAttr": { + "type": "string" + }, + "nameAttr": { + "type": "string" + }, + "preferredUsernameAttr": { + "type": "string" + } + } + }, + "groupSearch": { + "type": "object", + "properties": { + "baseDN": { + "type": "string" + }, + "filter": { + "type": "string" + }, + "userMatchers": { + "type": "array", + "items": { + "type": "string" + } + }, + "nameAttr": { + "type": "string" + } + } + }, + "secretName": { + "type": "string" + }, + "usernameClaim": { + "type": "string" + }, + "usernamePrefix": { + "type": "string" + }, + "groupnameClaim": { + "type": "string" + }, + "groupnamePrefix": { + "type": "string" + } + } + }, + "k10AdminUsers": { + "type": "array", + "items": { + "type": "string" + } + }, + "k10AdminGroups": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "cluster": { + "type": "object", + "properties": { + "domainName": { + "type": "string" + } + } + } +} diff --git a/charts/k10/k10/4.5.900/values.yaml b/charts/k10/k10/4.5.900/values.yaml new file mode 100644 index 000000000..d5f1067e4 --- /dev/null +++ b/charts/k10/k10/4.5.900/values.yaml @@ -0,0 +1,455 @@ +# Default values for k10. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +image: + registry: gcr.io + repository: kasten-images + image: '' + tag: '' + pullPolicy: Always + +rbac: + create: true +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is derived using the release and chart names. 
+  name: ""
+
+scc:
+  create: false
+
+networkPolicy:
+  create: true
+
+# An empty value for airgapped.repository means the installation is going
+# to be online; providing a value (for example via the --set flag) means
+# the installation is going to be offline (air-gapped)
+global:
+  airgapped:
+    repository: ''
+  persistence:
+    mountPath: "/mnt/k10state"
+    enabled: true
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ## set, choosing the default provisioner. (gp2 on AWS, standard on
+    ## GKE, AWS & OpenStack)
+    ##
+    storageClass: ""
+    accessMode: ReadWriteOnce
+    size: 20Gi
+    metering:
+      size: 2Gi
+    catalog:
+      size: ""
+    jobs:
+      size: ""
+    logging:
+      size: ""
+    grafana:
+      # Default value is set to 5Gi. This is the same as the default value
+      # from previous releases <= 4.5.1 where the Grafana sub chart used to
+      # reference grafana.persistence.size instead of the global values.
+      # Since the size remains the same across upgrades, the Grafana PVC
+      # is not deleted and recreated which means no Grafana data is lost
+      # while upgrading from <= 4.5.1
+      size: 5Gi
+  ## Whether to use certified versions of the upstream container images
+  ## TODO: @viveksinghggits, we don't need this anymore
+  upstreamCertifiedImages: false
+  ## Set it to true while generating the helm operator
+  rhMarketPlace: false
+  ## these values should not be provided by users; these are to be used by
+  ## the Red Hat Marketplace
+  images:
+    admin: ''
+    aggregatedapis: ''
+    auth: ''
+    catalog: ''
+    config: ''
+    crypto: ''
+    dashboardbff: ''
+    events: ''
+    executor: ''
+    frontend: ''
+    jobs: ''
+    kanister: ''
+    logging: ''
+    metering: ''
+    state: ''
+    ambassador: ''
+    prometheus: ''
+    configmap-reload: ''
+    dex: ''
+    kanister-tools: ''
+    upgrade: ''
+    cephtool: ''
+    datamover: ''
+    bloblifecyclemanager: ''
+    vbrintegrationapi: ''
+    grafana: ''
+  imagePullSecret: ''
+  ingress:
+    create: false
+    urlPath: "" #url path for k10 gateway
+    route:
+      enabled: false
+      path: ""
+
+
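+# A minimal air-gapped sketch (the registry host below is a placeholder):
+# after mirroring the K10 images into a private registry, the chart could
+# be pointed at the mirror along these lines:
+#   helm install k10 kasten/k10 --namespace kasten-io \
+#     --set global.airgapped.repository=registry.example.com/k10
+
+## OpenShift route configuration.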
+route: + enabled: false + # Host name for the route + host: "" + # Default path for the route + path: "" + + annotations: {} + # kubernetes.io/tls-acme: "true" + # haproxy.router.openshift.io/disable_cookies: "true" + # haproxy.router.openshift.io/balance: roundrobin + + labels: {} + # key: value + + # TLS configuration + tls: + enabled: false + # What to do in case of an insecure traffic edge termination + insecureEdgeTerminationPolicy: "Redirect" + # Where this TLS configuration should terminate + termination: "edge" + +toolsImage: + enabled: true + pullPolicy: Always + +ambassadorImage: + registry: quay.io + repository: datawire + image: ambassador + +dexImage: + registry: quay.io + repository: dexidp + image: dex + +kanisterToolsImage: + registry: ghcr.io + repository: kanisterio + image: kanister-tools + pullPolicy: Always + +ingress: + create: false + tls: + enabled: false + class: "" #Ingress controller type + host: "" #ingress object host name + urlPath: "" #url path for k10 gateway + pathType: "" + +eula: + accept: false #true value if EULA accepted + +license: "" #base64 encoded string provided by Kasten + +cluster: + domainName: "cluster.local" #default value is cluster.local + +prometheus: + k10image: + # take this value from image.repository + registry: gcr.io + repository: kasten-images + # Disabling init container + # which uses root cmds + initChownData: + enabled: false + rbac: + create: false + alertmanager: + enabled: false + kubeStateMetrics: + enabled: false + networkPolicy: + enabled: true + nodeExporter: + enabled: false + pushgateway: + enabled: false + scrapeCAdvisor: false + server: + # UID and groupid are from prometheus helm chart + enabled: true + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + retention: 30d + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 100% + type: RollingUpdate + persistentVolume: + enabled: true + storageClass: "" + configMapOverrideName: k10-prometheus-config + fullnameOverride: prometheus-server + baseURL: /k10/prometheus/ + prefixURL: /k10/prometheus + serviceAccounts: + alertmanager: + create: false + kubeStateMetrics: + create: false + nodeExporter: + create: false + pushgateway: + create: false + server: + create: true + +jaeger: + enabled: false + agentDNS: "" + +service: + externalPort: 8000 + internalPort: 8000 + aggregatedApiPort: 10250 + gatewayAdminPort: 8877 + +secrets: + awsAccessKeyId: '' + awsSecretAccessKey: '' + awsIamRole: '' + googleApiKey: '' + dockerConfig: '' + dockerConfigPath: '' + azureTenantId: '' + azureClientId: '' + azureClientSecret: '' + azureResourceGroup: '' + azureSubscriptionID: '' + azureResourceMgrEndpoint: '' + azureADEndpoint: '' + azureADResourceID: '' + apiTlsCrt: '' + apiTlsKey: '' + ibmSoftLayerApiKey: '' + ibmSoftLayerApiUsername: '' + vsphereEndpoint: '' + vsphereUsername: '' + vspherePassword: '' + +metering: + reportingKey: "" #[base64-encoded key] + consumerId: "" #project: + awsRegion: '' + awsMarketPlaceIamRole: '' + awsMarketplace: false # AWS cloud metering license mode + awsManagedLicense: false # AWS managed license mode + licenseConfigSecretName: '' # AWS managed license config secret for non-eks clusters + serviceAccount: + create: false + name: "" + mode: '' # controls metric and license reporting (set to `airgap` for private-network installs) + redhatMarketplacePayg: false # Redhat cloud metering license mode + reportCollectionPeriod: 1800 # metric report collection period in seconds + reportPushPeriod: 3600 # metric 
report push period in seconds + promoID: '' # sets the K10 promotion ID + +clusterName: '' +executorReplicas: 3 +logLevel: info + +externalGateway: + create: false + # Any standard service annotations + annotations: {} + # Host and domain name for the K10 API server + fqdn: + name: "" + #Supported types route53-mapper, external-dns + type: "" + # ARN for the AWS ACM SSL certificate used in the K10 API server (load balancer) + awsSSLCertARN: '' + +auth: + groupAllowList: [] +# - "group1" +# - "group2" + basicAuth: + enabled: false + secretName: "" #htpasswd based existing secret + htpasswd: "" #htpasswd string, which will be used for basic auth + tokenAuth: + enabled: false + oidcAuth: + enabled: false + providerURL: "" #URL to your OIDC provider + redirectURL: "" #URL to the K10 gateway service + scopes: "" #Space separated OIDC scopes required for userinfo. Example: "profile email" + prompt: "" #The prompt type to be requested with the OIDC provider. Default is select_account. + clientID: "" #ClientID given by the OIDC provider for K10 + clientSecret: "" #ClientSecret given by the OIDC provider for K10 + usernameClaim: "" #Claim to be used as the username + usernamePrefix: "" #Prefix that has to be used with the username obtained from the username claim + groupClaim: "" #Name of a custom OpenID Connect claim for specifying user groups + groupPrefix: "" #All groups will be prefixed with this value to prevent conflicts. + logoutURL: "" #URL to your OIDC provider's logout endpoint + #OIDC config based existing secret. + #Must include providerURL, redirectURL, scopes, clientID/secret and logoutURL. + secretName: "" + dex: + enabled: false + providerURL: "" + redirectURL: "" + openshift: + enabled: false + serviceAccount: "" #service account used as the OAuth client + clientSecret: "" #The token from the service account + dashboardURL: "" #The URL for accessing K10's dashboard + openshiftURL: "" #The URL of the Openshift API server + insecureCA: false + useServiceAccountCA: false + secretName: "" # The Kubernetes Secret that contains OIDC settings + usernameClaim: "email" + usernamePrefix: "" + groupnameClaim: "groups" + groupnamePrefix: "" + ldap: + enabled: false + restartPod: false # Enable this value to force a restart of the authentication service pod + dashboardURL: "" #The URL for accessing K10's dashboard + host: "" + insecureNoSSL: false + insecureSkipVerifySSL: false + startTLS: false + bindDN: "" + bindPW: "" + bindPWSecretName: "" + userSearch: + baseDN: "" + filter: "" + username: "" + idAttr: "" + emailAttr: "" + nameAttr: "" + preferredUsernameAttr: "" + groupSearch: + baseDN: "" + filter: "" + userMatchers: [] +# - userAttr: +# groupAttr: + nameAttr: "" + secretName: "" # The Kubernetes Secret that contains OIDC settings + usernameClaim: "email" + usernamePrefix: "" + groupnameClaim: "groups" + groupnamePrefix: "" + k10AdminUsers: [] + k10AdminGroups: [] + +optionalColocatedServices: + vbrintegrationapi: + enabled: false + +cacertconfigmap: + name: "" #Name of the configmap + +apiservices: + deployed: true # If false APIService objects will not be deployed + +injectKanisterSidecar: + enabled: false + namespaceSelector: + matchLabels: {} + # Set objectSelector to filter workloads + objectSelector: + matchLabels: {} + webhookServer: + port: 8080 # should not conflict with config server port (8000) + +kanisterPodCustomLabels : "" + +kanisterPodCustomAnnotations : "" + +genericVolumeSnapshot: + resources: + requests: + memory: "" + cpu: "" + limits: + memory: "" + cpu: "" + 
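+# As an illustration only (the label and sizes below are hypothetical),
+# the injectKanisterSidecar and genericVolumeSnapshot blocks above might
+# be filled in along these lines:
+#   injectKanisterSidecar:
+#     enabled: true
+#     namespaceSelector:
+#       matchLabels:
+#         k10/injectKanisterSidecar: "true"
+#   genericVolumeSnapshot:
+#     resources:
+#       requests:
+#         memory: "800Mi"
+#         cpu: "200m"
+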
+resources: {} + +services: + executor: + hostNetwork: false + dashboardbff: + hostNetwork: false + securityContext: + runAsUser: 1000 + fsGroup: 1000 + aggregatedapis: + hostNetwork: false + +apigateway: + serviceResolver: dns + +limiter: + genericVolumeSnapshots: 10 + genericVolumeCopies: 10 + genericVolumeRestores: 10 + csiSnapshots: 10 + providerSnapshots: 10 + +gateway: + insecureDisableSSLVerify: false + exposeAdminPort: true + +kanister: + backupTimeout: 45 + restoreTimeout: 600 + deleteTimeout: 45 + hookTimeout: 20 + checkRepoTimeout: 20 + statsTimeout: 20 + efsPostRestoreTimeout: 45 + podReadyWaitTimeout: 15 + +awsConfig: + assumeRoleDuration: "" + efsBackupVaultName: "" + +grafana: + enabled: true + prometheusName: prometheus-server + prometheusPrefixURL: /k10/prometheus + rbac: + namespaced: true + pspEnabled: false + +encryption: + primaryKey: # primaryKey is used for enabling encryption of K10 primary key + awsCmkKeyId: '' # Ensures AWS CMK is used for encrypting K10 primary key + vaultTransitKeyName: '' + vaultTransitPath: '' + +vmWare: + taskTimeoutMin: "" + +vault: + secretName: "" + address: "http://vault:8200" diff --git a/index.yaml b/index.yaml index e349ac8a9..e75e87783 100755 --- a/index.yaml +++ b/index.yaml @@ -2187,6 +2187,26 @@ entries: urls: - assets/k8s-triliovault-operator/k8s-triliovault-operator-v2.0.200.tgz version: v2.0.200 + k10: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: K10 + catalog.cattle.io/release-name: k10 + apiVersion: v2 + appVersion: 4.5.9 + created: "2022-02-17T18:52:45.774810006+05:30" + description: Kasten’s K10 Data Management Platform + digest: bca3c5f9a5335047c1afc753b90d7fae999b7314b21f0b15132f529e84941221 + home: https://kasten.io/ + icon: https://docs.kasten.io/_static/kasten-logo-vertical.png + kubeVersion: '>= 1.17.0-0' + maintainers: + - email: support@kasten.io + name: kastenIO + name: k10 + urls: + - assets/k10/k10-4.5.900.tgz + version: 4.5.900 kong: - annotations: catalog.cattle.io/certified: partner From 9013b37a14ce6b6abbe40f921104a96e5c047ac2 Mon Sep 17 00:00:00 2001 From: Samuel Attwood Date: Fri, 18 Feb 2022 00:55:07 -0500 Subject: [PATCH 05/13] Setting vals-operator chart URL to specific commit --- packages/vals-operator/package.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/vals-operator/package.yaml b/packages/vals-operator/package.yaml index 56221e7ff..cc9eba615 100644 --- a/packages/vals-operator/package.yaml +++ b/packages/vals-operator/package.yaml @@ -1,2 +1,2 @@ -url: https://digitalis-io.github.io/helm-charts/charts/vals-operator-0.3.0.tgz +url: https://github.com/digitalis-io/helm-charts/raw/5ff94fe16f376b385973f2771d9300ca3c27ba05/charts/vals-operator-0.3.0.tgz packageVersion: 01 From 60e874b815868a749527857ee94a0a64aeff0e56 Mon Sep 17 00:00:00 2001 From: Samuel Attwood Date: Fri, 18 Feb 2022 13:18:41 -0500 Subject: [PATCH 06/13] make patch --- packages/vals-operator/generated-changes/patch/Chart.yaml.patch | 2 +- packages/vals-operator/package.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/vals-operator/generated-changes/patch/Chart.yaml.patch b/packages/vals-operator/generated-changes/patch/Chart.yaml.patch index 183736bcf..5d87d0b10 100644 --- a/packages/vals-operator/generated-changes/patch/Chart.yaml.patch +++ b/packages/vals-operator/generated-changes/patch/Chart.yaml.patch @@ -3,7 +3,7 @@ @@ -10,3 +10,7 @@ name: vals-operator type: application - version: 0.3.0 + version: 0.4.0 +annotations: + 
catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Vals-Operator diff --git a/packages/vals-operator/package.yaml b/packages/vals-operator/package.yaml index cc9eba615..483bef0a2 100644 --- a/packages/vals-operator/package.yaml +++ b/packages/vals-operator/package.yaml @@ -1,2 +1,2 @@ -url: https://github.com/digitalis-io/helm-charts/raw/5ff94fe16f376b385973f2771d9300ca3c27ba05/charts/vals-operator-0.3.0.tgz +url: https://digitalis-io.github.io/helm-charts/charts/vals-operator-0.4.0.tgz packageVersion: 01 From 0839345f9a72cd7c432ca07fc6127e25eaff3481 Mon Sep 17 00:00:00 2001 From: Samuel Attwood Date: Fri, 18 Feb 2022 13:18:58 -0500 Subject: [PATCH 07/13] make charts --- assets/vals-operator/vals-operator-0.4.1.tgz | Bin 0 -> 5890 bytes .../vals-operator/0.4.1/.gitignore | 49 +++++++ .../vals-operator/0.4.1/Chart.yaml | 16 +++ .../vals-operator/0.4.1/README.md | 33 +++++ .../vals-operator/0.4.1/app-readme.md | 9 ++ .../vals-operator/0.4.1/crds/valssecrets.yaml | 130 ++++++++++++++++++ .../vals-operator/0.4.1/questions.yaml | 26 ++++ .../vals-operator/0.4.1/templates/NOTES.txt | 0 .../0.4.1/templates/_helpers.tpl | 62 +++++++++ .../vals-operator/0.4.1/templates/crds.yaml | 6 + .../0.4.1/templates/deployment.yaml | 73 ++++++++++ .../0.4.1/templates/serviceaccount.yaml | 64 +++++++++ .../0.4.1/templates/servicemonitor.yaml | 37 +++++ .../vals-operator/0.4.1/values.yaml | 106 ++++++++++++++ index.yaml | 20 +++ 15 files changed, 631 insertions(+) create mode 100644 assets/vals-operator/vals-operator-0.4.1.tgz create mode 100644 charts/vals-operator/vals-operator/0.4.1/.gitignore create mode 100644 charts/vals-operator/vals-operator/0.4.1/Chart.yaml create mode 100644 charts/vals-operator/vals-operator/0.4.1/README.md create mode 100644 charts/vals-operator/vals-operator/0.4.1/app-readme.md create mode 100644 charts/vals-operator/vals-operator/0.4.1/crds/valssecrets.yaml create mode 100644 charts/vals-operator/vals-operator/0.4.1/questions.yaml create mode 100644 charts/vals-operator/vals-operator/0.4.1/templates/NOTES.txt create mode 100644 charts/vals-operator/vals-operator/0.4.1/templates/_helpers.tpl create mode 100644 charts/vals-operator/vals-operator/0.4.1/templates/crds.yaml create mode 100644 charts/vals-operator/vals-operator/0.4.1/templates/deployment.yaml create mode 100644 charts/vals-operator/vals-operator/0.4.1/templates/serviceaccount.yaml create mode 100644 charts/vals-operator/vals-operator/0.4.1/templates/servicemonitor.yaml create mode 100644 charts/vals-operator/vals-operator/0.4.1/values.yaml diff --git a/assets/vals-operator/vals-operator-0.4.1.tgz b/assets/vals-operator/vals-operator-0.4.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..12e1010b4b54eb01ef9bf217664c1a5b079830b0 GIT binary patch literal 5890 zcmV+d7yalTiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH<$bK5qy{d)N;&M4h+lMN}!cAQ+zbmwLr@7CKqd}BA6-b^Qj z$Ri0g2(SPsM~(B}?*Jh95@{*5>uhI@54J=C=LO(A@Hko$rb3aSBwEPf;etpVtw_qg zy1_CUjYbFi`}Y56G;04Jj~a=q8EiE4k7{el@x_ zt~PUjl0rBanrJGxngjqb(S(V46ceo(MpO)Al$y>cCKJer)Es4JwS=mSkyXe^iW7Ka zM%AlgRCU&u$dD*3>NiGVKr;FUrJ{mQV7VKREUUhbqDRpvNKnO+X4*bGTTlfHWGTdE z_yXlh6UG$i1;R5rr-4Bn(yW!tij_>_5YHv z9Mw%5fDQWpV1IPb(*OIrd%Jh~|2B$5JH-!0&h-SwK|oV7M`H>kW*@IjL~Tr7k_NN`4Srh&TG zPK8%0KMKH9p1@#G&~}nDcIwpi1m68*)t5(MtIcx8)*c25<&wtuG>$E+ma@|fV`JMC zTr5y8Py&IX9uQX%)I#Jefhj^Pk!VbeIkTT8pk-bb&Crh zKiT$Y3$&UU#H_280hTL_Y~AA-aG^n=2Evnq7AroP<3k`05LGZoZaN4P$Q9*t3)ssB zG^QCN8cUO4WaKR3p-fc4TX4_tLJhHm0g>}OMXoK6m9>_0*Aa%e5I@8$Pn=y>8HrJW 
z(vtGIeGTwmhaydllr!RJV^nFwcCCRHK<8Y@g5hw1gy}_?NioH6N_i5JB$24vpp6v$ z8^f$^0LYU}P_DtWG{7VH#t*u|(E+}JwT;&oCo6>f+FtiA6hK#05>;mJ+Nl{B0tIv_6YqD98BQHp4L=;noVTO{5 z1d?1jFQUNkowxuo)5zVNuV}TrNeq9@F~_D)0=P6|uZHxIs^ODK7%vE)qw+{z+o#I3 zlPl0PMLWHX=70rJFh%5$XNehx5&S4}hzW-!#S24K3Kvu_Tz_i@&f}b71jiauvI4@C zFj^s@%JgLdb0(&Qv6Y>b$lb#XrAN7>#By8Bf7od-7&V+o$I}D`W+D!DDz44ldwnc!BY`_hIeua{M-=n2x+0FNls8*PFTvqFdc&yVOE1M&NHG3lm%-^KhS;FCsIl z1zw1xvm56!Ly&9~f>5fMDGx&(;5t449aMeQ74|H)eW@8SEImJ0K+N z6nyQ}U@tGTrc{OLmSu`>gODxb=O%)J{5R**jM%$LcZ1b~|5l-Ws`%w@C$d^!0*hue8 z1er)qG0r8`t3wkYf6%1|G*kbSBF`qUI~t9w5IU>HBqLMGsHUhq8;KN|{~W^8=g;kD zkNmX|`X7{`S}Bcb&1aK2f2v*zZYs1N|DaG#U~EY#4CaZDdIHAEc~|m<;Kn_6sNXKo z4h`$~$=uFNZk#qCaN4NvCNMKLzp$)EkcGjRz>F~EC^>XP+T3$SFc< zlq@FBa1l_>yN!(^OpSY+8$=FVDbQ0xa^Zbi9o_}L@_`e~m{_54Z*B9k*~F{Y66!)+ zz|43@$TeHli16T4EJ@~AHQGlm37f#)NL6&v6h*G>B&=0-AJ5!N(@QD{Br1{17#(8) z7YUwPw@OI&L<@$}d)@DT3IZ~lQBL*BzW<*q1BK_}@nbYWVWi+0kjFKj<64u_6Aq zpPTXjqy0w*ck%yil)nCdzd)9uRFTfEy%e;e|M&I}c3b^_uy=6R|F=>;ejGjsjy`0@ z@zSkT2)h;&1rLUwJ_R2?hDO6vjxcCOEvCf)!m5t-kR!L1WLyh~-neDO`l1Y68Z*gb zIM}lvX?mK^X7mFL!T}Vpj92yPQxI4h58XNkDKX4)6KwsO6K1z3fMl7aJ__EVgKg_; zgTZiO0=}3S8>&Q#wWWN=^Nb=(oRi6DN;MWsI8?zm)0NvxczJ5-8t2d0*85R#JcD8* z!vQgoETLQ#%UEtDM!OJbipGE^Kok&5d_nC7@%j~;O2l+lO0>Py1ftwEbEP5?9N5SsVM-)~!0W%}`y|?sKsy3YQ=;W5!z>{B;0>_k+)$P9jYOFTBA` zbh=uO&;_zIQj4LbeF7gpn&6M+Ngs2OL*G|R6<*r&0!36CK?T!K%f(>xj$~PMUe4W! zieb4k_3>jzlS}wD7aChaScl?P$&zroa)A73DqP`CA$k{cOEI>24Lb>O0am}noHB^I zxv+bmJjNyu^(ogivfGNkd--`Yd-WCemwh^`SuHBouUqEhn;zyXZ5Z9}gUIXL*7QZW zR#|))Yz1IVCwys(eY2&{|BGdEWBY)c{QuqYn*V>W|M<@TyN%MCoYf9cIdvuBbA+!m zq8AV0>*>mPOp|XRvWqIwKbe@qz>JpR13O*{a|2)d3aDLU^E?d0|A<-j=P7;j-!1gm z#msAKferJ2fB(^bYyLkTkMH*XZ>7}FT9GVM!{s-R&gGI(U_agaUkxe-GO(vU;_diKKo6z=ANPb-J!Lq5p+l1`S=D@&UuXk2doz;O)pC)UsHJQU@JBDwBRf9H`liG=%HF(u= zpC)}RXM)JNYF4mkMK;O9HtQF{+Cb{SyteVztc`s64=K_nC9PeyW+~3d)Fjb`V$deD zh(0!8?j93v6Y%)&9#S;APDXC~8d9cOsh=8+(+SFgL(_CV!cbZ46_`e+y8&Ozjx>V-$vPL zan3DNR)KU!0f*TjF=;7~AMQ z4ErT>XxHJ@j|g_bHuCxd`3u8qjDx>Zo=`r&jo}~|7H*su;1$FM0PAD6n>3YAf5BK6 zB6Y1!8&k&8yeUS$GMK3yscEm_y8}AKI$GnFloxl4n9m8^uORBW{n6nRv=j|#N6%}K zbB}LOgMYWE_&dr5`=7cKV1KOr-`#z@X8#}Go&UI%(ry2XgMX1f#ttn9pcodE&!bpK z6v`c+ZE2wbO1BpBL+vxx53i=RI44DF)+DvKon+aS^zx?muC~Sd8r1j2 zUFd8TeD{v6sjxsvwO#n>*_UpGMOTZ7*X_ufwb`mo4Vzy`^mjuFWgUVha*o;s#)Bdp z9(2cSEoA;IXl>cCJRRCk;(Du1p<)X_<5X#FVX=AGw4Z>U_G?sR?x1va^4BjfeD(r@ zX6n#h1gL^r%I#tzo}Sa}4N5v&l_#6aK0IAtIk&!rhZ6R?9e?n$`<>;!{nE7m(VS{` z_3kEdY_R|1@uRl=-yiKgy0ibcQS9AQh4$Q|Jr{`|NSZOU*Lq@mNQ|wD>zE15C_|&c zG)k_b;GVrTR~+YeSA*zp=nhTV%V9+M90F5@poP0JlSyHsDAWEq63HPI5`j#O;;csC zBye|B{5_qEi;IY8MwFt2TYqCt^-$-=4NC7d9*2QNipSk!bzVr{ul z_Kv{&R3te=H4GA}^w3m26lq3{&+{*`BoG^XMS};C{nn^-veFB|VY<>7iv)uQQMP&) zi{vL$>$#YhO_`XRM<*mcCvz+x|3ajB{DLfz2hL{JX!#fFze{_D^7z?N5YYsQef|Td z8Z;_xFKJ7yg0rL3vj@?Kl$qE6EHC~}SMY+wuTF#L+3EXJT}Y$x{w?K+xKP2_B2T9( zN~Wg9(<Pv&#`tgCUjNeVx6wA<8n_#17m z%*@dS;?4o|Q&vCp(aC4+c;F>Wg<$5r2{6hU+eo3Y(b>}porg3!Yt}QEz@-^broy=P zzS?l^1W?~=@8Eg;ct#JD-UZF=S`>RNJ#y_jqTD0io;92bu)Sj1SG`;p>7=>7m)j|= z)+T#zjn0=}S{eQamZtqDSr$qpNs2e=18j=_4i5I(@!$UFF8|-HlzXjQeZhAq?X9_Y zrGfwHo31Q(_u&G8i6z22Glr^4{rME`KLo0G6cErx6J6K~agY$oR>o~ON6bopL1`Os zy5%gkir_7}lq;smyP{TDq_*iqu5;v?4qH|4gK#(KYxn=Ev(APlS22QPt}>h0$6f4F z2>L?6h2ROwS3aL9l=|hoJcQl1qq$aP`V^v zL<7|}ht)5+(w0*%Tt3s<=#22evVug z$_uiBRgp-jF`TPhT$J0KEY*@a<<3)fSsO}r8x^dqeO9R(o&Qb!Z|`x`&Jf(p4WO&_ z|9JdpckTZ7?)dKh&#jcTqXZk?Mthf zso(>jxQ_~cMpIOpq}c>`Zb4S3J^avbBu@4FTyuApq$8+ zezSMlcnr`;s9O=~vogBsHW7=>I8~7fPfw0ZD48^x#OLPmr8c@62WT5OcMMs@addaR zRQcpq=9zH?91Zr0Dpe-zbCI&oepWAH280(8Ok}sCQI>gUV!@XvHAsxboYUV**vcnu zv_~Lx3#Q#+2n%pZx+noljE_rX6yUiCnG_U9ZGeWH~ 
z_hF^W1y%~$=a77Tbb3}s)wVO*(sz+#*-%woU<5|FaRqEATK~tuEOQGwOGlBn#jo;d zO0}~27Ex)JfAG+%8k<_d=kYt{a7a>QheY8Y-T_9^Dl~%Kt_#{1iRzV|)av(0YL|36 z`m36yry0hLNt2+mo~zPC+e_UoedJ5`e6^U?h%}|j1$xrk@@lxQ_>_3vZ(DpG`TTiC zOr;8P>Zi!TX}r#Z{mN~Qdf)=Q${hVY6?(d@DzfWxNC(lcaaK&9G7qfNr@Zb}8mF>Y z37rgw8PSVg!nep)_q$l~=}Wt~3JrlB&wIw}#+PPLnXc*>6;xyTyM|IC=@R8uee$e6 zLkdzXxxaTXEQD%b3h6=pyvUSkTND`_xzUkYJhTDQj!O`uCAA?!?=Vm{pIVHNb4{6z zBvc;9=(6#3m0rni$)vABq$I1(8cfV7KddqbZY7ageVbS}j(xUS-!(l@HVp`99HH&4 z(jv#}g<9BE=E5)&_hESkC(fl@w@y9yuc)7ufpin1Rl6sZsW`2r^9mU*$wP}-V7FQ|X#Zn#R3 zgH`6W!PHx)>dGsXL67c-?S||hBd3irvWM)pG87)8X#qS>r}jrr`jHmNb35x8s=pIz zJIp^QbZMGjtWN*;xlQVA?SnNO_V-sx2k5%e*kVk7k$$?`A#vuu^Od!&oo}ul_*-~0 zUAB*Rs=F(g)0(Z7d*M#i}>FP;7UAvQ9@Bkk_1r zAb<+PcBHK(uDF`E!C9xpN4_j!wj(&75x=ox7Y!=v-L(ZM(YJW?e4M&W`)cqf)2Vmm YuH2Qo@@FXj9{>RV|6@1x{Q!0V0FwiFJ^%m! literal 0 HcmV?d00001 diff --git a/charts/vals-operator/vals-operator/0.4.1/.gitignore b/charts/vals-operator/vals-operator/0.4.1/.gitignore new file mode 100644 index 000000000..d9f7c8d36 --- /dev/null +++ b/charts/vals-operator/vals-operator/0.4.1/.gitignore @@ -0,0 +1,49 @@ +# These are some examples of commonly ignored file patterns. +# You should customize this list as applicable to your project. +# Learn more about .gitignore: +# https://www.atlassian.com/git/tutorials/saving-changes/gitignore + +# Node artifact files +node_modules/ +dist/ + +# Compiled Java class files +*.class + +# Compiled Python bytecode +*.py[cod] + +# Log files +*.log + +# Package files +*.jar + +# Maven +target/ +dist/ + +# JetBrains IDE +.idea/ + +# Unit test reports +TEST*.xml + +# Generated by MacOS +.DS_Store + +# Generated by Windows +Thumbs.db + +# Applications +*.app +*.exe +*.war + +# Large media files +*.mp4 +*.tiff +*.avi +*.flv +*.mov +*.wmv diff --git a/charts/vals-operator/vals-operator/0.4.1/Chart.yaml b/charts/vals-operator/vals-operator/0.4.1/Chart.yaml new file mode 100644 index 000000000..cea4ed30e --- /dev/null +++ b/charts/vals-operator/vals-operator/0.4.1/Chart.yaml @@ -0,0 +1,16 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Vals-Operator + catalog.cattle.io/release-name: vals-operator +apiVersion: v2 +appVersion: v0.5.0 +description: This helm chart installs the Digitalis Vals Operator to manage sync secrets + from supported backends into Kubernetes +icon: https://digitalis.io/wp-content/uploads/2020/06/cropped-Digitalis-512x512-Blue_Digitalis-512x512-Blue-32x32.png +kubeVersion: '>= 1.19' +maintainers: +- email: info@digitalis.io + name: Digitalis.IO +name: vals-operator +type: application +version: 0.4.1 diff --git a/charts/vals-operator/vals-operator/0.4.1/README.md b/charts/vals-operator/vals-operator/0.4.1/README.md new file mode 100644 index 000000000..3efb45778 --- /dev/null +++ b/charts/vals-operator/vals-operator/0.4.1/README.md @@ -0,0 +1,33 @@ +vals-operator +============= +This helm chart installs the Digitalis Vals Operator to manage sync secrets from supported backends into Kubernetes + + +## Chart Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| args | list | `[]` | | +| env | list | `[]` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"digitalisdocker/vals-operator"` | | +| image.tag | string | `""` | | +| imagePullSecrets | list | `[]` | | +| manageCrds | bool | `true` | | +| nameOverride | string | `""` | | +| nodeSelector | object | `{}` | | +| podSecurityContext | 
object | `{}` | |
+| replicaCount | int | `1` | |
+| resources | object | `{}` | |
+| secretEnv | list | `[]` | |
+| securityContext | object | `{}` | |
+| serviceAccount.annotations | object | `{}` | |
+| serviceAccount.create | bool | `true` | |
+| serviceAccount.name | string | `""` | |
+| serviceMonitor.enabled | bool | `false` | |
+| serviceMonitor.labels | object | `{}` | |
+| tolerations | list | `[]` | |
+| volumeMounts | list | `[]` | |
+| volumes | list | `[]` | |
diff --git a/charts/vals-operator/vals-operator/0.4.1/app-readme.md b/charts/vals-operator/vals-operator/0.4.1/app-readme.md
new file mode 100644
index 000000000..3458f2059
--- /dev/null
+++ b/charts/vals-operator/vals-operator/0.4.1/app-readme.md
@@ -0,0 +1,9 @@
+# Vals-Operator
+
+Here at [Digitalis](https://digitalis.io) we love [vals](https://github.com/variantdev/vals), it's a tool we use daily to keep secrets stored securely. We also use [secrets-manager](https://github.com/tuenti/secrets-manager) on the Kubernetes deployments we manage. Inspired by these two wonderful tools we have created this operator.
+
+*vals-operator* syncs secrets from any secrets store supported by [vals](https://github.com/variantdev/vals) into Kubernetes. It works very similarly to [secrets-manager](https://github.com/tuenti/secrets-manager) and the code is actually based on it. Where they differ is that it supports not just HashiCorp Vault but many other secrets stores.
+
+## Mirroring secrets
+
+We have also added the ability to copy secrets between namespaces. It uses the format `ref+k8s://namespace/secret#key`. This way you can keep secrets generated in one namespace in sync with any other namespace in the cluster.
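As a sketch of the mirroring format described above (all names and namespaces are hypothetical), a ValsSecret resource that keeps a copy of a key from a secret in another namespace in sync might look like this, following the CRD schema below:

```yaml
apiVersion: digitalis.io/v1
kind: ValsSecret
metadata:
  name: mirrored-credentials
spec:
  name: mirrored-credentials
  ttl: 300  # assumed to be the refresh interval, in seconds
  data:
    password:
      ref: ref+k8s://source-namespace/source-secret#password
```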
diff --git a/charts/vals-operator/vals-operator/0.4.1/crds/valssecrets.yaml b/charts/vals-operator/vals-operator/0.4.1/crds/valssecrets.yaml
new file mode 100644
index 000000000..406ba1e83
--- /dev/null
+++ b/charts/vals-operator/vals-operator/0.4.1/crds/valssecrets.yaml
@@ -0,0 +1,130 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.4.1
+    "helm.sh/hook": crd-install
+    "helm.sh/hook-delete-policy": "before-hook-creation"
+  creationTimestamp: null
+  name: valssecrets.digitalis.io
+spec:
+  group: digitalis.io
+  names:
+    kind: ValsSecret
+    listKind: ValsSecretList
+    plural: valssecrets
+    singular: valssecret
+  scope: Namespaced
+  versions:
+  - name: v1
+    schema:
+      openAPIV3Schema:
+        description: ValsSecret is the Schema for the valssecrets API
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: ValsSecretSpec defines the desired state of ValsSecret
+            properties:
+              data:
+                additionalProperties:
+                  properties:
+                    encoding:
+                      description: Encoding type for the secret. Only base64 supported.
+                        Optional
+                      type: string
+                    ref:
+                      description: Ref value to the secret in the format ref+backend://path
+                        https://github.com/variantdev/vals
+                      type: string
+                  required:
+                  - ref
+                  type: object
+                type: object
+              databases:
+                items:
+                  properties:
+                    driver:
+                      description: Defines the database type
+                      type: string
+                    hosts:
+                      description: List of hosts to connect to, they'll be tried in
+                        sequence until one succeeds
+                      items:
+                        type: string
+                      type: array
+                    loginCredentials:
+                      description: Credentials to access the database
+                      properties:
+                        namespace:
+                          description: Optional namespace of the secret, default current
+                            namespace
+                          type: string
+                        passwordKey:
+                          description: Key in the secret containing the database password
+                          type: string
+                        secretName:
+                          description: Name of the secret containing the credentials
+                            to be able to log in to the database
+                          type: string
+                        usernameKey:
+                          description: Key in the secret containing the database username
+                          type: string
+                      required:
+                      - passwordKey
+                      - secretName
+                      type: object
+                    passwordKey:
+                      description: Key in the secret containing the database password
+                      type: string
+                    port:
+                      description: Database port number
+                      type: integer
+                    userHost:
+                      description: Used for MySQL only, the host part for the username
+                      type: string
+                    usernameKey:
+                      description: Key in the secret containing the database username
+                      type: string
+                  required:
+                  - driver
+                  - hosts
+                  - passwordKey
+                  type: object
+                type: array
+              name:
+                type: string
+              ttl:
+                format: int64
+                type: integer
+              type:
+                type: string
+            required:
+            - data
+            type: object
+          status:
+            description: ValsSecretStatus defines the observed state of ValsSecret
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
diff --git a/charts/vals-operator/vals-operator/0.4.1/questions.yaml b/charts/vals-operator/vals-operator/0.4.1/questions.yaml
new file mode 100644
index 000000000..7976cff0c
--- /dev/null
+++ b/charts/vals-operator/vals-operator/0.4.1/questions.yaml
@@ -0,0 +1,26 @@
+questions:
+#image configurations
+- variable: image.repository
+  default: "digitalisdocker/vals-operator"
+  description: image repository
+  type: string
+  label: Image Repository
+  group: "Container Images"
+- variable: image.tag
+  default: "v0.3.0"
+  description: Image tag
+  type: string
+  label: Image Tag
+  group: "Container Images"
+- variable: imagePullSecrets
+  default: ""
+  description: secret name to pull image
+  type: string
+  label: Image Pull Secrets
+  group: "Container Images"
+- variable: environmentSecret
+  default: ""
+  description: "The secret containing env variables to access the backend secrets store."
+  label: Config Secret
+  type: string
+  group: "Settings"
diff --git a/charts/vals-operator/vals-operator/0.4.1/templates/NOTES.txt b/charts/vals-operator/vals-operator/0.4.1/templates/NOTES.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/charts/vals-operator/vals-operator/0.4.1/templates/_helpers.tpl b/charts/vals-operator/vals-operator/0.4.1/templates/_helpers.tpl
new file mode 100644
index 000000000..dcd31f04a
--- /dev/null
+++ b/charts/vals-operator/vals-operator/0.4.1/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "vals-operator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "vals-operator.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "vals-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "vals-operator.labels" -}} +helm.sh/chart: {{ include "vals-operator.chart" . }} +{{ include "vals-operator.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "vals-operator.selectorLabels" -}} +app.kubernetes.io/name: {{ include "vals-operator.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "vals-operator.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "vals-operator.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/charts/vals-operator/vals-operator/0.4.1/templates/crds.yaml b/charts/vals-operator/vals-operator/0.4.1/templates/crds.yaml new file mode 100644 index 000000000..0b71ef8c6 --- /dev/null +++ b/charts/vals-operator/vals-operator/0.4.1/templates/crds.yaml @@ -0,0 +1,6 @@ +{{- if .Values.manageCrds -}} +{{- range $path, $bytes := .Files.Glob "crds/*.yaml" }} +{{ $.Files.Get $path }} +--- +{{- end }} +{{- end }} diff --git a/charts/vals-operator/vals-operator/0.4.1/templates/deployment.yaml b/charts/vals-operator/vals-operator/0.4.1/templates/deployment.yaml new file mode 100644 index 000000000..334c566e7 --- /dev/null +++ b/charts/vals-operator/vals-operator/0.4.1/templates/deployment.yaml @@ -0,0 +1,73 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "vals-operator.fullname" . }} + labels: + {{- include "vals-operator.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "vals-operator.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "vals-operator.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "vals-operator.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.args }} + args: + {{- toYaml .Values.args | nindent 12 }} + {{- end }} + {{- if .Values.environmentSecret }} + envFrom: + - secretRef: + name: "{{ .Values.environmentSecret }}" + {{- else }} + envFrom: + {{- toYaml .Values.secretEnv | nindent 12 }} + {{- end }} + {{- if .Values.env }} + env: + {{- toYaml .Values.env | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- if .Values.volumeMounts }} + volumeMounts: + {{- toYaml .Values.volumeMounts | nindent 12 }} + {{- end }} + ports: + - containerPort: {{ .Values.metricsPort | default 8080 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.volumes }} + volumes: + {{- toYaml .Values.volumes | nindent 8 }} + {{- end }} diff --git a/charts/vals-operator/vals-operator/0.4.1/templates/serviceaccount.yaml b/charts/vals-operator/vals-operator/0.4.1/templates/serviceaccount.yaml new file mode 100644 index 000000000..67f271063 --- /dev/null +++ b/charts/vals-operator/vals-operator/0.4.1/templates/serviceaccount.yaml @@ -0,0 +1,64 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: vals-operator + labels: + {{- include "vals-operator.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - "secrets" + verbs: + - "get" + - "list" + - "watch" + - "update" + - "delete" + - "create" +- apiGroups: + - "" + resources: + - "events" + verbs: + - "create" + - "patch" +- apiGroups: + - "digitalis.io" + resources: + - "valssecrets" + verbs: + - "get" + - "list" + - "watch" + - "update" + - "delete" + - "create" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: vals-operator + labels: + {{- include "vals-operator.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vals-operator +subjects: + - kind: ServiceAccount + name: {{ include "vals-operator.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "vals-operator.serviceAccountName" . }} + labels: + {{- include "vals-operator.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/vals-operator/vals-operator/0.4.1/templates/servicemonitor.yaml b/charts/vals-operator/vals-operator/0.4.1/templates/servicemonitor.yaml new file mode 100644 index 000000000..4b6e11eeb --- /dev/null +++ b/charts/vals-operator/vals-operator/0.4.1/templates/servicemonitor.yaml @@ -0,0 +1,37 @@ +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "vals-operator.fullname" . }} + labels: + {{- if .Values.serviceMonitor.labels }} + {{ toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- else }} + app: {{ template "vals-operator.name" . 
}} + chart: {{ template "vals-operator.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- end }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} +spec: + endpoints: + - targetPort: "metrics" + {{- if .Values.serviceMonitor.interval }} + interval: {{ .Values.serviceMonitor.interval }} + {{- end }} + {{- if .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} + {{- end }} + path: /metrics + port: {{ .Values.metricsPort | default 8080 }} + tlsConfig: + insecureSkipVerify: true + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "vals-operator.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/vals-operator/vals-operator/0.4.1/values.yaml b/charts/vals-operator/vals-operator/0.4.1/values.yaml new file mode 100644 index 000000000..5bc2d97ad --- /dev/null +++ b/charts/vals-operator/vals-operator/0.4.1/values.yaml @@ -0,0 +1,106 @@ +replicaCount: 1 + +image: + repository: digitalisdocker/vals-operator + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +manageCrds: true + +# additional arguments to operator +args: [] + # -exclude-namespaces string + # Comma separated list of namespaces to ignore. + # -health-probe-bind-address string + # The address the probe endpoint binds to. (default ":8081") + # -kubeconfig string + # Paths to a kubeconfig. Only required if out-of-cluster. + # -leader-elect + # Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. + # -metrics-bind-address string + # The address the metric endpoint binds to. (default ":8080") + # -reconcile-period duration + # How often the controller will re-queue vals-operator events. (default 5s) + # -record-changes + # Records every time a secret has been updated. You can view them with kubectl describe. It may also be disabled globally and enabled per secret via the annotation 'vals-operator.digitalis.io/record: "true"' (default true) + # -ttl duration + # How often to check backend for updates. (default 5m0s) + # -watch-namespaces string + # Comma separated list of namespaces that vals-operator will watch. + # -zap-devel + # Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default true) + # -zap-encoder value + # Zap log encoding (one of 'json' or 'console') + # -zap-log-level value + # Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity + # -zap-stacktrace-level value + # Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic'). + + +environmentSecret: "" + +# See https://github.com/variantdev/vals +# for information on setting up your backend environment. 
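+# For example (the address and token below are placeholders), a Vault-backed
+# setup might export the standard Vault client variables:
+#   env:
+#     - name: VAULT_ADDR
+#       value: "https://vault.example.com:8200"
+#     - name: VAULT_TOKEN
+#       value: "s.xxxxxxxx"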
+env: []
+  # - name: VAULT_SKIP_VERIFY
+  #   value: "true"
+
+secretEnv: []
+  # - secretRef:
+  #     name: aws-creds
+
+volumes: []
+  # - name: creds
+  #   secret:
+  #     secretName: gcs-credentials
+volumeMounts: []
+  # - name: creds
+  #   mountPath: /secret
+  #   readOnly: true
+
+
+podSecurityContext: {}
+  # fsGroup: 2000
+
+securityContext: {}
+  # capabilities:
+  #   drop:
+  #   - ALL
+  # readOnlyRootFilesystem: true
+  # runAsNonRoot: true
+  # runAsUser: 1000
+
+metricsPort: 8080
+serviceMonitor:
+  # When set to true then use a ServiceMonitor to collect metrics
+  enabled: false
+  # Custom labels to use in the ServiceMonitor to be matched with a specific Prometheus
+  labels: {}
+  # Set the namespace the ServiceMonitor should be deployed to
+  # namespace: default
+  # Set how frequently Prometheus should scrape
+  # interval: 30s
+  # Set timeout for scrape
+  # scrapeTimeout: 10s
+
+resources: {}
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/index.yaml b/index.yaml
index e349ac8a9..8409e4729 100755
--- a/index.yaml
+++ b/index.yaml
@@ -3551,6 +3551,26 @@ entries:
     - assets/universal-crossplane/universal-crossplane-1.2.200100.tgz
     version: 1.2.200100
   vals-operator:
+  - annotations:
+      catalog.cattle.io/certified: partner
+      catalog.cattle.io/display-name: Vals-Operator
+      catalog.cattle.io/release-name: vals-operator
+    apiVersion: v2
+    appVersion: v0.5.0
+    created: "2022-02-18T13:18:49.589482-05:00"
+    description: This helm chart installs the Digitalis Vals Operator to manage sync
+      secrets from supported backends into Kubernetes
+    digest: 48919f4c9e4bf65c84d300466758533ef63ef00023403ce4fcd5189606af7d6a
+    icon: https://digitalis.io/wp-content/uploads/2020/06/cropped-Digitalis-512x512-Blue_Digitalis-512x512-Blue-32x32.png
+    kubeVersion: '>= 1.19'
+    maintainers:
+    - email: info@digitalis.io
+      name: Digitalis.IO
+    name: vals-operator
+    type: application
+    urls:
+    - assets/vals-operator/vals-operator-0.4.1.tgz
+    version: 0.4.1
   - apiVersion: v2
     appVersion: v0.4.0
     created: "2022-01-07T09:27:48.235665Z"

From 31ddc4889f22a0c7414f3b2937a319cc7aa5dc6e Mon Sep 17 00:00:00 2001
From: Samuel Attwood <45669855+samuelattwood@users.noreply.github.com>
Date: Fri, 18 Feb 2022 16:38:09 -0500
Subject: [PATCH 08/13] Update pull-request.yaml

Adding rebase to pull-request actions
---
 .github/workflows/pull-request.yaml | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml
index 2ffc47f66..4d9304ba3 100644
--- a/.github/workflows/pull-request.yaml
+++ b/.github/workflows/pull-request.yaml
@@ -11,15 +11,20 @@ jobs:
 
     steps:
       - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
 
       - name: Checkout into branch
         run: git checkout -b staging-pr-workflow
+
+      - name: Fetch main-source
+        run: git fetch origin main-source
+
+      - name: Rebase to main-source
+        run: git rebase origin/main-source
 
       - name: Pull scripts
         run: sudo make pull-scripts
-
-      - name: Pull in all relevant branches
-        run: git fetch origin main
 
       - name: Validate
-        run: sudo make validate
\ No newline at end of file
+        run: sudo make validate

From 6a6dae076debb4b99142fbcda01cdce50504108e Mon Sep 17 00:00:00 2001
From: Samuel Attwood
Date: Fri, 18 Feb 2022 18:43:26 -0500
Subject: [PATCH 09/13] Configuring git user in workflow to permit rebase

---
 .github/workflows/pull-request.yaml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml
index 4d9304ba3..3512fb6d5 100644
--- 
a/.github/workflows/pull-request.yaml +++ b/.github/workflows/pull-request.yaml @@ -19,6 +19,11 @@ jobs: - name: Fetch main-source run: git fetch origin main-source + + - name: Set git user for rebase + run: | + git config user.name "$(git log -n 1 --pretty=format:%an)" + git config user.email "$(git log -n 1 --pretty=format:%ae)" - name: Rebase to main-source run: git rebase origin/main-source From 71bea459f7e84d3e11cbfe75402f272e55246bb6 Mon Sep 17 00:00:00 2001 From: selvamt94 Date: Wed, 23 Feb 2022 16:15:44 -0800 Subject: [PATCH 10/13] updating NeuVector chart version to 1.9.1 --- .../generated-changes/overlay/questions.yml | 21 ++++- .../generated-changes/patch/Chart.yaml.patch | 19 +++-- .../generated-changes/patch/README.md.patch | 76 +++++++++++++++++++ packages/neuvector/package.yaml | 2 +- 4 files changed, 103 insertions(+), 15 deletions(-) create mode 100644 packages/neuvector/generated-changes/patch/README.md.patch diff --git a/packages/neuvector/generated-changes/overlay/questions.yml b/packages/neuvector/generated-changes/overlay/questions.yml index c0f700961..e499491de 100644 --- a/packages/neuvector/generated-changes/overlay/questions.yml +++ b/packages/neuvector/generated-changes/overlay/questions.yml @@ -13,7 +13,7 @@ questions: label: OEM name group: "Container Images" - variable: tag - default: "4.4.3" + default: "4.4.4" description: image tag for controller enforcer manager type: string label: Image Tag @@ -83,15 +83,28 @@ questions: label: Runtime Path - variable: crio.enabled default: "false" - description: Crio runtime. Enable only one runtime. + description: CRI-O runtime. Enable only one runtime. type: boolean - label: Crio Runtime + label: CRI-O Runtime show_subquestion_if: true group: "Container Runtime" subquestions: - variable: crio.path default: "/var/run/crio/crio.sock" - description: "Crio Runtime Path" + description: "CRI-O Runtime Path" + type: string + label: Runtime Path +- variable: k3s.enabled + default: "false" + description: k3s containerd runtime. Enable only one runtime. 
+ type: boolean + label: k3s Containerd Runtime + show_subquestion_if: true + group: "Container Runtime" + subquestions: + - variable: k3s.runtimePath + default: "/run/k3s/containerd/containerd.sock" + description: "k3s Containerd Runtime Path" type: string label: Runtime Path #storage configurations diff --git a/packages/neuvector/generated-changes/patch/Chart.yaml.patch b/packages/neuvector/generated-changes/patch/Chart.yaml.patch index 1a54aa6b2..cdef3fa13 100644 --- a/packages/neuvector/generated-changes/patch/Chart.yaml.patch +++ b/packages/neuvector/generated-changes/patch/Chart.yaml.patch @@ -1,23 +1,22 @@ --- charts-original/Chart.yaml +++ charts/Chart.yaml @@ -1,11 +1,17 @@ -+annotations: -+ catalog.cattle.io/certified: partner -+ catalog.cattle.io/display-name: NeuVector -+ catalog.cattle.io/release-name: neuvector apiVersion: v1 - appVersion: 4.4.3 + appVersion: 4.4.4 description: Helm chart for NeuVector's core services -engine: gotpl home: https://neuvector.com icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4 -+keywords: -+- security -+kubeVersion: '>=1.13.0-0' maintainers: - email: support@neuvector.com name: becitsthere -name: core --version: 1.8.9 +name: neuvector -+version: 1.8.900 + version: 1.9.1 ++annotations: ++ catalog.cattle.io/release-name: neuvector ++ catalog.cattle.io/certified: partner ++ catalog.cattle.io/display-name: NeuVector ++keywords: ++- security ++kubeVersion: '>=1.13.0-0' diff --git a/packages/neuvector/generated-changes/patch/README.md.patch b/packages/neuvector/generated-changes/patch/README.md.patch new file mode 100644 index 000000000..fb2b2521d --- /dev/null +++ b/packages/neuvector/generated-changes/patch/README.md.patch @@ -0,0 +1,76 @@ +--- charts-original/README.md ++++ charts/README.md +@@ -71,7 +71,7 @@ + `controller.schedulerName` | kubernetes scheduler name | `nil` | + `controller.affinity` | controller affinity rules | ... | spread controllers to different nodes | + `controller.tolerations` | List of node taints to tolerate | `nil` | +-`controller.resources` | Add resources requests and limits to controller deployment | `{}` | see examples in [values.yaml](values.yaml) ++`controller.resources` | Add resources requests and limits to controller deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) + `controller.nodeSelector` | Enable and specify nodeSelector labels | `{}` | + `controller.disruptionbudget` | controller PodDisruptionBudget. 0 to disable. Recommended value: 2. | `0` | + `controller.priorityClassName` | controller priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` | +@@ -99,7 +99,7 @@ + `controller.federation.mastersvc.ingress.host` | Must set this host value if ingress is enabled | `nil` | + `controller.federation.mastersvc.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually) + `controller.federation.mastersvc.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
+-`controller.federation.mastersvc.ingress.annotations` | Add annotations to ingress to influence behavior | `ingress.kubernetes.io/protocol: https ingress.kubernetes.io/rewrite-target: /` | see examples in [values.yaml](values.yaml) ++`controller.federation.mastersvc.ingress.annotations` | Add annotations to ingress to influence behavior | `ingress.kubernetes.io/protocol: https ingress.kubernetes.io/rewrite-target: /` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) + `controller.federation.managedsvc.type` | Multi-cluster managed cluster service type. If specified, the deployment will be managed by the managed cluster. Possible values include NodePort, LoadBalancer and ClusterIP. | `nil` | + `controller.federation.managedsvc.route.enabled` | If true, create an OpenShift route to expose the Multi-cluster managed cluster service | `false` | + `controller.federation.managedsvc.route.host` | Set OpenShift route host for managed service | `nil` | +@@ -109,13 +109,13 @@ + `controller.federation.managedsvc.ingress.host` | Must set this host value if ingress is enabled | `nil` | + `controller.federation.managedsvc.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually) + `controller.federation.managedsvc.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations.
+-`controller.ingress.annotations` | Add annotations to ingress to influence behavior | `ingress.kubernetes.io/protocol: https ingress.kubernetes.io/rewrite-target: /` | see examples in [values.yaml](values.yaml) ++`controller.ingress.annotations` | Add annotations to ingress to influence behavior | `ingress.kubernetes.io/protocol: https ingress.kubernetes.io/rewrite-target: /` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) + `controller.configmap.enabled` | If true, configure NeuVector global settings using a ConfigMap | `false` + `controller.configmap.data` | NeuVector configuration in YAML format | `{}` + `controller.secret.enabled` | If true, configure NeuVector global settings using secrets | `false` +@@ -125,7 +125,7 @@ + `enforcer.image.hash` | enforcer image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | | + `enforcer.priorityClassName` | enforcer priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` | + `enforcer.tolerations` | List of node taints to tolerate | `- effect: NoSchedule`
    `key: node-role.kubernetes.io/master` | other taints can be added after the default +-`enforcer.resources` | Add resources requests and limits to enforcer deployment | `{}` | see examples in [values.yaml](values.yaml) ++`enforcer.resources` | Add resources requests and limits to enforcer deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) + `manager.enabled` | If true, create manager | `true` | + `manager.image.repository` | manager image repository | `neuvector/manager` | + `manager.image.hash` | manager image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | | +@@ -133,7 +133,7 @@ + `manager.env.ssl` | If false, manager will listen on HTTP access instead of HTTPS | `true` | + `manager.svc.type` | set manager service type for native Kubernetes | `NodePort`;
if it is OpenShift platform or ingress is enabled, then default is `ClusterIP` | set to LoadBalancer if using cloud providers, such as Azure, Amazon, Google + `manager.svc.loadBalancerIP` | if manager service type is LoadBalancer, this is used to specify the load balancer's IP | `nil` | +-`manager.svc.annotations` | Add annotations to manager service | `{}` | see examples in [values.yaml](values.yaml) ++`manager.svc.annotations` | Add annotations to manager service | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) + `manager.route.enabled` | If true, create an OpenShift route to expose the management console service | `true` | + `manager.route.host` | Set OpenShift route host for management console service | `nil` | + `manager.route.termination` | Specify TLS termination for OpenShift route for management console service. Possible passthrough, edge, reencrypt | `passthrough` | +@@ -143,10 +143,10 @@ + `manager.ingress.enabled` | If true, create ingress, must also set ingress host value | `false` | enable this if ingress controller is installed + `manager.ingress.host` | Must set this host value if ingress is enabled | `nil` | + `manager.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations. Currently only supports `/` +-`manager.ingress.annotations` | Add annotations to ingress to influence behavior | `{}` | see examples in [values.yaml](values.yaml) ++`manager.ingress.annotations` | Add annotations to ingress to influence behavior | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) + `manager.ingress.tls` | If true, TLS is enabled for manager ingress service |`false` | If set, the tls-host used is the one set with `manager.ingress.host`. + `manager.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually) +-`manager.resources` | Add resources requests and limits to manager deployment | `{}` | see examples in [values.yaml](values.yaml) ++`manager.resources` | Add resources requests and limits to manager deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) + `manager.affinity` | manager affinity rules | `{}` | + `manager.tolerations` | List of node taints to tolerate | `nil` | + `manager.nodeSelector` | Enable and specify nodeSelector labels | `{}` | +@@ -164,7 +164,7 @@ + `cve.scanner.priorityClassName` | cve scanner priorityClassName. Must exist prior to helm deployment. Leave empty to disable.
| `nil` | + `cve.scanner.replicas` | external scanner replicas | `3` | + `cve.scanner.dockerPath` | the remote docker socket if CI/CD integration needs to scan images before they are pushed to the registry | `nil` | +-`cve.scanner.resources` | Add resources requests and limits to scanner deployment | `{}` | see examples in [values.yaml](values.yaml) | ++`cve.scanner.resources` | Add resources requests and limits to scanner deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) | + `cve.scanner.affinity` | scanner affinity rules | `{}` | + `cve.scanner.tolerations` | List of node taints to tolerate | `nil` | + `cve.scanner.nodeSelector` | Enable and specify nodeSelector labels | `{}` | diff --git a/packages/neuvector/package.yaml b/packages/neuvector/package.yaml index 5620045cb..892914322 100644 --- a/packages/neuvector/package.yaml +++ b/packages/neuvector/package.yaml @@ -1,2 +1,2 @@ -url: https://neuvector.github.io/neuvector-helm/core-1.8.9.tgz +url: https://neuvector.github.io/neuvector-helm/core-1.9.1.tgz packageVersion: 00 From 97c2ce4ea5be1ca8a43dad80d1052a0a3a0f91a6 Mon Sep 17 00:00:00 2001 From: selvamt94 Date: Wed, 23 Feb 2022 16:15:50 -0800 Subject: [PATCH 11/13] updating NeuVector chart version to 1.9.1 --- assets/neuvector/neuvector-1.9.100.tgz | Bin 0 -> 15913 bytes .../neuvector/neuvector/1.9.100/.helmignore | 21 ++ charts/neuvector/neuvector/1.9.100/Chart.yaml | 17 + charts/neuvector/neuvector/1.9.100/README.md | 198 ++++ .../neuvector/neuvector/1.9.100/app-readme.md | 14 + .../neuvector/neuvector/1.9.100/questions.yml | 213 ++++ .../neuvector/1.9.100/templates/NOTES.txt | 20 + .../neuvector/1.9.100/templates/_helpers.tpl | 32 + .../templates/admission-webhook-service.yaml | 18 + .../1.9.100/templates/clusterrole.yaml | 119 +++ .../1.9.100/templates/clusterrolebinding.yaml | 145 +++ .../templates/controller-deployment.yaml | 199 ++++ .../1.9.100/templates/controller-ingress.yaml | 210 ++++ .../1.9.100/templates/controller-route.yaml | 82 ++ .../1.9.100/templates/controller-service.yaml | 89 ++ .../neuvector/1.9.100/templates/crd.yaml | 926 ++++++++++++++++++ .../1.9.100/templates/enforcer-daemonset.yaml | 123 +++ .../1.9.100/templates/init-configmap.yaml | 13 + .../1.9.100/templates/init-secret.yaml | 15 + .../1.9.100/templates/manager-deployment.yaml | 93 ++ .../1.9.100/templates/manager-ingress.yaml | 68 ++ .../1.9.100/templates/manager-route.yaml | 28 + .../1.9.100/templates/manager-service.yaml | 26 + .../neuvector/1.9.100/templates/psp.yaml | 77 ++ .../neuvector/1.9.100/templates/pvc.yaml | 25 + .../1.9.100/templates/rolebinding.yaml | 31 + .../1.9.100/templates/scanner-deployment.yaml | 74 ++ .../1.9.100/templates/serviceaccount.yaml | 13 + .../1.9.100/templates/updater-cronjob.yaml | 73 ++ .../neuvector/neuvector/1.9.100/values.yaml | 292 ++++++ index.yaml | 21 + 31 files changed, 3275 insertions(+) create mode 100644 assets/neuvector/neuvector-1.9.100.tgz create mode 100644 charts/neuvector/neuvector/1.9.100/.helmignore create mode 100644 charts/neuvector/neuvector/1.9.100/Chart.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/README.md create mode 100644 charts/neuvector/neuvector/1.9.100/app-readme.md create mode 100644 charts/neuvector/neuvector/1.9.100/questions.yml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/NOTES.txt create mode 100644 charts/neuvector/neuvector/1.9.100/templates/_helpers.tpl create mode 100644
charts/neuvector/neuvector/1.9.100/templates/admission-webhook-service.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/clusterrole.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/clusterrolebinding.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/controller-deployment.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/controller-ingress.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/controller-route.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/controller-service.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/crd.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/enforcer-daemonset.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/init-configmap.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/init-secret.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/manager-deployment.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/manager-ingress.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/manager-route.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/manager-service.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/psp.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/pvc.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/rolebinding.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/scanner-deployment.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/serviceaccount.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/templates/updater-cronjob.yaml create mode 100644 charts/neuvector/neuvector/1.9.100/values.yaml diff --git a/assets/neuvector/neuvector-1.9.100.tgz b/assets/neuvector/neuvector-1.9.100.tgz new file mode 100644 index 0000000000000000000000000000000000000000..0ca444f978b39e7afb84456fdcab169d1b947416 GIT binary patch literal 15913 zcmV+^KGwk>iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMYcbK5wwIJ$r9Q{bn}+{`1CEZec?=$<)uJ$7c+yNSnTd$MP1 zYb%CGNWz#TxBw_eo8*4>U*Sc9B>2$7&SS)>%E%+&87A=?78j6Vc3^^Ms9u!>qZ1Pof_71t{|kE+B`L?GI*{ z&(lfD5cNsSQOrf*VLBZB*TK>8{_*qwkNvp6xA*@nemv~mq2)cH0UP%`z>uF(%$L2p zbb>P6pZ?E}qrvE4u;=YP?L`pB9O4*JSp!87hhxCfBq5alxriJ9ib50QW6t;-QPeBb zY++g%4gPB|+S}``IR3f(FJPD=_7Do7S^h_dNBc)*`9D0`+sXf9JWs$Gn!+^XK#jkY zrJN0WPr%I_Gk_U@;2$TiUV2kPBgi=lz!Zl_6gWeE2q^*!NHLs*hyhN(1c3xHh5~?N zP5_@Hz)_ThkRvwe^+e#?kbeR9I1E8dxab9r z8HZtrf*t@U7X1sxfYTK90AQR#j%G^%3IM)L&=_2iFvRifw@d7eb}<07xh%LVKNtkTLTT#fvfc=f8RY!08mnnCs3aBshsV zJ}JBuFwhiHih{G0;&^tg+Q9Mbd=``J^^3R2Po);u)jV*I@NCYFih1Xe*#5)zWx@^7NizL*whjD!eaGMp2}g*xx|M7dBjFY#66j!A&B4uW?d zB+w@U`_WioG|FG?>sR|?oD$3yzQvOAmp#wt(0)x&d@aNfT#=L`Ap=u9OBE)XdDEO} zGyYdXrn`t}gkz{CWg+u?PDwhO>(8Q>lDiB}4j|C?5o51NkW<+MR|p1wr01!G{SIs31^7$C_hka3xP{4(*_emNC0ziKV6Wo6WCMgGz z5Kj;z6o5WO_hMW;4(SZ>zQtP^;F4Ks9*xSNChO@2 z2~!^NZj4`I{aUiOuzxN9f)QdkLqRuAkVSW_@I|shVH_R6Rvo_w89T?gtukh-jaO-` zisM8PBPsQfJ#&RP!dxTvM9c(9ItGV(dr|f#LXk9<_K*K-{}tA+Df)McSf%2~S#ke| zJy}t4Ln8iV)QH!v6svOmHACY=fr9Qbtw?Q#r8VW)CS z)}@$O05_Mj=f@}D;@6uOW4SJ1!s5m;3V>L(f;n8E%r=aD1jqtWxhNp9RTjtcT}E#K zKI6wavKyXoQa&dq2(gc1hTcf~Z|^4`9vz%7_kYI!`^&$7zq5PYGDcRp08?q7Eyq%e zygWO(tbtM)K&#w2V=1D!7or}Q%J9~eD-CZ=vC8n4%Jon@CDgAjmCXCj3#Ha|jDc8K z#W)uvwxujfdjL(R$me5lLG*gVsAM_R!jSeT2~kNADYNXOtCya#3Mw?#ueV2H4X~ZU zV#%kpFbnggZed9QE=Yhb3FY!tNMP_23}NgedVX2I-Vhy>R(j5;YMDNmAP!5zBMW*0 
zv^UYPJsfjHV;GJ>U!c|B6pqKzNh~IzK1>p(|LXU;SOu0vP#%dEL%}!BqpJUxo14q)ev6qT;6_W2hs;xNKe}SGJVvVdt@${5{z8gODm`k{;AFY-7RY?8 z)t@f8mHvBVQ`pj~b$V4dWL9)=HU`qmX~x^=+T7zd2K_y-2fh>ksd07cVdld)Hr=Zt zTlDb(5D@w?bF_=6Yq`40*DlVk z;q3}{yZF0?!<{@{;c_*ea+a23I$S_Hq-i`<9|ny0cfDSjCN@3Fv~bd)NP}|!T?WhD z0UP%mvkgsSjw7TvP-1#_z@&#-#=8RRr$&B4I1dpO{ox%!s?M`Hjx>xMs1rhoc1_s$ z6q8OcC?@hBT@MBkmagV|G?^1}C#_T|X{TW-Y|QgZ0X(oim!-K}{PE&Hxp-#bnW0$5 zT@UzMzNI!=;{W>x$L09{;qmBb7yo;V=iNI`n1e75z~Hrt0PBUqKr?COy)j(O_YWU> zKT{F`l5&Ovq?YiCm7mE!vd}#E?dm1KY}^w|-SKev?p-Z#VYmv&0zVWoB#;!dxGA3n z5JjP15O*{*Sql9T<9H@MGGW6<5sU*i?&)4@6~I8>tY`WagxJC#2=Nr{0C4f^*^4)q zzh2$^_|-SUaPxTxW{3k%Zf0N!^brufx>8#l1{c}e4!0(L2LY@5Be>Y$W@&^f_ zoc(j}UxQd<@S&gWT(AO6`jvN&CMo{bAaZ_LLPG#yYaxR$M~y^GKC?_DV!;{HWHt{{O$VY#Z|R)Ytz^_O?RYiv4=T6;~x$W z4{Eyi(pm$zGtte9F*xTSf=j^R9Rd(c(LIQ8oN^t47wl}q1m~AJte%-(gUlrRJLLXc zR#Bs8v;I??glyq=B;$P@fu8ny09?PgdVPNCV8G4|)IhI{pW;T_!(dc|U`(92crwS% z8nHOY)QSB0u@&zp^0?;zH**vwh_V4swz3tdZT{aoI4I5k&yV)@ck};aJn!BOzXJ;# zjR8Yky6%OZ`Y{q@@#km^z8gxh8GhG$@iq}=f>1L;zyX=6sb20I?dfu!cIJbAzI#V* zasXcbUG6s;s12t=D`fgol>BvpC>3_Zdtn{NJ~%p%KXG)OPN(=S=zBRZAo!tj(L z$b~jFZA{ZJT!Me6FqB&u0ZbB!`=IxCq@c_4T(lwj!N3IhFl7iZ5+U%n%r&EWJjEys z7=RS1RTc_>+}ng1_-3+{)Hu7i7G;Iz6?4zGgWmZR=nanwNH%Avy^Hc4=E_eoK_F!U zPbQ{>Gz>Mi4a60tiUh{fjGpFWMpIL!wm>JMi`mco@s;er_{Z+!YT=pDLP04^6*;bj zw<{xH6aI*G)L+yA}~`fvIx_4H$_rrq-hk8A$V92%Z>Sr{8_&CS7W`~Q!Qo)_o; z{n7rx^WFUa7*A=>{T+^jF}PO4-HQ;1W@|%fH#yCR-u~$b3`$Rvr-A&B~eJs%hIhXmfj|YEdCe+v05{q055kvX)EH_8lS7@yYSz$LTOpssf9)HFq6MO-Bua$g+n1+V_PxD6_=cqu#wi#9 zZv+OXFo6>sVvZ3T{4GBKGx!B_{qx&zbF-272Muk;giEx(bvFNBo3@Up3V{zF3Wtth z%IAdQ|HvRtj?Ms+;bK&9{J60CVO^|BSkMXd2Srrweqd!JN|nqf<>pXoT)W(^NZ72( zCpZppJoD71_>ZZYX&NGVE(#|27fRAZ;^C=yW*+L2@1(D@r*wCqeq=|!v^8n+wn;)pNNJ(4Mr&~cMCy*3KPi?bz!usOeM=O zP7#c{v5iH{N`tinRBczxwVJ#vd!c?=vvHw>^}ZXbJGO37f5tZZGYw{eOILuwPpL@9jO`o&SH7=Z|Oq|1oF4pOhefF6#gi zqAN7b3_Q&a2MU1EstuIt0Z^SgEc!Heo~4uDk~)~Vjo4V7{{tI>tBAp;u*~^*av*!Z6NH^0d_oJOk2ou2 zB-n|>P9$dD0;BssiZsZ+{=wLOLpQ}Ltn~4{6^4=4=bFaeJvQrX=n?s2Jx>2m9&8R! 
z5|U+v;!QF`w&Z_%et39X_WwNJ-|hc=l&2Iy$%j5W-eJW|x9W>2+Z9Q|nkH}bmZMlE z^cpTk6U3ooGC#|>wNdj{vUPnvdsgy8pO&(;2F-j|fFm4VAs8&Lkx$})jlt1gA#0sL zpZo(xp$n8L2Hpc42Po#?(6v!TrdxYyk)~zuN)Kct(*UfI0-#9dF3`-8(-smN3W}$m z>CK@|Q_D_!*GR3bVn(eWau@}psu=<65NTe#kn^Gu4ZHe=nAk^G^YVoQtr^E>h3lYVhy04~-U*7Ke`oEOl zDyf^@m|&f;S(l#WmnH%9DTz6H%k6x3Qc@QI`Lzs)L+*GxDw?|v!&|p^9H+FLN z`o-1F`Spvl+9rRdB&yO(o>S6GdgnKlP(dHAOh|Bk*@(zb=NFHQf@Xa}ofFx@ubfS{ zEwMr#So@T^vx_%ZFW+3gxVk>SzIk){>&4IKzqp6CKkB3s?lGUcS5ph)pD+>z-h+4* zky?;2jnFGGe^!zjN8;^eo~I&D5MS(!o>&*2OnY1UD5UAvI9!(VS~^vcPMuLp-ZDll z**8jk*Dhq!;N1axq?YWIB#+djOeL~fQ^gD<4)C48*okCXcrWK2osz{X)hhB&X?iOo zFei);6H0t%3*v*fAmK*P{TU@`ImfqxuRd+k0^$ESFsVuymp-x~(O?vX#QhrH7>@UaQB)o@9lPOp6iEI5+R?`g&L~BkgbTmPf zVa8F+U(04rLx@Y$wJ%;>lvZ2;wRjXr9QuBP3J4 z45)4zbSqb(Z6qreZOo>r*r8#)X6@=~WaT+IaS4pBBEHou3p&nJRp_E{H@FScdj+g0 zYo!Hi2i|2yue4IA)ZSbhI6c$WP{Yj%j%l!w*9!Bg*EH8Pf?-QGoU-!#>9tI42_?TH zpQF+O&atXh&=j0kCZ%@qDSLm!n$A&-$SX}?=$Kp7VW2#n>h|ETJ577;>Bfa*yH0Fs zs+8;oPr;0XZ*a_AfXX++d<*tOMbGmJv6nNbcC&Ox4_PpB>$G-Fh7WeOAl#pbe-& zT@sV?%zhEplXZEO)7CD5DqDl%?;Dvg;*z2%ew%&JIl;#TyFrdv(Qt63=&*D-OmNZQ zBeY^3#c4D_bPPs&g;a;_dogyxw-dh4S@`N@tn3+Slqy|E<&Cj~sM7a(>gU(T4YHf# z9&?^88~+>5wOJpwDDaIW`r@@fWAE2e4UG`n^+jX#b(F{o05{YuoqE23x@m;}*apB9 z1zvXRQAayqRJH@k>R}^WV5b~*%3-G*Iw^;>4HT!;7wweM#&(Li*z~hDR7@uxN*Ps% z^vBXhmF{n@jw&Em)JK*28!Mz1h}+aiwVmIVN~$#QaSRrT#s;2?En6%*EwR%QJ1wz- zme|m8DU1I~E|y&`^OmP2{wJ?F-82bsTmIj}=ZB^IzsHBihr9USV?6KbGH5z(ld$(v zC0K~OVvlaN{TEG% zd`d#GV%*TQaHVnwoUQW427LFfaB;^9?usv)%?l2K&!5{HMz9<2cL1;* zNhlEBEf7;_`m7=a4cKo*CNgvHk02Dr_WZ)6qBMqUh(!&AZOVnA{euWb3ICF0l%(QI zUPQFm|C?R>qn|eWe>6HS+yBSMyZwKU@^s7pve7L;In2*UlLC<48aoCTBu4HFgGN8( zXMFOjf%-S6m*%vnqDWsu^SGaQvQobSXS5ESQ71URoz=lf0}IXyR}i%?H_XjOW%ug_ z{IkM}tjXP~5&S~@s*YQx>IcKk#LoMzT+CM(iPbq~-SyW)u+nF_>S-S9M%q~;pN;#- zjJu-MRQp-Wcbq-!AZMo7!;SLiX2!2D%oXan#(D3-$N5V(k-zB2Y5&t;EBF4Hr`i5L zIy!!SRI>jMj}LeD|D!x*B~1f!Gi5pd;~(pmkjFU11QVR^Md*;fx~6o?M>4KDaKtmIg?IsDN159FUaLgSpFz2gxl5 z)kEHxqsZ~P36y%EFMWv4Zvdb;b>zBIj|X5FlKa}P0gB71SIncVJ;o`H%Nb=fR*6jO z%FRf-p4u*VY{L|RltMS6lDpRJg0=2kr?yUB*$e>39L-QU=aqVj#;vP`X2t+INgaxv zIR!fRM8ct%BZ^@YpqAn6in4m{P%Y(FL|r|^nYUqWa4N;x_8?O9?-bK=GK%u!fod|O zatTsSAyqCQQEx1xuqHAGekFVfbbX(Ao&CW~ADWMh+Td?o*W|*V@~KBe-7hW zJxfeN;@`c2zOQ!HuGN((4mqktv$Z-ERvyEPmbq?1NQj_|!kkR>3p}sx-(|a;^ z2GY(z+8Ia<&(1({JUatvXCM_nY~MgiDDnRg6O@4bM%|CTu3fR|1XD@**SYeVoU|EF zyh$i=+t1A`Fo6t(IIba7Jp+gVp29SADZL4-rrQ?f8utpcp zF!RZx#su)dD`lnEe7&TE*C}ZaTntZECr+E)(LRN@+8iC!bi2LLYHf72H9A-tou~13 zE2A^0P8LQ-XkDy}Hrt}bvS_y}I$0I%Hbt`y(19Qwt%(k+(k7xd*byyOL?fN*8*Q)! z8X(?7$Au8hlEV<^IG)u=RijvT5y@t20kl{Qpw(&sZI%P*Dpw!h8m*n@K^J*zmNj(| zh#RqHyQsEdcW*sC>GoP-6kb|EH+62}4;3EPyG6gGD6zi2wlP@jDC3A3yNDx;*r8)b zp*(`D+u4PWcj4oQS$yuiqdV{DS}3b$=N)xCJMZYnwnx8w@8~Xkte#!?IPc*ueC+UQv@EgO>$fbgS{7H`mR4={QRn4L+p4M~7?w`nbn#<%@?@{# z%kDx7^{n85?&i{75oAZV_DWE@xwczu-MmnXf4iH9yR(nGo0q#2;X0A5omeeoY9vq_ zX*v+4jT`}HGRe{UE>+x8w?1UHwtKx{J-(L5bhmnGUcKDI>6f%%`8%98u2D+g9m|q3 zK`JYeQi0v#b$8N8YmdXfoVdW{7$tqAghDwu!+u+;v+C@FtLRMC&(-v|`#F zU-gLfJbK%>EkZqQzDD)bul$Oo<_HEgk)C3~2??6x{6OC*=HH=a^98JCtaaLU(UmBA zShtB^7n`>$dKAsj+YaTP{vTQL={EcEMeiT+jC^&lhqtiHWJ4#qQ9g}E=bCQxCWnX< zsg-@_Y8u+i`rIrvGz~nR;PkV~2kpq(p66`0??pLq=5km0ok5lXx=02^Z9Ca?o&}a? 
zX{A@m4lTy%iLv&sNQl6cP;i0L*RnX^L>N0%kni;B%$Qg*+iTgbPx+is{2!I?y7uOu z8UyWzZQVOGf&QR~O8b(;b^VYnToKGDV0ZYTNk9&&%K>QZE0&JeFM+E0pbJASM1o2JZX&2}V2 zS7@5ubEWe?AL#2lG%q%guV%(omR-PJExFG0$LH`jr(*aZp5ZE48ML9_4pZ@bF>6Jx`Vr}N8% z*r|G_sAc<3%=Nv0bYbf#Wo^5AK1=1_)~Scn_&YY-Naugj-1sr)fM&w%<^-oO-oxo1 zUHm%Aog~k1Et}>N=C$dX0Fa zPRy7)A&+q-Y^y44Rj4KesKO!FoI;Lfdf$VJ++C3{#PRGmSpa}atS*AwpTD%aPzm3a zh+;aXt6S6*PgsD|2FnD5Tc(VBGgFBpaRg`RG7ZCPsAEK1*i7IpD24u=wL;MhGftPFZ+;c`Z}#(s^ZMCc zqbMqVmTWWzeXAQ;10OyNt=Fn*HkaD7HHS0s;X}XBYOeM=H?{(3dm!uEEA~C(5sAiN z4%vKNT9iOhln{nFk>&neUrsle{xKP`tn)niV#js;97|vRiNs zN>*4|c(uAGFPCYa(SAKMR3UW*P^JQk7vceRoYgh zHk;L6={{(frt2F!IeY!$>gN3V#aV5WKU3n^h&RP34CGR!{6&AegnT~M=Z^;o3C=GY z5&7x-;&D+}kT8wVE1_bW7%Nk{L1y|@NE!(ssg8WEK%4L@f9jVd`5vG! zo+0qn9a=sEUoBt=#y>XDNGbu@$h&v468P|84BmrWhe#Oj!Q|)=#XfisVzw2k@D+Q#QGBlmNrpEFCLYmznuM^QU z9I|;pvk0j+p6P6Go8im`Jhm0htdHZ}T)ljA{o?BNi>o)Kpx2gyc-H2?s(rE;Fc#K@ z{a&41oc!|Q>dp1_OE+SDGM)A-ja53|F?eRM@)6~DihanDUK%tj8$uC$sxh%VkT}|Ji<>o*)>ekh;f3VH8D<5v@MLCbVK73KrFXdRIFOH?{LI*Sou?_%yBm_3dvP`v6$8xrdNI1_ zC^-7P%A=OcNwL4)XA#xy{xsQtl%yPOAp+cD|BVhthvoS1@o0Dc>rtMP;>b?O+E!g< z=_~od3JgC&gh;hndw{pY+1V=rp>YP2ZDeZUa|sPK4pOddYZY^`U_q~QVy6zjwr-PY zGe&X~#7<7Bo6IM=!baH!?5t8X^N_dmAguK?>3=<_554{$?H?Ui*8ls5JN^F{Plx#b z`U!VSN@1fF0bBTA%4)!oao6spNx9qLni0i90)wAm2xA}7^GooJ6(6iX>%X)L_x1Gy zMF6$vl-eni(~+6@hdc$b-VUDWec(t-+?ZI{j)?A@#oK>e`afY?*z|2ZZT{cUaXJ6P z;qmj)PX9l~Q_}wlODZ~F-4|Z+2wlq4O!Azoibi!xM3b!Y5HOFehgRFihrf zaFj-L&2rG)m5;!jOsYvPYMu z-V5f#P#eM;fbO!rU{YR|>wjFoIXQcEej)$5cyVL>CB9wWT%DZ0kbj*?Qm zc~<^2N|Gwflb0`hg(#w@sVkNS7s~gZVJ46A7|rj22*+cvr}v`09fPCi&kvsKV&|8q z#cEb+1DeJs>^FwA?x$&p#^C9dczY5rpUTPMC63d#_21JZ2~mV%4#PZOm9yxSsjNco z@8zon8gn+86=laPUyAKN{Zu1$$-k5akUw)5aWx58LINYfvIZ_^0ZSROYF)sL(cC0k zb4XUnjP==!j7gZaT6QeSq0PO2?BrKF1MZW0oc7Ls`oZx}~0e=Y5b(-7jQQ8A16pONzw0O8+Flt+lrSHwRA;~xu?KpqHm z?&;cJH>_vEiZTIeeVaNV?BTcD4Jvxo46I*pw&1RdAs9qB{wyZlN6Q<; z@?f#uo3#%YcLeguA22S9l|lQ{I$*4^9M=qhI_^>LMx<)wW?< zx?jd<1)l|I^jm;ceHTtEyg?)zTo0g=52F(x=gMGBPe!$B=V~sG25@>c>NsI@&jx7s zZh&<>92(>Dde{t4-Fg@_+$PY9#inh#E)c0lrt0-5HdOU`tVYNx#757Dv+=EwwmV+i zr@re$yEmjm+@}IH#8c!i{SZ}eB1#D3*J8!*x}*rbwRNri_#}#8@z8$$cwjie@sQ1H zzIx@mW~_c~J5c4qpg~W|ZO{%j$~u-ZDm&+=Gz8uPc>fD7Pi{{C0=&O~{)rv99{+?O zMHGDXjSqQ#(T_^Zz=p*$62+DKkdr$Uf7_QpL1 z%J0Aahxc{leI49<{g?6Auf|_rfBm1^r~MD&c?zD+IZxPlXm?UCH-|96Y^Xzq!_1&! 
z!#aa%m`A{92|~z{+~yAo`6fpIifKGoB%ca z;C_x`FiCM3;CKcS=-|CeXi26EMXgV!iJM z>^|vzH<-Y?-gg5YCB5(d|K1bu8d6MB2F}l3u-+h{` z!IV&|Q0A4%?m2$SfG@O$wqn?z2YOGQfJ=%Js5UZ*Wm3w7E|FmOdp!^QEi(?lXmI>} z&r{!U%s33i@)O1ZxWQu91PKWMW`L$~jN@6)GrM;IB_RU0zWPh;W4;A&0dXjnu+M-$ zNB$j{EXTdx?d`2kVn#yL`wD1V2h0!$y0!#WfF!E)dgn4YCR7-ph^I70!85>xf`maJ zew5oE_j-jFGJGOn$^Wl-p4Hx_Ty-!P(XBhZt4?v3&g!a{*4bR~!UHjhuXMrRu3k2k zmBeup)u%`piQW{r=^#e`dpcrpIC|fah5Se=HCKcwP~Q zAFX!Bm3F0Z4{`(8G^gGGFR8Ql&AXzXwLk7F0DWb#d$HYZHAYXKfYYn9-cQJfDMOOB zr&nj-o70qW5&^>{;0(!qlbFQc0(0sc_%H?&q!C5|n4qc95SbP+DkGsJNPYecKoFyQ z@YJWlQ!vj}4`zll2H-}FE)DVs2v#HhR6u%~SGYro=9)Q@bNfA{0q{wbK#nE5EC=9H zn&5yTE*rf^lQ|)G#`X17*g`T(!!x0}1g9DrwU2Z~U0LZo`jm7~54wwEhTf%KHn z$e2$+ZOnNFe2P6Hc{{&p7eJ=ixeRase%4*PJ79oNU|&IN7*i^!rIIc_N0k7K?Ghko3D{yq(lPioq!D+(m3zq|+h$8PuOw|m~>xto; zk}xFqQe0$HA!a=5rz$L7i)kZ598m_yRDQOmA34kd5rwB-SgM5=8}xdYrZRXB&Jgn{ zPUI9M{!&HXgA2kD>v`q@?}6tz{xZLMx7n76Tku}!F2&fQBRB@XCMdoZAD>Bn;=)Ac zM5ed^@4>C)qg(L4cWXSx0*Gru%{I4Ze;MW(gOr%WCCZbe~Ls_I8u*#cD(lFGyW9Py8i$WY>1<>k^HC?|4x9>g(7z`nR-ohvm{lhW%r`B7- zHu~2$#&I^oe4b8(z+0EC9TMzKu6#+)C;fz%xEh0>Ld+jh?mCkk_2a2iJxSN0oYSx#zdn&KFV33GudiRB0{AjP6SD`{g1@1OswbSlrDP|!wt4o*zo1Rs^PyIMzGzCN{; zCb&|mo$`uYrxTE=2Eoi&gmkAYs1YewN>aoaEp9@5U`63mXwn+3Gs3&h}(#(-=Ybx4evModJ!ptQG;tnnA zbYlq(@oya#mG(H1mTC>;1e=VI0@!KJR97#qZ@|grdA9nJT+;}}iXIr{Xs72as=^9< zr&iIB3+bg@b2ZFOE|nKrw1(a$#H>@A8+H%A-L{o(SaPvek2f!`?R>>jGRv5VcMa_i zz$IY}D=U{VKBpv|&7T1j%#g66QS8%YA}0*%dn2-Q!nBdZ8%Zw8HB8CX3!9W)NkXwI zb^*3(Xsx7$0#D5^*&b$kg2766v^1`7FI%N`Axu&Tgy|MC0ivk|ecRY7EhH!^w2+{v z+`<$E+Ta+dY1x*wS82$xaR-P=DS~uqmTLv6llELjFS(MlEs1*^hM6MfMBDg)@HwIe z9<#VinF9dFewYRbT#!IsaPSP+8DXVH(ypfS%K@kvC1(#emey{hsgv6VoNU1;qaf3r z3t4SwZ#4>Rs53fIVLRHx*@@DnPOgE}MI^d844_~b6rX`e7+)}CM6QNSB}s6}j0L+T zo7OQCYQvPamq9G|33u-X4_ToLVi>ITX33_i_QoKRu|tls5KqLFVDgZ8vS(5pv08GG zSUHwtT&pw2l3NGeduX^@+PazF!6LIo&f-eDD_>XdfRStbsu5(FhJvs+FRwk#iOTV! zTLj9RpcrI=GM_I~aRL-B!8b1ve=3lbwVw&?mI{OJ+wRyVkk5_OoBqp4R$%r=`OJ4jO2 z#Vt>gb+qX;NtTf6G)t;lQdT=pkEM3DIzQGB-~r~xtQVV1kP6w4J404FvcVLo`t$H} zWCiE8Nzz^6L1xKHQ=j%USwZ)s&XW~vb`xbWD6pEcp-3o{1V5f?Sx<==JRe4NG$@G& zQ3s!_;D0O;-wCr*m{n{$f{9z4b>!O)a?e6CfuSHj$MKA5?;6x0^;a;-#ki)W0CHFf zGw0!WL4kjqyn1Ouipm{ZjPXfk+z@5G-Mt1~-9!gm@6b}w7bFnVGS(*G-J~wFptv2~ z%__QLyIGCq$gU~o0%)STB~T9&)6Lp;2Fx-P9xz;%VX%7a)dOfcMLr*c3v#U^$hZGJ zq5pGxhn8bm)1xFrB~e#mDB4gCPz_vfKY~Gk0syCCg`k#*GH7KXx_W#!qvNL#-7VeL z=Hh#64@k!L*;Tp@t~ceInO>K2Kr<{~g!3>QZ(4RTyg}dr`8_B9V!&9a$*=c~nf9eK zB9t*%Kw|Kho106WGhUij2nK=_;`Oz2kZcc$n2!zILZnWZ6|vfNZ|$pn4{nVs2Yz-8)h#a&d};s!1vgJ5%{)Ud^Gn+Hf; z4LYKuH7(W!MO{!G5z!i`b%D_G_3Z5{*9Ts#y6&CdZ0YB^f&PJ)SKu_I6vbSgiwc+d z$*bGpZ55#{th+w023x_akl7bo^CUPKW>7`$(^&3j+Hz$J)==(KGGOifpa#F{W>B+b z$kbYYi>HCG`JTvXzAWeN;spY_SaExCC1-@Gtt=$HeMj0Vwid=rC@maFhAD-88uE|G zos$+S%|^a1RRh(%3%%G+YuC9u2TKv5hu-wul>ewqnupv~>d2oZn^)=f3TRmQwQl#o z9{5iDmq96IY$vHxQvgq$E9HUFsK-C;m1t$h-6>cfkemG{S zP550#{(ro-u+Hfh1*fwboLA^HVJDUaUG}nJ#|(8LQkEd)_iw*3VltS^if& zo=}sWf35jT4S#0JYHm!a5gM+-7N0 zA}??KvtZMhsEP6#`O6$Z zQEWpp3;ga5SR0vLhY-t%XMU)Yzh4(mNfx?N09EZ%q2d-grzDBZag_H41`%cqlh_3FwY5=2YS z9Bi}?c4z7y49cn>D4a_E+#ktws?I{6gmSV~$6KEPsAFeX`XeCEyf@0A(7VaZ5o9
    !9i7mQ(E!}fafl4|hD_>o@q_kx3aqJ3AI$f>-l^bbpM(F_&W1l1PN4pe?3Z?6 z>q8!;)2o8X0^+kl?{iFu@&|iza|kAhM-dDnwCx-)E%~4J4=VS69vtoO@;^Pw^W@1B zaFxd1jj%&5DdEVMM*rV2pOch>QxYYVM3|wTmCGPNAzlbw2Iax)5J~@K?5X?$=G-?U zVJd-12V1Tw5=w;n7^-t?D%D1kH!bwXA`N3iRd$c1GObm9g=iv=UP_3{hZio@eE$+I z5e3IqJ5x;2Jq*Kv=@?)rW>|*F0?46)Vs)XK+a`;T4eail)say6csA$ZQh~aHu|L;4 zg{?T8jIwZ~au&$ETHM2N-o_L`o>Dz!SULWfnq8W*jDyFZ4Foyf)&u(%3!_kB@31~tbl)v_&uR_&kSFwldd*%MI^?Y zP`QaF4lG{leJ_tx;y6e}Z=W*7Phl-9Dc`f)i)=O{0X`QN6$p^cz%b<`(*E3;=rz>s zodgRQ`zSDIn;x676G)!Dyv)Hp%dEEuNl!<%GM77XV%_K@B>r92L#Wb$z(le|?|UiZ zl6EzmAUC(4UxNvX{dolG9qaXUk`%Eh8-t(JqQ79Kh;@swQ=Z8bDbn7R&H_$kW+me> zC}fRLOM72(vJ6pC66ITe2ASfK(4~|LJ0#5Z`KtsVsx88SAmJd(H6lcqQX%QpMV4f1 z{afZf*;yxr`CqzTWYlT7`IX;=r+NM#9UeU|-~W2JcetDXALYqLecXGZgtf5wj`h4O zwZS->6?(vI2^rVs(U)bi^gH9sbDTjC>&G!*T&QJp-NhK3%VJk~v6;HjSti0&0xgTM zegQLrq5@Kt=F91BY7r0xb%Ab8b2uwBeK9bZm2cn)hS@<%AixhydeuuE$*k~I$}!bjDFn?x*FVd@e1N~o9$|` zepRTj63VrEU2AYQ^q=IfuC$rdFcs*CfOa?9T@s72av#8%3M^zv?6hx^n2f*K zhpa~vLP7-Nyh|E{#;peco0I!DES;Ds>k0B&b#y|@O20IL;AXm z)zy9xPjCtOToo&gRMs)OWJO69qbvp1kv{ASyPW3E=yqy1klTXuz$7n)Zc1ba9YQaz zVW{H2(NI`%oi2lLb?*JzPX7k#O8^f|coSf%ZQ0@h1J``U#>xL_hHr1cw%x@HE4Zz1~YrW@}NfyVHHuowxPGe^uUhSssXU z{h?tnTstkBM`5*1yRGK)xz*v#P}kbi(w$WfDs2z$j9tlMTLU|5_iq8ocONQPuDV$` zOQCEMBR_o+U=ARYvC*f-BW*Tj8PE7{=K>rI0wCmEIg6z`HIeCnW5iTqKZD4iGqs9+ zU4dxz$FA3vuNBW^H``!lk-CAr)f|L=6p^#Ku)fsQoNeBg;5l$yUG|#KL4R@L=b+h2 z68OXfH9Aat<_KJYh~{LQbJ)8dlFK@X;xwvi7aM@pDU*=8)F}W+lG%O2@+)8|Uu3&w zvN!hWwOW>{O)l6b#Ko*R@_(OH)Tv00960 LCipGO00II4fAZxX literal 0 HcmV?d00001 diff --git a/charts/neuvector/neuvector/1.9.100/.helmignore b/charts/neuvector/neuvector/1.9.100/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/neuvector/neuvector/1.9.100/Chart.yaml b/charts/neuvector/neuvector/1.9.100/Chart.yaml new file mode 100644 index 000000000..ed28b055f --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/Chart.yaml @@ -0,0 +1,17 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: NeuVector + catalog.cattle.io/release-name: neuvector +apiVersion: v1 +appVersion: 4.4.4 +description: Helm chart for NeuVector's core services +home: https://neuvector.com +icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4 +keywords: +- security +kubeVersion: '>=1.13.0-0' +maintainers: +- email: support@neuvector.com + name: becitsthere +name: neuvector +version: 1.9.100 diff --git a/charts/neuvector/neuvector/1.9.100/README.md b/charts/neuvector/neuvector/1.9.100/README.md new file mode 100644 index 000000000..d5be1eb79 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/README.md @@ -0,0 +1,198 @@ +# NeuVector Helm Chart + +Helm chart for NeuVector container security's core services. 
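+
+For a quick orientation, a minimal Helm 3 install looks like the following sketch (the chart path and namespace are illustrative assumptions; see the Configuration table below for the settings your environment needs, such as the image pull secret):
+
+```console
+$ kubectl create namespace neuvector
+$ helm install my-release --namespace neuvector ./neuvector-helm/
+```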
+ +## Preparation if using Helm 2 + +- Kubernetes 1.7+ +- Helm installed and Tiller pod is running +- Cluster role `cluster-admin` available, check by: + +```console +$ kubectl get clusterrole cluster-admin +``` + +If nothing is returned, then add the `cluster-admin` role: + +cluster-admin.yaml +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-admin +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' +- nonResourceURLs: + - '*' + verbs: + - '*' +``` + +```console +$ kubectl create -f cluster-admin.yaml +``` + +- If you have not yet created a service account for tiller, create one and give it admin abilities on the cluster: + +```console +$ kubectl create serviceaccount --namespace kube-system tiller +$ kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller +$ kubectl patch deployment tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}' -n kube-system +``` + +## CRD +Because the CRD (Custom Resource Definition) policies can be deployed before NeuVector's core product, a new 'crd' helm chart is created. The crd template in the 'core' chart is kept for backward compatibility. Please set 'crdwebhook.enabled' to false if you use the new 'crd' chart. + +## Choosing container runtime +The NeuVector platform supports docker, cri-o and containerd as the container runtime. k3s and bottlerocket clusters have their own runtime socket paths, so enable the matching runtime option, k3s.enabled or bottlerocket.enabled, respectively. + +## Configuration + +The following table lists the configurable parameters of the NeuVector chart and their default values. + +Parameter | Description | Default | Notes +--------- | ----------- | ------- | ----- +`openshift` | If deploying in OpenShift, set this to true | `false` | +`registry` | NeuVector container registry | `registry.neuvector.com` | +`tag` | image tag for controller enforcer manager | `latest` | +`oem` | OEM release name | `nil` | +`imagePullSecrets` | image pull secret | `nil` | +`psp` | NeuVector Pod Security Policy when psp policy is enabled | `false` | +`serviceAccount` | Service account name for NeuVector components | `default` | +`controller.enabled` | If true, create controller | `true` | +`controller.image.repository` | controller image repository | `neuvector/controller` | +`controller.image.hash` | controller image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | | +`controller.replicas` | controller replicas | `3` | +`controller.schedulerName` | kubernetes scheduler name | `nil` | +`controller.affinity` | controller affinity rules | ... | spread controllers to different nodes | +`controller.tolerations` | List of node taints to tolerate | `nil` | +`controller.resources` | Add resources requests and limits to controller deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) +`controller.nodeSelector` | Enable and specify nodeSelector labels | `{}` | +`controller.disruptionbudget` | controller PodDisruptionBudget. 0 to disable. Recommended value: 2. | `0` | +`controller.priorityClassName` | controller priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` | +`controller.env` | User-defined environment variables for controller.
| `[]` | +`controller.pvc.enabled` | If true, enable persistence for controller using PVC | `false` | Require persistent volume type RWX, and storage 1Gi +`controller.pvc.storageClass` | Storage Class to be used | `default` | +`controller.pvc.capacity` | Storage capacity | `1Gi` | +`controller.azureFileShare.enabled` | If true, enable the usage of an existing or statically provisioned Azure File Share | `false` | +`controller.azureFileShare.secretName` | The name of the secret containing the Azure file share storage account name and key | `nil` | +`controller.azureFileShare.shareName` | The name of the Azure file share to use | `nil` | +`controller.apisvc.type` | Controller REST API service type | `nil` | +`controller.apisvc.annotations` | Add annotations to controller REST API service | `{}` | +`controller.apisvc.route.enabled` | If true, create an OpenShift route to expose the Controller REST API service | `false` | +`controller.apisvc.route.termination` | Specify TLS termination for OpenShift route for Controller REST API service. Possible passthrough, edge, reencrypt | `passthrough` | +`controller.apisvc.route.host` | Set controller REST API service hostname | `nil` | +`controller.certificate.secret` | Replace controller REST API certificate using secret if secret name is specified | `nil` | +`controller.certificate.keyFile` | Replace controller REST API certificate key file | `tls.key` | +`controller.certificate.pemFile` | Replace controller REST API certificate pem file | `tls.pem` | +`controller.federation.mastersvc.type` | Multi-cluster primary cluster service type. If specified, the deployment will be used to manage other clusters. Possible values include NodePort, LoadBalancer and ClusterIP. | `nil` | +`controller.federation.mastersvc.route.enabled` | If true, create an OpenShift route to expose the Multi-cluster primary cluster service | `false` | +`controller.federation.mastersvc.route.host` | Set OpenShift route host for primary cluster service | `nil` | +`controller.federation.mastersvc.route.termination` | Specify TLS termination for OpenShift route for Multi-cluster primary cluster service. Possible passthrough, edge, reencrypt | `passthrough` | +`controller.federation.mastersvc.ingress.enabled` | If true, create ingress for federation master service, must also set ingress host value | `false` | enable this if ingress controller is installed +`controller.federation.mastersvc.ingress.tls` | If true, TLS is enabled for controller federation master ingress service |`false` | If set, the tls-host used is the one set with `controller.federation.mastersvc.ingress.host`. +`controller.federation.mastersvc.ingress.host` | Must set this host value if ingress is enabled | `nil` | +`controller.federation.mastersvc.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually) +`controller.federation.mastersvc.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations. +`controller.federation.mastersvc.ingress.annotations` | Add annotations to ingress to influence behavior | `ingress.kubernetes.io/protocol: https ingress.kubernetes.io/rewrite-target: /` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) +`controller.federation.managedsvc.type` | Multi-cluster managed cluster service type. If specified, the deployment will be managed by the managed cluster.
Possible values include NodePort, LoadBalancer and ClusterIP. | `nil` | +`controller.federation.managedsvc.route.enabled` | If true, create an OpenShift route to expose the Multi-cluster managed cluster service | `false` | +`controller.federation.managedsvc.route.host` | Set OpenShift route host for managed service | `nil` | +`controller.federation.managedsvc.route.termination` | Specify TLS termination for OpenShift route for Multi-cluster managed cluster service. Possible passthrough, edge, reencrypt | `passthrough` | +`controller.federation.managedsvc.ingress.enabled` | If true, create ingress for federation managed service, must also set ingress host value | `false` | enable this if ingress controller is installed +`controller.federation.managedsvc.ingress.tls` | If true, TLS is enabled for controller federation managed ingress service |`false` | If set, the tls-host used is the one set with `controller.federation.managedsvc.ingress.host`. +`controller.federation.managedsvc.ingress.host` | Must set this host value if ingress is enabled | `nil` | +`controller.federation.managedsvc.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually) +`controller.federation.managedsvc.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations. +`controller.federation.managedsvc.ingress.annotations` | Add annotations to ingress to influence behavior | `ingress.kubernetes.io/protocol: https ingress.kubernetes.io/rewrite-target: /` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) +`controller.ingress.enabled` | If true, create ingress for rest api, must also set ingress host value | `false` | enable this if ingress controller is installed +`controller.ingress.tls` | If true, TLS is enabled for controller rest api ingress service |`false` | If set, the tls-host used is the one set with `controller.ingress.host`. +`controller.ingress.host` | Must set this host value if ingress is enabled | `nil` | +`controller.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually) +`controller.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations. +`controller.ingress.annotations` | Add annotations to ingress to influence behavior | `ingress.kubernetes.io/protocol: https ingress.kubernetes.io/rewrite-target: /` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) +`controller.configmap.enabled` | If true, configure NeuVector global settings using a ConfigMap | `false` +`controller.configmap.data` | NeuVector configuration in YAML format | `{}` +`controller.secret.enabled` | If true, configure NeuVector global settings using secrets | `false` +`controller.secret.data` | NeuVector configuration in key/value pair format | `{}` +`enforcer.enabled` | If true, create enforcer | `true` | +`enforcer.image.repository` | enforcer image repository | `neuvector/enforcer` | +`enforcer.image.hash` | enforcer image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | | +`enforcer.priorityClassName` | enforcer priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` | +`enforcer.tolerations` | List of node taints to tolerate | `- effect: NoSchedule`
    `key: node-role.kubernetes.io/master` | other taints can be added after the default +`enforcer.resources` | Add resources requests and limits to enforcer deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) +`manager.enabled` | If true, create manager | `true` | +`manager.image.repository` | manager image repository | `neuvector/manager` | +`manager.image.hash` | manager image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | | +`manager.priorityClassName` | manager priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` | +`manager.env.ssl` | If false, manager will listen on HTTP access instead of HTTPS | `true` | +`manager.svc.type` | set manager service type for native Kubernetes | `NodePort`;
    if it is OpenShift platform or ingress is enabled, then default is `ClusterIP` | set to LoadBalancer if using cloud providers, such as Azure, Amazon, Google +`manager.svc.loadBalancerIP` | if manager service type is LoadBalancer, this is used to specify the load balancer's IP | `nil` | +`manager.svc.annotations` | Add annotations to manager service | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) +`manager.route.enabled` | If true, create a OpenShift route to expose the management consol service | `true` | +`manager.route.host` | Set OpenShift route host for management consol service | `nil` | +`manager.route.termination` | Specify TLS termination for OpenShift route for management consol service. Possible passthrough, edge, reencrypt | `passthrough` | +`manager.certificate.secret` | Replace manager UI certificate using secret if secret name is specified | `nil` | +`manager.certificate.keyFile` | Replace manager UI certificate key file | `tls.key` | +`manager.certificate.pemFile` | Replace manager UI certificate pem file | `tls.pem` | +`manager.ingress.enabled` | If true, create ingress, must also set ingress host value | `false` | enable this if ingress controller is installed +`manager.ingress.host` | Must set this host value if ingress is enabled | `nil` | +`manager.ingress.path` | Set ingress path |`/` | If set, it might be necessary to set a rewrite rule in annotations. Currently only supports `/` +`manager.ingress.annotations` | Add annotations to ingress to influence behavior | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) +`manager.ingress.tls` | If true, TLS is enabled for manager ingress service |`false` | If set, the tls-host used is the one set with `manager.ingress.host`. +`manager.ingress.secretName` | Name of the secret to be used for TLS-encryption | `nil` | Secret must be created separately (Let's encrypt, manually) +`manager.resources` | Add resources requests and limits to manager deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) +`manager.affinity` | manager affinity rules | `{}` | +`manager.tolerations` | List of node taints to tolerate | `nil` | +`manager.nodeSelector` | Enable and specify nodeSelector labels | `{}` | +`cve.updater.enabled` | If true, create cve updater | `true` | +`cve.updater.secure` | If ture, API server's certificate is validated | `false` | +`cve.updater.image.repository` | cve updater image repository | `neuvector/updater` | +`cve.updater.image.tag` | image tag for cve updater | `latest` | +`cve.updater.image.hash` | cve updateer image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | | +`cve.updater.priorityClassName` | cve updater priorityClassName. Must exist prior to helm deployment. Leave empty to disable. | `nil` | +`cve.updater.schedule` | cronjob cve updater schedule | `0 0 * * *` | +`cve.scanner.enabled` | If true, cve scanners will be deployed | `true` | +`cve.scanner.image.repository` | cve scanner image repository | `neuvector/scanner` | +`cve.scanner.image.tag` | cve scanner image tag | `latest` | +`cve.updater.image.hash` | cve scanner image hash in the format of sha256:xxxx. If present it overwrites the image tag value. | | +`cve.scanner.priorityClassName` | cve scanner priorityClassName. Must exist prior to helm deployment. Leave empty to disable. 
| `nil` | +`cve.scanner.replicas` | external scanner replicas | `3` | +`cve.scanner.dockerPath` | the remote docker socket if CI/CD integration need scan images before they are pushed to the registry | `nil` | +`cve.scanner.resources` | Add resources requests and limits to scanner deployment | `{}` | see examples in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) | +`cve.scanner.affinity` | scanner affinity rules | `{}` | +`cve.scanner.tolerations` | List of node taints to tolerate | `nil` | +`cve.scanner.nodeSelector` | Enable and specify nodeSelector labels | `{}` | +`docker.path` | docker path | `/var/run/docker.sock` | +`containerd.enabled` | Set to true, if the container runtime is containerd | `false` | **Note**: For k3s cluster, set k3s.enabled to true instead +`containerd.path` | If containerd is enabled, this local containerd socket path will be used | `/var/run/containerd/containerd.sock` | +`crio.enabled` | Set to true, if the container runtime is cri-o | `false` | +`crio.path` | If cri-o is enabled, this local cri-o socket path will be used | `/var/run/crio/crio.sock` | +`k3s.enabled` | Set to true for k3s | `false` | +`k3s.runtimePath` | If k3s is enabled, this local containerd socket path will be used | `/run/k3s/containerd/containerd.sock` | +`bottlerocket.enabled` | Set to true if using AWS bottlerocket | `false` | +`bottlerocket.runtimePath` | If bottlerocket is enabled, this local containerd socket path will be used | `/run/dockershim.sock` | +`admissionwebhook.type` | admission webhook type | `ClusterIP` | +`crdwebhook.enabled` | Enable crd service and create crd related resources | `true` | +`crdwebhook.type` | crd webhook type | `ClusterIP` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release --namespace neuvector ./neuvector-helm/ --set manager.env.ssl=off +``` + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release --namespace neuvector ./neuvector-helm/ -f values.yaml +``` + +--- +Contact for access to container registry and docs. + diff --git a/charts/neuvector/neuvector/1.9.100/app-readme.md b/charts/neuvector/neuvector/1.9.100/app-readme.md new file mode 100644 index 000000000..fad2e5235 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/app-readme.md @@ -0,0 +1,14 @@ +### Run-Time Protection Without Compromise + +NeuVector delivers a complete run-time security solution with container process/file system protection and vulnerability scanning combined with the only true Layer 7 container firewall. Protect sensitive data with a complete container security platform. + +NeuVector integrates tightly with Rancher and Kubernetes to extend the built-in security features for applications that require defense in depth. 
Security features include: + ++ Build phase vulnerability scanning with Jenkins plug-in and registry scanning ++ Admission control to prevent vulnerable or unauthorized image deployments using Kubernetes admission control webhooks ++ Complete run-time scanning with network, process, and file system monitoring and protection ++ The industry's only layer 7 container firewall for multi-protocol threat detection and automated segmentation ++ Advanced network controls including DLP detection, service mesh integration, connection blocking and packet captures ++ Run-time vulnerability scanning and CIS benchmarks + +Please Note: Before installing this chart, you will need to get an image pull secret and license key from NeuVector. Without this data supplied, the chart will not work. Configure correct container runtime and runtime path. diff --git a/charts/neuvector/neuvector/1.9.100/questions.yml b/charts/neuvector/neuvector/1.9.100/questions.yml new file mode 100644 index 000000000..e499491de --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/questions.yml @@ -0,0 +1,213 @@ +questions: +#image configurations +- variable: registry + default: "registry.neuvector.com" + description: image registry + type: string + label: Image Registry + group: "Container Images" +- variable: oem + default: "" + description: OEM release name + type: string + label: OEM name + group: "Container Images" +- variable: tag + default: "4.4.4" + description: image tag for controller enforcer manager + type: string + label: Image Tag + group: "Container Images" +- variable: imagePullSecrets + default: "" + description: secret name to pull image + type: string + label: Image Pull Secrets + group: "Container Images" +- variable: controller.image.repository + default: "neuvector/controller" + description: controller image repository + type: string + label: Controller image path + group: "Container Images" +- variable: manager.image.repository + default: "neuvector/manager" + description: manager image repository + type: string + label: Manager image path + group: "Container Images" +- variable: enforcer.image.repository + default: "neuvector/enforcer" + description: enforcer image repository + type: string + label: Enforcer image path + group: "Container Images" +- variable: cve.scanner.image.repository + default: "neuvector/scanner" + description: scanner image repository + type: string + label: Scanner image path + group: "Container Images" +- variable: cve.updater.image.repository + default: "neuvector/updater" + description: cve updater image repository + type: string + label: CVE Updater image path + group: "Container Images" +#Container Runtime configurations +- variable: docker.enabled + default: true + description: Docker runtime. Enable only one runtime. + type: boolean + label: Docker Runtime + show_subquestion_if: true + group: "Container Runtime" + subquestions: + - variable: docker.path + default: "/var/run/docker.sock" + description: "Docker Runtime Path" + type: string + label: Runtime Path +- variable: containerd.enabled + default: "false" + description: Containerd runtime. Enable only one runtime. + type: boolean + label: Containerd Runtime + show_subquestion_if: true + group: "Container Runtime" + subquestions: + - variable: containerd.path + default: " /var/run/containerd/containerd.sock" + description: "Containerd Runtime Path" + type: string + label: Runtime Path +- variable: crio.enabled + default: "false" + description: CRI-O runtime. Enable only one runtime. 
+ type: boolean + label: CRI-O Runtime + show_subquestion_if: true + group: "Container Runtime" + subquestions: + - variable: crio.path + default: "/var/run/crio/crio.sock" + description: "CRI-O Runtime Path" + type: string + label: Runtime Path +- variable: k3s.enabled + default: "false" + description: k3s containerd runtime. Enable only one runtime. + type: boolean + label: k3s Containerd Runtime + show_subquestion_if: true + group: "Container Runtime" + subquestions: + - variable: k3s.runtimePath + default: " /run/k3s/containerd/containerd.sock" + description: "k3s Containerd Runtime Path" + type: string + label: Runtime Path +#storage configurations +- variable: controller.pvc.enabled + default: false + description: If true, enable persistence for controller using PVC + type: boolean + label: PVC status + group: "PVC Configuration" +- variable: controller.pvc.storageClass + default: "" + description: Storage Class to be used + type: string + label: Storage Class Name + group: "PVC Configuration" +#ingress configurations +- variable: manager.ingress.enabled + default: false + description: If true, create ingress, must also set ingress host value + type: boolean + label: Manager ingress status + group: "Ingress Configuration" +- variable: manager.ingress.host + default: "" + description: Must set this host value if ingress is enabled + type: string + label: Manager Ingress host + group: "Ingress Configuration" +- variable: manager.ingress.path + default: "/" + description: Set ingress path + type: string + label: Manager Ingress path + group: "Ingress Configuration" +- variable: manager.ingress.annotations + default: "{}" + description: Add annotations to ingress to influence behavior. Please use the 'Edit as YAML' feature in the Rancher UI to add single or multiple lines of annotation. + type: string + label: Manager Ingress annotations + group: "Ingress Configuration" +- variable: controller.ingress.enabled + default: false + description: If true, create ingress for rest api, must also set ingress host value + type: boolean + label: Controller ingress status + group: "Ingress Configuration" +- variable: controller.ingress.host + default: "" + description: Must set this host value if ingress is enabled + type: string + label: Controller Ingress host + group: "Ingress Configuration" +- variable: controller.ingress.path + default: "/" + description: Set ingress path + type: string + label: Controller Ingress path + group: "Ingress Configuration" +- variable: controller.ingress.annotations + default: "{}" + description: Add annotations to ingress to influence behavior. Please use the 'Edit as YAML' feature in the Rancher UI to add single or multiple lines of annotation. + type: string + label: Controller Ingress annotations + group: "Ingress Configuration" +#service configurations +- variable: manager.svc.type + default: "NodePort" + description: Set manager service type for native Kubernetes + type: enum + label: Manager service type + group: "Service Configuration" + options: + - "NodePort" + - "ClusterIP" + - "LoadBalancer" +- variable: controller.federation.mastersvc.type + default: "" + description: Multi-cluster master cluster service type. If specified, the deployment will be used to manage other clusters. 
diff --git a/charts/neuvector/neuvector/1.9.100/templates/NOTES.txt b/charts/neuvector/neuvector/1.9.100/templates/NOTES.txt
new file mode 100644
index 000000000..e79b2cc21
--- /dev/null
+++ b/charts/neuvector/neuvector/1.9.100/templates/NOTES.txt
@@ -0,0 +1,20 @@
+{{- if and .Values.manager.enabled .Values.manager.ingress.enabled }}
+From outside the cluster, the NeuVector URL is:
+http://{{ .Values.manager.ingress.host }}
+{{- else if not .Values.openshift }}
+Get the NeuVector URL by running these commands:
+{{- if contains "NodePort" .Values.manager.svc.type }}
+  NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services neuvector-service-webui)
+  NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo https://$NODE_IP:$NODE_PORT
+{{- else if contains "ClusterIP" .Values.manager.svc.type }}
+  CLUSTER_IP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.clusterIP}" services neuvector-service-webui)
+  echo https://$CLUSTER_IP:8443
+{{- else if contains "LoadBalancer" .Values.manager.svc.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w neuvector-service-webui'
+
+  SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} neuvector-service-webui -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
+  echo https://$SERVICE_IP:8443
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/neuvector/neuvector/1.9.100/templates/_helpers.tpl b/charts/neuvector/neuvector/1.9.100/templates/_helpers.tpl
new file mode 100644
index 000000000..c0cc49294
--- /dev/null
+++ b/charts/neuvector/neuvector/1.9.100/templates/_helpers.tpl
@@ -0,0 +1,32 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "neuvector.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}} +{{- define "neuvector.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "neuvector.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/neuvector/neuvector/1.9.100/templates/admission-webhook-service.yaml b/charts/neuvector/neuvector/1.9.100/templates/admission-webhook-service.yaml new file mode 100644 index 000000000..8a0a76aaa --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/admission-webhook-service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: neuvector-svc-admission-webhook + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + ports: + - port: 443 + targetPort: 20443 + protocol: TCP + name: admission-webhook + type: {{ .Values.admissionwebhook.type }} + selector: + app: neuvector-controller-pod \ No newline at end of file diff --git a/charts/neuvector/neuvector/1.9.100/templates/clusterrole.yaml b/charts/neuvector/neuvector/1.9.100/templates/clusterrole.yaml new file mode 100644 index 000000000..6673e2f6c --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/clusterrole.yaml @@ -0,0 +1,119 @@ +{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}} +{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}} +{{- if $oc3 }} +apiVersion: authorization.openshift.io/v1 +{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: ClusterRole +metadata: + name: neuvector-binding-app + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - "" + resources: + - nodes + - pods + - services + - namespaces + verbs: + - get + - list + - watch + - update + +--- + +{{- if $oc3 }} +apiVersion: authorization.openshift.io/v1 +{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: ClusterRole +metadata: + name: neuvector-binding-rbac + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.openshift }} +- apiGroups: + - image.openshift.io + resources: + - imagestreams + verbs: + - get + - list + - watch +{{- end }} +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + - clusterrolebindings + - clusterroles + verbs: + - get + - list + - watch + +--- + +{{- if $oc3 }} +apiVersion: authorization.openshift.io/v1 +{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: ClusterRole +metadata: + name: neuvector-binding-admission + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + - mutatingwebhookconfigurations + verbs: + - get + - list + - watch + - create + - update + - delete + +--- + +{{- if $oc4 }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: neuvector-binding-co + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - config.openshift.io + resources: + - clusteroperators + verbs: + - get + - list +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/clusterrolebinding.yaml b/charts/neuvector/neuvector/1.9.100/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..2ae8aed3a --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/clusterrolebinding.yaml @@ -0,0 +1,145 @@ +{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}} +{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}} + +{{- if $oc3 }} +apiVersion: authorization.openshift.io/v1 +{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: ClusterRoleBinding +metadata: + name: neuvector-binding-app + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: +{{- if not $oc3 }} + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- end }} + name: neuvector-binding-app +subjects: +- kind: ServiceAccount + name: {{ .Values.serviceAccount }} + namespace: {{ .Release.Namespace }} +{{- if $oc3 }} +userNames: +- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }} +{{- end }} + +--- + +{{- if $oc3 }} +apiVersion: authorization.openshift.io/v1 +{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: ClusterRoleBinding +metadata: + name: neuvector-binding-rbac + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: +{{- if not $oc3 }} + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- end }} + name: neuvector-binding-rbac +subjects: +- kind: ServiceAccount + name: {{ .Values.serviceAccount }} + namespace: {{ .Release.Namespace }} +{{- if $oc3 }} +userNames: +- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }} +{{- end }} + +--- + +{{- if $oc3 }} +apiVersion: authorization.openshift.io/v1 +{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: ClusterRoleBinding +metadata: + name: neuvector-binding-admission + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: +{{- if not $oc3 }} + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- end }} + name: neuvector-binding-admission +subjects: +- kind: ServiceAccount + name: {{ .Values.serviceAccount }} + namespace: {{ .Release.Namespace }} +{{- if $oc3 }} +userNames: +- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }} +{{- end }} + +--- + +{{- if $oc3 }} +apiVersion: authorization.openshift.io/v1 +{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: ClusterRoleBinding +metadata: + name: neuvector-binding-view + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: +{{- if not $oc3 }} + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- end }} + name: view +subjects: +- kind: ServiceAccount + name: {{ .Values.serviceAccount }} + namespace: {{ .Release.Namespace }} +{{- if $oc3 }} +userNames: +- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }} +{{- end }} + +--- + +{{- if $oc4 }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: neuvector-binding-co + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: neuvector-binding-co +subjects: +- kind: ServiceAccount + name: {{ .Values.serviceAccount }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/controller-deployment.yaml b/charts/neuvector/neuvector/1.9.100/templates/controller-deployment.yaml new file mode 100644 index 000000000..3e60ed29a --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/controller-deployment.yaml @@ -0,0 +1,199 @@ +{{- if .Values.controller.enabled -}} +{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Deployment +metadata: + name: neuvector-controller-pod + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.controller.replicas }} + minReadySeconds: 60 + strategy: +{{ toYaml .Values.controller.strategy | indent 4 }} + selector: + matchLabels: + app: neuvector-controller-pod + template: + metadata: + labels: + app: neuvector-controller-pod + release: {{ .Release.Name }} + spec: + {{- if .Values.controller.affinity }} + affinity: +{{ toYaml .Values.controller.affinity | indent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: +{{ toYaml .Values.controller.tolerations | indent 8 }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: +{{ toYaml .Values.controller.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.controller.schedulerName }} + schedulerName: {{ .Values.controller.schedulerName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + serviceAccountName: {{ .Values.serviceAccount }} + serviceAccount: {{ .Values.serviceAccount }} + containers: + - name: neuvector-controller-pod + {{ if eq .Values.registry "registry.neuvector.com" }} + {{ if .Values.oem }} + image: "{{ .Values.registry }}/{{ .Values.oem }}/controller:{{ .Values.tag }}" + {{- else }} + image: "{{ .Values.registry }}/controller:{{ .Values.tag }}" + {{- end }} + {{- else }} + {{ if .Values.controller.image.hash }} + image: "{{ .Values.registry }}/{{ .Values.controller.image.repository }}@{{ .Values.controller.image.hash }}" + {{- else }} + image: "{{ .Values.registry }}/{{ .Values.controller.image.repository }}:{{ .Values.tag }}" + {{- end }} + {{- end }} + securityContext: + privileged: true + resources: + {{- if .Values.controller.resources }} +{{ toYaml .Values.controller.resources | indent 12 }} + {{- else }} +{{ toYaml .Values.resources | indent 12 }} + {{- end }} + readinessProbe: + exec: + command: + - cat + - /tmp/ready + initialDelaySeconds: 5 + periodSeconds: 5 + env: + - name: CLUSTER_JOIN_ADDR + value: neuvector-svc-controller.{{ .Release.Namespace }} + - name: CLUSTER_ADVERTISED_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: CLUSTER_BIND_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if or .Values.controller.pvc.enabled .Values.controller.azureFileShare.enabled }} + - name: CTRL_PERSIST_CONFIG + value: "1" + {{- end }} + {{- with .Values.controller.env }} +{{- toYaml . 
| nindent 12 }} + {{- end }} + volumeMounts: + - mountPath: /var/neuvector + name: nv-share + readOnly: false + {{- if .Values.containerd.enabled }} + - mountPath: /var/run/containerd/containerd.sock + {{- else if .Values.k3s.enabled }} + - mountPath: /var/run/containerd/containerd.sock + {{- else if .Values.bottlerocket.enabled }} + - mountPath: /var/run/containerd/containerd.sock + {{- else if .Values.crio.enabled }} + - mountPath: /var/run/crio/crio.sock + {{- else }} + - mountPath: /var/run/docker.sock + {{- end }} + name: runtime-sock + readOnly: true + - mountPath: /host/proc + name: proc-vol + readOnly: true + - mountPath: /host/cgroup + name: cgroup-vol + readOnly: true + - mountPath: /etc/config + name: config-volume + readOnly: true + {{- if .Values.controller.certificate.secret }} + - mountPath: /etc/neuvector/certs/ssl-cert.key + subPath: {{ .Values.controller.certificate.keyFile }} + name: cert + readOnly: true + - mountPath: /etc/neuvector/certs/ssl-cert.pem + subPath: {{ .Values.controller.certificate.pemFile }} + name: cert + readOnly: true + {{- end }} + terminationGracePeriodSeconds: 300 + restartPolicy: Always + volumes: + - name: nv-share + {{- if .Values.controller.pvc.enabled }} + persistentVolumeClaim: + claimName: neuvector-data + {{- else if .Values.controller.azureFileShare.enabled }} + azureFile: + secretName: {{ .Values.controller.azureFileShare.secretName }} + shareName: {{ .Values.controller.azureFileShare.shareName }} + readOnly: false + {{- else }} + hostPath: + path: /var/neuvector + {{- end }} + - name: runtime-sock + hostPath: + {{- if .Values.containerd.enabled }} + path: {{ .Values.containerd.path }} + {{- else if .Values.crio.enabled }} + path: {{ .Values.crio.path }} + {{- else if .Values.k3s.enabled }} + path: {{ .Values.k3s.runtimePath }} + {{- else if .Values.bottlerocket.enabled }} + path: {{ .Values.bottlerocket.runtimePath }} + {{- else }} + path: {{ .Values.docker.path }} + {{- end }} + - name: proc-vol + hostPath: + path: /proc + - name: cgroup-vol + hostPath: + path: /sys/fs/cgroup + - name: config-volume + projected: + sources: + - configMap: + name: neuvector-init + optional: true + - secret: + name: neuvector-init + optional: true + {{- if .Values.controller.certificate.secret }} + - name: cert + secret: + secretName: {{ .Values.controller.certificate.secret }} + {{- end }} +{{- if gt (int .Values.controller.disruptionbudget) 0 }} +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: neuvector-controller-pdb + namespace: neuvector +spec: + minAvailable: {{ .Values.controller.disruptionbudget }} + selector: + matchLabels: + app: neuvector-controller-pod +{{- end }} +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/controller-ingress.yaml b/charts/neuvector/neuvector/1.9.100/templates/controller-ingress.yaml new file mode 100644 index 000000000..2738f1791 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/controller-ingress.yaml @@ -0,0 +1,210 @@ +{{- if .Values.controller.enabled }} +{{- if .Values.controller.ingress.enabled }} +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: neuvector-restapi-ingress + namespace: {{ .Release.Namespace }} +{{- with .Values.controller.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.controller.ingress.tls }} + tls: + - hosts: + - {{ .Values.controller.ingress.host }} +{{- if .Values.controller.ingress.secretName }} + secretName: {{ .Values.controller.ingress.secretName }} +{{- end }} +{{- end }} + rules: + - host: {{ .Values.controller.ingress.host }} + http: + paths: + - path: {{ .Values.controller.ingress.path }} + pathType: Prefix + backend: + service: + name: neuvector-svc-controller-api + port: + number: 10443 +{{- else }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: neuvector-restapi-ingress + namespace: {{ .Release.Namespace }} +{{- with .Values.controller.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.controller.ingress.tls }} + tls: + - hosts: + - {{ .Values.controller.ingress.host }} +{{- if .Values.controller.ingress.secretName }} + secretName: {{ .Values.controller.ingress.secretName }} +{{- end }} +{{- end }} + rules: + - host: {{ .Values.controller.ingress.host }} + http: + paths: + - path: {{ .Values.controller.ingress.path }} + backend: + serviceName: neuvector-svc-controller-api + servicePort: 10443 +{{- end }} +{{- end }} +{{- if .Values.controller.federation.mastersvc.ingress.enabled }} +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: neuvector-mastersvc-ingress + namespace: {{ .Release.Namespace }} +{{- with .Values.controller.federation.mastersvc.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.controller.federation.mastersvc.ingress.tls }} + tls: + - hosts: + - {{ .Values.controller.federation.mastersvc.ingress.host }} +{{- if .Values.controller.federation.mastersvc.ingress.secretName }} + secretName: {{ .Values.controller.federation.mastersvc.ingress.secretName }} +{{- end }} +{{- end }} + rules: + - host: {{ .Values.controller.federation.mastersvc.ingress.host }} + http: + paths: + - path: {{ .Values.controller.federation.mastersvc.ingress.path }} + pathType: Prefix + backend: + service: + name: neuvector-svc-controller-fed-master + port: + number: 11443 +{{- else }} +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: neuvector-mastersvc-ingress + namespace: {{ .Release.Namespace }} +{{- with .Values.controller.federation.mastersvc.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.controller.federation.mastersvc.ingress.tls }} + tls: + - hosts: + - {{ .Values.controller.federation.mastersvc.ingress.host }} +{{- if .Values.controller.federation.mastersvc.ingress.secretName }} + secretName: {{ .Values.controller.federation.mastersvc.ingress.secretName }} +{{- end }} +{{- end }} + rules: + - host: {{ .Values.controller.federation.mastersvc.ingress.host }} + http: + paths: + - path: {{ .Values.controller.federation.mastersvc.ingress.path }} + backend: + serviceName: neuvector-svc-controller-fed-master + servicePort: 11443 +{{- end }} +{{- end }} +{{- if .Values.controller.federation.managedsvc.ingress.enabled }} +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: neuvector-managedsvc-ingress + namespace: {{ .Release.Namespace }} +{{- with .Values.controller.federation.managedsvc.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.controller.federation.managedsvc.ingress.tls }} + tls: + - hosts: + - {{ .Values.controller.federation.managedsvc.ingress.host }} +{{- if .Values.controller.federation.managedsvc.ingress.secretName }} + secretName: {{ .Values.controller.federation.managedsvc.ingress.secretName }} +{{- end }} +{{- end }} + rules: + - host: {{ .Values.controller.federation.managedsvc.ingress.host }} + http: + paths: + - path: {{ .Values.controller.federation.managedsvc.ingress.path }} + pathType: Prefix + backend: + service: + name: neuvector-svc-controller-fed-managed + port: + number: 10443 +{{- else }} +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: neuvector-managedsvc-ingress + namespace: {{ .Release.Namespace }} +{{- with .Values.controller.federation.managedsvc.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.controller.federation.managedsvc.ingress.tls }} + tls: + - hosts: + - {{ .Values.controller.federation.managedsvc.ingress.host }} +{{- if .Values.controller.federation.managedsvc.ingress.secretName }} + secretName: {{ .Values.controller.federation.managedsvc.ingress.secretName }} +{{- end }} +{{- end }} + rules: + - host: {{ .Values.controller.federation.managedsvc.ingress.host }} + http: + paths: + - path: {{ .Values.controller.federation.managedsvc.ingress.path }} + backend: + serviceName: neuvector-svc-controller-fed-managed + servicePort: 10443 +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/neuvector/neuvector/1.9.100/templates/controller-route.yaml b/charts/neuvector/neuvector/1.9.100/templates/controller-route.yaml new file mode 100644 index 000000000..ad0720fab --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/controller-route.yaml @@ -0,0 +1,82 @@ +{{- if .Values.openshift -}} +{{- if .Values.controller.apisvc.route.enabled }} +{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: route.openshift.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: Route +metadata: + name: neuvector-route-api + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.controller.apisvc.route.host }} + host: {{ .Values.controller.apisvc.route.host }} +{{- end }} + to: + kind: Service + name: neuvector-svc-controller-api + port: + targetPort: controller-api + tls: + termination: {{ .Values.controller.apisvc.route.termination }} +--- +{{ end -}} +{{- if .Values.controller.federation.mastersvc.route.enabled }} +{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: route.openshift.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: Route +metadata: + name: neuvector-route-fed-master + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.controller.federation.mastersvc.route.host }} + host: {{ .Values.controller.federation.mastersvc.route.host }} +{{- end }} + to: + kind: Service + name: neuvector-svc-controller-fed-master + port: + targetPort: fed + tls: + termination: {{ .Values.controller.federation.mastersvc.route.termination }} +--- +{{ end -}} +{{- if .Values.controller.federation.managedsvc.route.enabled }} +{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: route.openshift.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: Route +metadata: + name: neuvector-route-fed-managed + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.controller.federation.managedsvc.route.host }} + host: {{ .Values.controller.federation.managedsvc.route.host }} +{{- end }} + to: + kind: Service + name: neuvector-svc-controller-fed-managed + port: + targetPort: fed + tls: + termination: {{ .Values.controller.federation.managedsvc.route.termination }} +{{ end -}} +{{- end -}} diff --git a/charts/neuvector/neuvector/1.9.100/templates/controller-service.yaml b/charts/neuvector/neuvector/1.9.100/templates/controller-service.yaml new file mode 100644 index 000000000..e7971b2ed --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/controller-service.yaml @@ -0,0 +1,89 @@ +{{- if .Values.controller.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: neuvector-svc-controller + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + clusterIP: None + ports: + - port: 18300 + protocol: "TCP" + name: "cluster-tcp-18300" + - port: 18301 + protocol: "TCP" + name: "cluster-tcp-18301" + - port: 18301 + protocol: "UDP" + name: "cluster-udp-18301" + selector: + app: neuvector-controller-pod +{{- if .Values.controller.apisvc.type }} +--- +apiVersion: v1 +kind: Service +metadata: + name: neuvector-svc-controller-api + namespace: {{ .Release.Namespace }} +{{- with .Values.controller.apisvc.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.controller.apisvc.type }} + ports: + - port: 10443 + protocol: "TCP" + name: "controller-api" + selector: + app: neuvector-controller-pod +{{ end -}} +{{- if .Values.controller.federation.mastersvc.type }} +--- +apiVersion: v1 +kind: Service +metadata: + name: neuvector-svc-controller-fed-master + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.controller.federation.mastersvc.type }} + ports: + - port: 11443 + name: fed + protocol: TCP + selector: + app: neuvector-controller-pod +{{ end -}} +{{- if .Values.controller.federation.managedsvc.type }} +--- +apiVersion: v1 +kind: Service +metadata: + name: neuvector-svc-controller-fed-managed + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.controller.federation.managedsvc.type }} + ports: + - port: 10443 + name: fed + protocol: TCP + selector: + app: neuvector-controller-pod +{{ end -}} +{{- end -}} diff --git a/charts/neuvector/neuvector/1.9.100/templates/crd.yaml b/charts/neuvector/neuvector/1.9.100/templates/crd.yaml new file mode 100644 index 000000000..9778da678 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/crd.yaml @@ -0,0 +1,926 @@ +{{- if .Values.crdwebhook.enabled -}} +{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}} +{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}} +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: apiextensions.k8s.io/v1 +{{- else }} +apiVersion: apiextensions.k8s.io/v1beta1 +{{- end }} +kind: CustomResourceDefinition +metadata: + name: nvsecurityrules.neuvector.com + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + group: neuvector.com + names: + kind: NvSecurityRule + listKind: NvSecurityRuleList + plural: nvsecurityrules + singular: nvsecurityrule + scope: Namespaced +{{- if (semverCompare "<1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} + version: v1 +{{- end }} + versions: + - name: v1 + served: true + storage: true +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} + schema: + openAPIV3Schema: + properties: + spec: + properties: + egress: + items: + properties: + action: + enum: + - allow + - deny + type: string + applications: + items: + type: string + type: array + name: + type: string + ports: + type: string + priority: + type: integer + selector: + properties: + comment: + type: string + criteria: + items: + properties: + key: + type: string + op: + type: string + value: + type: string + required: + - key + - op + - value + type: object + type: array + name: + type: string + original_name: + type: string + required: + - name + - criteria + type: object + required: + - action + - name + - selector + type: object + type: array + file: + items: + properties: + app: + items: + type: string + type: array + behavior: + enum: + - monitor_change + - block_access + type: string + filter: + type: string + recursive: + type: boolean + required: + - behavior + - filter + type: object + type: array + ingress: + items: + properties: + action: + enum: + - allow + - deny + type: string + applications: + items: + type: string + type: array + name: + type: string + ports: + type: string + priority: + type: integer + selector: + properties: + comment: + type: string + criteria: + items: + properties: + key: + type: string + op: + type: string + value: + type: string + required: + - key + - op + - value + type: object + type: array + name: + type: string + original_name: + type: string + required: + - name + - criteria + type: object + required: + - action + - name + - selector + type: object + type: array + process: + items: + properties: + action: + enum: + - allow + - deny + type: string + allow_update: + type: boolean + name: + type: string + path: + type: string + required: + - action + type: object + type: array + process_profile: + properties: + baseline: + enum: + - default + - shield + type: string + type: object + target: + properties: + policymode: + enum: + - Discover + - Monitor + - Protect + - N/A + type: string + selector: + properties: + comment: + type: string + criteria: + items: + properties: + key: + type: string + op: + type: string + value: + type: string + required: + - key + - op + - value + type: object + type: array + name: + type: string + original_name: + type: string + required: + - name + - criteria + type: object + required: + - selector + type: object + waf: + properties: + settings: + items: + properties: + action: + enum: + - allow + - deny + type: string + name: + type: string + required: + - name + - action + type: object + type: array + status: + type: boolean + type: object + required: + - target + type: object + type: object +{{- end }} +--- +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: apiextensions.k8s.io/v1 +{{- else }} +apiVersion: apiextensions.k8s.io/v1beta1 +{{- end }} +kind: CustomResourceDefinition +metadata: + name: nvclustersecurityrules.neuvector.com + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + group: neuvector.com + names: + kind: NvClusterSecurityRule + listKind: NvClusterSecurityRuleList + plural: nvclustersecurityrules + singular: nvclustersecurityrule + scope: Cluster +{{- if (semverCompare "<1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} + version: v1 +{{- end }} + versions: + - name: v1 + served: true + storage: true +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} + schema: + openAPIV3Schema: + properties: + spec: + properties: + egress: + items: + properties: + action: + enum: + - allow + - deny + type: string + applications: + items: + type: string + type: array + name: + type: string + ports: + type: string + priority: + type: integer + selector: + properties: + comment: + type: string + criteria: + items: + properties: + key: + type: string + op: + type: string + value: + type: string + required: + - key + - op + - value + type: object + type: array + name: + type: string + original_name: + type: string + required: + - name + - criteria + type: object + required: + - action + - name + - selector + type: object + type: array + file: + items: + properties: + app: + items: + type: string + type: array + behavior: + enum: + - monitor_change + - block_access + type: string + filter: + type: string + recursive: + type: boolean + required: + - behavior + - filter + type: object + type: array + ingress: + items: + properties: + action: + enum: + - allow + - deny + type: string + applications: + items: + type: string + type: array + name: + type: string + ports: + type: string + priority: + type: integer + selector: + properties: + comment: + type: string + criteria: + items: + properties: + key: + type: string + op: + type: string + value: + type: string + required: + - key + - op + - value + type: object + type: array + name: + type: string + original_name: + type: string + required: + - name + - criteria + type: object + required: + - action + - name + - selector + type: object + type: array + process: + items: + properties: + action: + enum: + - allow + - deny + type: string + allow_update: + type: boolean + name: + type: string + path: + type: string + required: + - action + type: object + type: array + process_profile: + properties: + baseline: + enum: + - default + - shield + type: string + type: object + target: + properties: + policymode: + enum: + - Discover + - Monitor + - Protect + - N/A + type: string + selector: + properties: + comment: + type: string + criteria: + items: + properties: + key: + type: string + op: + type: string + value: + type: string + required: + - key + - op + - value + type: object + type: array + name: + type: string + original_name: + type: string + required: + - name + - criteria + type: object + required: + - selector + type: object + waf: + properties: + settings: + items: + properties: + action: + enum: + - allow + - deny + type: string + name: + type: string + required: + - name + - action + type: object + type: array + status: + type: boolean + type: object + required: + - target + type: object + type: object +{{- end }} +--- +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: apiextensions.k8s.io/v1 +{{- else }} +apiVersion: apiextensions.k8s.io/v1beta1 +{{- end }} +kind: CustomResourceDefinition +metadata: + name: nvadmissioncontrolsecurityrules.neuvector.com + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + group: neuvector.com + names: + kind: NvAdmissionControlSecurityRule + listKind: NvAdmissionControlSecurityRuleList + plural: nvadmissioncontrolsecurityrules + singular: nvadmissioncontrolsecurityrule + scope: Cluster +{{- if (semverCompare "<1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} + version: v1 +{{- end }} + versions: + - name: v1 + served: true + storage: true +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} + schema: + openAPIV3Schema: + properties: + spec: + properties: + config: + properties: + client_mode: + enum: + - service + - url + type: string + enable: + type: boolean + mode: + enum: + - monitor + - protect + type: string + required: + - enable + - mode + - client_mode + type: object + rules: + items: + properties: + action: + enum: + - allow + - deny + type: string + comment: + type: string + criteria: + items: + properties: + name: + type: string + op: + type: string + sub_criteria: + items: + properties: + name: + type: string + op: + type: string + value: + type: string + required: + - name + - op + - value + type: object + type: array + value: + type: string + required: + - name + - op + - value + type: object + type: array + disabled: + type: boolean + id: + type: integer + required: + - action + - criteria + type: object + type: array + type: object + type: object +{{- end }} +--- +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: apiextensions.k8s.io/v1 +{{- else }} +apiVersion: apiextensions.k8s.io/v1beta1 +{{- end }} +kind: CustomResourceDefinition +metadata: + name: nvwafsecurityrules.neuvector.com + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + group: neuvector.com + names: + kind: NvWafSecurityRule + listKind: NvWafSecurityRuleList + plural: nvwafsecurityrules + singular: nvwafsecurityrule + scope: Cluster +{{- if (semverCompare "<1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} + version: v1 +{{- end }} + versions: + - name: v1 + served: true + storage: true +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} + schema: + openAPIV3Schema: + properties: + spec: + properties: + sensor: + properties: + comment: + type: string + name: + type: string + rules: + items: + properties: + name: + type: string + patterns: + items: + properties: + context: + enum: + - url + - header + - body + - packet + type: string + key: + enum: + - pattern + type: string + op: + enum: + - regex + - '!regex' + type: string + value: + type: string + required: + - key + - op + - value + - context + type: object + type: array + required: + - name + - patterns + type: object + type: array + required: + - name + type: object + required: + - sensor + type: object + type: object +{{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: neuvector-svc-crd-webhook + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . 
}}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  ports:
+  - port: 443
+    targetPort: 30443
+    protocol: TCP
+    name: crd-webhook
+  type: {{ .Values.crdwebhook.type }}
+  selector:
+    app: neuvector-controller-pod
+---
+# ClusterRole for NeuVector to operate CRD
+{{- if $oc3 }}
+apiVersion: authorization.openshift.io/v1
+{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{- else }}
+apiVersion: v1
+{{- end }}
+kind: ClusterRole
+metadata:
+  name: neuvector-binding-customresourcedefinition
+  labels:
+    chart: {{ template "neuvector.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+rules:
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - update
+  - watch
+  - create
+  - get
+---
+# ClusterRoleBinding for NeuVector to operate CRD
+{{- if $oc3 }}
+apiVersion: authorization.openshift.io/v1
+{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{- else }}
+apiVersion: v1
+{{- end }}
+kind: ClusterRoleBinding
+metadata:
+  name: neuvector-binding-customresourcedefinition
+  labels:
+    chart: {{ template "neuvector.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+roleRef:
+{{- if not $oc3 }}
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+{{- end }}
+  name: neuvector-binding-customresourcedefinition
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.serviceAccount }}
+  namespace: {{ .Release.Namespace }}
+{{- if $oc3 }}
+userNames:
+- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
+{{- end }}
+---
+# ClusterRole for NeuVector to manage user-created network/process CRD rules
+{{- if $oc3 }}
+apiVersion: authorization.openshift.io/v1
+{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{- else }}
+apiVersion: v1
+{{- end }}
+kind: ClusterRole
+metadata:
+  name: neuvector-binding-nvsecurityrules
+  labels:
+    chart: {{ template "neuvector.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+rules:
+- apiGroups:
+  - neuvector.com
+  resources:
+  - nvsecurityrules
+  - nvclustersecurityrules
+  verbs:
+  - list
+  - delete
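+# As a hedged illustration (the names and port below are placeholders, not chart
+# defaults): a custom resource accepted by the NvSecurityRule schema defined
+# earlier in this file might look like:
+#
+#   apiVersion: neuvector.com/v1
+#   kind: NvSecurityRule
+#   metadata:
+#     name: nginx-pod
+#     namespace: default
+#   spec:
+#     target:
+#       policymode: Protect
+#       selector:
+#         name: nv.nginx-pod.default
+#         criteria:
+#         - key: service
+#           op: =
+#           value: nginx-pod.default
+#     ingress:
+#     - name: allow-frontend-http
+#       action: allow
+#       applications:
+#       - HTTP
+#       ports: tcp/80
+#       selector:
+#         name: nv.frontend.default
+#         criteria:
+#         - key: service
+#           op: =
+#           value: frontend.default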
+---
+# ClusterRoleBinding for NeuVector to manage user-created network/process CRD rules
+{{- if $oc3 }}
+apiVersion: authorization.openshift.io/v1
+{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{- else }}
+apiVersion: v1
+{{- end }}
+kind: ClusterRoleBinding
+metadata:
+  name: neuvector-binding-nvsecurityrules
+  labels:
+    chart: {{ template "neuvector.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+roleRef:
+{{- if not $oc3 }}
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+{{- end }}
+  name: neuvector-binding-nvsecurityrules
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.serviceAccount }}
+  namespace: {{ .Release.Namespace }}
+{{- if $oc3 }}
+userNames:
+- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
+{{- end }}
+---
+# ClusterRole for NeuVector to manage user-created admission control CRD rules
+{{- if $oc3 }}
+apiVersion: authorization.openshift.io/v1
+{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{- else }}
+apiVersion: v1
+{{- end }}
+kind: ClusterRole
+metadata:
+  name: neuvector-binding-nvadmissioncontrolsecurityrules
+  labels:
+    chart: {{ template "neuvector.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+rules:
+- apiGroups:
+  - neuvector.com
+  resources:
+  - nvadmissioncontrolsecurityrules
+  verbs:
+  - list
+  - delete
+---
+# ClusterRoleBinding for NeuVector to manage user-created admission control CRD rules
+{{- if $oc3 }}
+apiVersion: authorization.openshift.io/v1
+{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{- else }}
+apiVersion: v1
+{{- end }}
+kind: ClusterRoleBinding
+metadata:
+  name: neuvector-binding-nvadmissioncontrolsecurityrules
+  labels:
+    chart: {{ template "neuvector.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+roleRef:
+{{- if not $oc3 }}
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+{{- end }}
+  name: neuvector-binding-nvadmissioncontrolsecurityrules
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.serviceAccount }}
+  namespace: {{ .Release.Namespace }}
+{{- if $oc3 }}
+userNames:
+- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }}
+{{- end }}
+---
+# ClusterRole for NeuVector to manage user-created waf CRD rules
+{{- if $oc3 }}
+apiVersion: authorization.openshift.io/v1
+{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{- else }}
+apiVersion: v1
+{{- end }}
+kind: ClusterRole
+metadata:
+  name: neuvector-binding-nvwafsecurityrules
+  labels:
+    chart: {{ template "neuvector.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+rules:
+- apiGroups:
+  - neuvector.com
+  resources:
+  - nvwafsecurityrules
+  verbs:
+  - list
+  - delete
+---
+# ClusterRoleBinding for NeuVector to manage user-created waf CRD rules
+{{- if $oc3 }}
+apiVersion: authorization.openshift.io/v1
+{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{- else }}
+apiVersion: v1
+{{- end }}
+kind: ClusterRoleBinding
+metadata:
+  name: neuvector-binding-nvwafsecurityrules
+  labels:
+    chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: +{{- if not $oc3 }} + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- end }} + name: neuvector-binding-nvwafsecurityrules +subjects: +- kind: ServiceAccount + name: {{ .Values.serviceAccount }} + namespace: {{ .Release.Namespace }} +{{- if $oc3 }} +userNames: +- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }} +{{- end }} +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/enforcer-daemonset.yaml b/charts/neuvector/neuvector/1.9.100/templates/enforcer-daemonset.yaml new file mode 100644 index 000000000..4ce4d879d --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/enforcer-daemonset.yaml @@ -0,0 +1,123 @@ +{{- if .Values.enforcer.enabled -}} +{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: DaemonSet +metadata: + name: neuvector-enforcer-pod + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: neuvector-enforcer-pod + template: + metadata: + labels: + app: neuvector-enforcer-pod + release: {{ .Release.Name }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + {{- if .Values.enforcer.tolerations }} + tolerations: +{{ toYaml .Values.enforcer.tolerations | indent 8 }} + {{- end }} + hostPID: true + {{- if .Values.enforcer.priorityClassName }} + priorityClassName: {{ .Values.enforcer.priorityClassName }} + {{- end }} + serviceAccountName: {{ .Values.serviceAccount }} + serviceAccount: {{ .Values.serviceAccount }} + containers: + - name: neuvector-enforcer-pod + {{ if eq .Values.registry "registry.neuvector.com" }} + {{ if .Values.oem }} + image: "{{ .Values.registry }}/{{ .Values.oem }}/enforcer:{{ .Values.tag }}" + {{- else }} + image: "{{ .Values.registry }}/enforcer:{{ .Values.tag }}" + {{- end }} + {{- else }} + {{ if .Values.enforcer.image.hash }} + image: "{{ .Values.registry }}/{{ .Values.enforcer.image.repository }}@{{ .Values.enforcer.image.hash }}" + {{- else }} + image: "{{ .Values.registry }}/{{ .Values.enforcer.image.repository }}:{{ .Values.tag }}" + {{- end }} + {{- end }} + securityContext: + privileged: true + resources: + {{- if .Values.enforcer.resources }} +{{ toYaml .Values.enforcer.resources | indent 12 }} + {{- else }} +{{ toYaml .Values.resources | indent 12 }} + {{- end }} + env: + - name: CLUSTER_JOIN_ADDR + value: neuvector-svc-controller.{{ .Release.Namespace }} + - name: CLUSTER_ADVERTISED_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: CLUSTER_BIND_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + volumeMounts: + {{- if .Values.containerd.enabled }} + - mountPath: /var/run/containerd/containerd.sock + {{- else if .Values.k3s.enabled }} + - mountPath: /var/run/containerd/containerd.sock + {{- else if .Values.bottlerocket.enabled }} + - mountPath: /var/run/containerd/containerd.sock + {{- else if .Values.crio.enabled }} + - mountPath: /var/run/crio/crio.sock + {{- else }} + - mountPath: /var/run/docker.sock + {{- end }} + name: runtime-sock + readOnly: true + - mountPath: /host/proc + name: proc-vol + readOnly: true + - mountPath: /host/cgroup + name: cgroup-vol + readOnly: true + - mountPath: /lib/modules + name: 
modules-vol + readOnly: true + terminationGracePeriodSeconds: 1200 + restartPolicy: Always + volumes: + - name: runtime-sock + hostPath: + {{- if .Values.containerd.enabled }} + path: {{ .Values.containerd.path }} + {{- else if .Values.crio.enabled }} + path: {{ .Values.crio.path }} + {{- else if .Values.k3s.enabled }} + path: {{ .Values.k3s.runtimePath }} + {{- else if .Values.bottlerocket.enabled }} + path: {{ .Values.bottlerocket.runtimePath }} + {{- else }} + path: {{ .Values.docker.path }} + {{- end }} + - name: proc-vol + hostPath: + path: /proc + - name: cgroup-vol + hostPath: + path: /sys/fs/cgroup + - name: modules-vol + hostPath: + path: /lib/modules +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/init-configmap.yaml b/charts/neuvector/neuvector/1.9.100/templates/init-configmap.yaml new file mode 100644 index 000000000..4d3b97129 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/init-configmap.yaml @@ -0,0 +1,13 @@ +{{- if .Values.controller.configmap.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: neuvector-init + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{ toYaml .Values.controller.configmap.data | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/neuvector/neuvector/1.9.100/templates/init-secret.yaml b/charts/neuvector/neuvector/1.9.100/templates/init-secret.yaml new file mode 100644 index 000000000..8a5081408 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/init-secret.yaml @@ -0,0 +1,15 @@ +{{- if .Values.controller.secret.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: neuvector-init + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{- range $key, $val := .Values.controller.secret.data }} + {{ $key }}: | {{ toYaml $val | b64enc | nindent 4 }} +{{- end }} +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/manager-deployment.yaml b/charts/neuvector/neuvector/1.9.100/templates/manager-deployment.yaml new file mode 100644 index 000000000..e6a6e7226 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/manager-deployment.yaml @@ -0,0 +1,93 @@ +{{- if .Values.manager.enabled -}} +{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Deployment +metadata: + name: neuvector-manager-pod + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: 1 + selector: + matchLabels: + app: neuvector-manager-pod + template: + metadata: + labels: + app: neuvector-manager-pod + release: {{ .Release.Name }} + spec: + {{- if .Values.manager.affinity }} + affinity: +{{ toYaml .Values.manager.affinity | indent 8 }} + {{- end }} + {{- if .Values.manager.tolerations }} + tolerations: +{{ toYaml .Values.manager.tolerations | indent 8 }} + {{- end }} + {{- if .Values.manager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.manager.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + {{- if .Values.manager.priorityClassName }} + priorityClassName: {{ .Values.manager.priorityClassName }} + {{- end }} + serviceAccountName: {{ .Values.serviceAccount }} + serviceAccount: {{ .Values.serviceAccount }} + containers: + - name: neuvector-manager-pod + {{ if eq .Values.registry "registry.neuvector.com" }} + {{ if .Values.oem }} + image: "{{ .Values.registry }}/{{ .Values.oem }}/manager:{{ .Values.tag }}" + {{- else }} + image: "{{ .Values.registry }}/manager:{{ .Values.tag }}" + {{- end }} + {{- else }} + {{ if .Values.manager.image.hash }} + image: "{{ .Values.registry }}/{{ .Values.manager.image.repository }}@{{ .Values.manager.image.hash }}" + {{- else }} + image: "{{ .Values.registry }}/{{ .Values.manager.image.repository }}:{{ .Values.tag }}" + {{- end }} + {{- end }} + env: + - name: CTRL_SERVER_IP + value: neuvector-svc-controller.{{ .Release.Namespace }} + {{- if not .Values.manager.env.ssl }} + - name: MANAGER_SSL + value: "off" + {{- end }} + volumeMounts: + {{- if .Values.manager.certificate.secret }} + - mountPath: /etc/neuvector/certs/ssl-cert.key + subPath: {{ .Values.manager.certificate.keyFile }} + name: cert + readOnly: true + - mountPath: /etc/neuvector/certs/ssl-cert.pem + subPath: {{ .Values.manager.certificate.pemFile }} + name: cert + readOnly: true + {{- end }} + resources: + {{- if .Values.manager.resources }} +{{ toYaml .Values.manager.resources | indent 12 }} + {{- else }} +{{ toYaml .Values.resources | indent 12 }} + {{- end }} + restartPolicy: Always + volumes: + {{- if .Values.manager.certificate.secret }} + - name: cert + secret: + secretName: {{ .Values.manager.certificate.secret }} + {{- end }} +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/manager-ingress.yaml b/charts/neuvector/neuvector/1.9.100/templates/manager-ingress.yaml new file mode 100644 index 000000000..456090208 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/manager-ingress.yaml @@ -0,0 +1,68 @@ +{{- if and .Values.manager.enabled .Values.manager.ingress.enabled -}} +{{- if (semverCompare ">=1.19-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: neuvector-webui-ingress + namespace: {{ .Release.Namespace }} +{{- with .Values.manager.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.manager.ingress.tls }} + tls: + - hosts: + - {{ .Values.manager.ingress.host }} +{{- if .Values.manager.ingress.secretName }} + secretName: {{ .Values.manager.ingress.secretName }} +{{- end }} +{{- end }} + rules: + - host: {{ .Values.manager.ingress.host }} + http: + paths: + - path: {{ .Values.manager.ingress.path }} + pathType: Prefix + backend: + service: + name: neuvector-service-webui + port: + number: 8443 +{{- else }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: neuvector-webui-ingress + namespace: {{ .Release.Namespace }} +{{- with .Values.manager.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.manager.ingress.tls }} + tls: + - hosts: + - {{ .Values.manager.ingress.host }} +{{- if .Values.manager.ingress.secretName }} + secretName: {{ .Values.manager.ingress.secretName }} +{{- end }} +{{- end }} + rules: + - host: {{ .Values.manager.ingress.host }} + http: + paths: + - path: {{ .Values.manager.ingress.path }} + backend: + serviceName: neuvector-service-webui + servicePort: 8443 +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/charts/neuvector/neuvector/1.9.100/templates/manager-route.yaml b/charts/neuvector/neuvector/1.9.100/templates/manager-route.yaml new file mode 100644 index 000000000..9723f28d5 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/manager-route.yaml @@ -0,0 +1,28 @@ +{{- if .Values.openshift -}} +{{- if .Values.manager.route.enabled }} +{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: route.openshift.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: Route +metadata: + name: neuvector-route-webui + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: +{{- if .Values.manager.route.host }} + host: {{ .Values.manager.route.host }} +{{- end }} + to: + kind: Service + name: neuvector-service-webui + port: + targetPort: manager + tls: + termination: {{ .Values.manager.route.termination }} +{{- end }} +{{- end -}} diff --git a/charts/neuvector/neuvector/1.9.100/templates/manager-service.yaml b/charts/neuvector/neuvector/1.9.100/templates/manager-service.yaml new file mode 100644 index 000000000..e18e55c35 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/manager-service.yaml @@ -0,0 +1,26 @@ +{{- if .Values.manager.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: neuvector-service-webui + namespace: {{ .Release.Namespace }} +{{- with .Values.manager.svc.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.manager.svc.type }} +{{- if and .Values.manager.svc.loadBalancerIP (eq .Values.manager.svc.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.manager.svc.loadBalancerIP }} +{{- end }} + ports: + - port: 8443 + name: manager + protocol: TCP + selector: + app: neuvector-manager-pod +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/psp.yaml b/charts/neuvector/neuvector/1.9.100/templates/psp.yaml new file mode 100644 index 000000000..c1d68857b --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/psp.yaml @@ -0,0 +1,77 @@ +{{- if .Values.psp -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: neuvector-binding-psp + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + labels: + chart: {{ template "neuvector.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + privileged: true + readOnlyRootFilesystem: false + allowPrivilegeEscalation: true + allowedCapabilities: + - SYS_ADMIN + - NET_ADMIN + - SYS_PTRACE + - IPC_LOCK + requiredDropCapabilities: + - ALL + volumes: + - '*' + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + hostIPC: true + hostPID: true + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: neuvector-binding-psp + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: + - policy + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - neuvector-binding-psp +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: neuvector-binding-psp + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: neuvector-binding-psp +subjects: +- kind: ServiceAccount + name: {{ .Values.serviceAccount }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/pvc.yaml b/charts/neuvector/neuvector/1.9.100/templates/pvc.yaml new file mode 100644 index 000000000..1e976bbf6 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/pvc.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.controller.enabled .Values.controller.pvc.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: neuvector-data + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + accessModes: +{{ toYaml .Values.controller.pvc.accessModes | indent 4 }} + volumeMode: Filesystem +{{- if .Values.controller.pvc.storageClass }} + storageClassName: {{ .Values.controller.pvc.storageClass }} +{{- end }} + resources: + requests: +{{- if .Values.controller.pvc.capacity }} + storage: {{ .Values.controller.pvc.capacity }} +{{- else }} + storage: 1Gi +{{- end }} +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/rolebinding.yaml b/charts/neuvector/neuvector/1.9.100/templates/rolebinding.yaml new file mode 100644 index 000000000..eda38e210 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/rolebinding.yaml @@ -0,0 +1,31 @@ +{{- $oc4 := and .Values.openshift (semverCompare ">=1.12-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}} +{{- $oc3 := and .Values.openshift (not $oc4) (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) -}} +{{- if $oc3 }} +apiVersion: authorization.openshift.io/v1 +{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} +apiVersion: v1 +{{- end }} +kind: RoleBinding +metadata: + name: neuvector-admin + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: +{{- if not $oc3 }} + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- end }} + name: admin +subjects: +- kind: ServiceAccount + name: {{ .Values.serviceAccount }} + namespace: {{ .Release.Namespace }} +{{- if $oc3 }} +userNames: +- system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.serviceAccount }} +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/scanner-deployment.yaml b/charts/neuvector/neuvector/1.9.100/templates/scanner-deployment.yaml new file mode 100644 index 000000000..9120869a6 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/scanner-deployment.yaml @@ -0,0 +1,74 @@ +{{- if .Values.cve.scanner.enabled -}} +{{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: apps/v1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Deployment +metadata: + name: neuvector-scanner-pod + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + strategy: +{{ toYaml .Values.cve.scanner.strategy | indent 4 }} + replicas: {{ .Values.cve.scanner.replicas }} + selector: + matchLabels: + app: neuvector-scanner-pod + template: + metadata: + labels: + app: neuvector-scanner-pod + spec: + {{- if .Values.cve.scanner.affinity }} + affinity: +{{ toYaml .Values.cve.scanner.affinity | indent 8 }} + {{- end }} + {{- if .Values.cve.scanner.tolerations }} + tolerations: +{{ toYaml .Values.cve.scanner.tolerations | indent 8 }} + {{- end }} + {{- if .Values.cve.scanner.nodeSelector }} + nodeSelector: +{{ toYaml .Values.cve.scanner.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + {{- if .Values.cve.scanner.priorityClassName }} + priorityClassName: {{ .Values.cve.scanner.priorityClassName }} + {{- end }} + serviceAccountName: {{ .Values.serviceAccount }} + serviceAccount: {{ .Values.serviceAccount }} + containers: + - name: neuvector-scanner-pod + {{ if eq .Values.registry "registry.neuvector.com" }} + {{ if .Values.oem }} + image: "{{ .Values.registry }}/{{ .Values.oem }}/scanner:{{ .Values.cve.scanner.image.tag }}" + {{- else }} + image: "{{ .Values.registry }}/scanner:{{ .Values.cve.scanner.image.tag }}" + {{- end }} + {{- else }} + {{ if .Values.cve.scanner.image.hash }} + image: "{{ .Values.registry }}/{{ .Values.cve.scanner.image.repository }}@{{ .Values.cve.scanner.image.hash }}" + {{- else }} + image: "{{ .Values.registry }}/{{ .Values.cve.scanner.image.repository }}:{{ .Values.cve.scanner.image.tag }}" + {{- end }} + {{- end }} + imagePullPolicy: Always + env: + - name: CLUSTER_JOIN_ADDR + value: neuvector-svc-controller.{{ .Release.Namespace }} + {{- if .Values.cve.scanner.dockerPath }} + - name: SCANNER_DOCKER_URL + value: {{ .Values.cve.scanner.dockerPath }} + {{- end }} + resources: +{{ toYaml .Values.cve.scanner.resources | indent 12 }} + restartPolicy: Always +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/serviceaccount.yaml b/charts/neuvector/neuvector/1.9.100/templates/serviceaccount.yaml new file mode 100644 index 000000000..47da190a5 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if not .Values.openshift}} +{{- if ne .Values.serviceAccount "default"}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount }} + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end }} +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/templates/updater-cronjob.yaml b/charts/neuvector/neuvector/1.9.100/templates/updater-cronjob.yaml new file mode 100644 index 000000000..ce3c71758 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/templates/updater-cronjob.yaml @@ -0,0 +1,73 @@ +{{- if .Values.cve.updater.enabled -}} +{{- if (semverCompare ">=1.21-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: batch/v1 +{{- else if (semverCompare ">=1.8-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} +apiVersion: batch/v1beta1 +{{- else }} +apiVersion: batch/v2alpha1 +{{- end }} +kind: CronJob +metadata: + name: neuvector-updater-pod + namespace: {{ .Release.Namespace }} + labels: + chart: {{ template "neuvector.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + schedule: {{ .Values.cve.updater.schedule | quote }} + jobTemplate: + spec: + template: + metadata: + labels: + app: neuvector-updater-pod + release: {{ .Release.Name }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + {{- if .Values.cve.updater.priorityClassName }} + priorityClassName: {{ .Values.cve.updater.priorityClassName }} + {{- end }} + serviceAccountName: {{ .Values.serviceAccount }} + serviceAccount: {{ .Values.serviceAccount }} + containers: + - name: neuvector-updater-pod + {{ if eq .Values.registry "registry.neuvector.com" }} + {{ if .Values.oem }} + image: "{{ .Values.registry }}/{{ .Values.oem }}/updater:{{ .Values.cve.updater.image.tag }}" + {{- else }} + image: "{{ .Values.registry }}/updater:{{ .Values.cve.updater.image.tag }}" + {{- end }} + {{- else }} + {{ if .Values.cve.updater.image.hash }} + image: "{{ .Values.registry }}/{{ .Values.cve.updater.image.repository }}@{{ .Values.cve.updater.image.hash }}" + {{- else }} + image: "{{ .Values.registry }}/{{ .Values.cve.updater.image.repository }}:{{ .Values.cve.updater.image.tag }}" + {{- end }} + {{- end }} + imagePullPolicy: Always + {{- if .Values.cve.scanner.enabled }} + lifecycle: + postStart: + exec: + command: + - /bin/sh + - -c + {{- if (semverCompare ">=1.9-0" (substr 1 -1 .Capabilities.KubeVersion.GitVersion)) }} + {{- if .Values.cve.updater.secure }} + - /usr/bin/curl -v -X PATCH -H "Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type:application/strategic-merge-patch+json" -d '{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"'`date +%Y-%m-%dT%H:%M:%S%z`'"}}}}}' 'https://kubernetes.default/apis/apps/v1/namespaces/{{ .Release.Namespace }}/deployments/neuvector-scanner-pod' + {{- else }} + - /usr/bin/curl -kv -X PATCH -H "Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type:application/strategic-merge-patch+json" -d '{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"'`date +%Y-%m-%dT%H:%M:%S%z`'"}}}}}' 'https://kubernetes.default/apis/apps/v1/namespaces/{{ .Release.Namespace }}/deployments/neuvector-scanner-pod' + {{- end }} + {{- else }} + - /usr/bin/curl -kv -X PATCH -H "Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type:application/strategic-merge-patch+json" -d '{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"'`date +%Y-%m-%dT%H:%M:%S%z`'"}}}}}' 'https://kubernetes.default/apis/extensions/v1beta1/namespaces/{{ .Release.Namespace }}/deployments/neuvector-scanner-pod' + {{- end }} + {{- end }} + env: + - name: CLUSTER_JOIN_ADDR + value: neuvector-svc-controller.{{ .Release.Namespace }} + restartPolicy: Never +{{- end }} diff --git a/charts/neuvector/neuvector/1.9.100/values.yaml b/charts/neuvector/neuvector/1.9.100/values.yaml new file mode 100644 index 000000000..1106e0257 --- /dev/null +++ b/charts/neuvector/neuvector/1.9.100/values.yaml @@ -0,0 +1,292 @@ +# Default values for neuvector. +# This is a YAML-formatted file. +# Declare variables to be passed into the templates. 
+ +openshift: false + +registry: registry.neuvector.com +tag: 4.4.4 +oem: +imagePullSecrets: +psp: false +serviceAccount: default + +controller: + # If false, controller will not be installed + enabled: true + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + image: + repository: neuvector/controller + hash: + replicas: 3 + disruptionbudget: 0 + schedulerName: + priorityClassName: + env: [] + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - neuvector-controller-pod + topologyKey: "kubernetes.io/hostname" + tolerations: [] + nodeSelector: {} + # key1: value1 + # key2: value2 + apisvc: + type: + annotations: {} + # OpenShift Route configuration + route: + enabled: false + termination: passthrough + host: + pvc: + enabled: false + accessModes: + - ReadWriteMany + storageClass: + capacity: + azureFileShare: + enabled: false + secretName: + shareName: + certificate: + secret: + keyFile: tls.key + pemFile: tls.pem + federation: + mastersvc: + type: + # Federation Master Ingress + ingress: + enabled: false + host: # MUST be set, if ingress is enabled + path: "/" # or this could be "/api", but might need "rewrite-target" annotation + annotations: + ingress.kubernetes.io/protocol: https + # ingress.kubernetes.io/rewrite-target: / + tls: false + secretName: + # OpenShift Route configuration + route: + enabled: false + termination: passthrough + host: + managedsvc: + type: + # Federation Managed Ingress + ingress: + enabled: false + host: # MUST be set, if ingress is enabled + path: "/" # or this could be "/api", but might need "rewrite-target" annotation + annotations: + ingress.kubernetes.io/protocol: https + # ingress.kubernetes.io/rewrite-target: / + tls: false + secretName: + # OpenShift Route configuration + route: + enabled: false + termination: passthrough + host: + ingress: + enabled: false + host: # MUST be set, if ingress is enabled + path: "/" # or this could be "/api", but might need "rewrite-target" annotation + annotations: + ingress.kubernetes.io/protocol: https + # ingress.kubernetes.io/rewrite-target: / + tls: false + secretName: + resources: {} + # limits: + # cpu: 400m + # memory: 2792Mi + # requests: + # cpu: 100m + # memory: 2280Mi + configmap: + enabled: false + data: + # eulainitcfg.yaml: | + # ... + # ldapinitcfg.yaml: | + # ... + # oidcinitcfg.yaml: | + # ... + # samlinitcfg.yaml: | + # ... + # sysinitcfg.yaml: | + # ... + # userinitcfg.yaml: | + # ... + secret: + # NOTE: files defined here take preference over the ones defined in the configmap section + enabled: false + data: {} + # eulainitcfg.yaml: + # license_key: 0Bca63Iy2FiXGqjk... + # ... + # ldapinitcfg.yaml: + # directory: OpenLDAP + # ... + # oidcinitcfg.yaml: + # Issuer: https://... + # ... + # samlinitcfg.yaml: + # ... + # sysinitcfg.yaml: + # ... + # userinitcfg.yaml: + # ... 
+ +enforcer: + # If false, enforcer will not be installed + enabled: true + image: + repository: neuvector/enforcer + hash: + priorityClassName: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + resources: {} + # limits: + # cpu: 400m + # memory: 2792Mi + # requests: + # cpu: 100m + # memory: 2280Mi + +manager: + # If false, manager will not be installed + enabled: true + image: + repository: neuvector/manager + hash: + priorityClassName: + env: + ssl: true + svc: + type: NodePort + loadBalancerIP: + annotations: {} + # azure + # service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "apps-subnet" + # OpenShift Route configuration + route: + enabled: true + termination: passthrough + host: + certificate: + secret: + keyFile: tls.key + pemFile: tls.pem + ingress: + enabled: false + host: # MUST be set, if ingress is enabled + path: "/" + annotations: {} + # kubernetes.io/ingress.class: my-nginx + # nginx.ingress.kubernetes.io/whitelist-source-range: "1.1.1.1" + # nginx.ingress.kubernetes.io/rewrite-target: / + # nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + # only for end-to-end tls conf - ingress-nginx accepts backend self-signed cert + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + tls: false + secretName: # my-tls-secret + resources: {} + # limits: + # cpu: 400m + # memory: 2792Mi + # requests: + # cpu: 100m + # memory: 2280Mi + affinity: {} + tolerations: [] + nodeSelector: {} + # key1: value1 + # key2: value2 + +cve: + updater: + # If false, cve updater will not be installed + enabled: true + secure: false + image: + repository: neuvector/updater + tag: latest + hash: + schedule: "0 0 * * *" + priorityClassName: + scanner: + enabled: true + replicas: 3 + dockerPath: "" + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + image: + repository: neuvector/scanner + tag: latest + hash: + priorityClassName: + resources: {} + # limits: + # cpu: 400m + # memory: 2792Mi + # requests: + # cpu: 100m + # memory: 2280Mi + affinity: {} + tolerations: [] + nodeSelector: {} + # key1: value1 + # key2: value2 +docker: + path: /var/run/docker.sock + +resources: {} + # limits: + # cpu: 400m + # memory: 2792Mi + # requests: + # cpu: 100m + # memory: 2280Mi + +k3s: + enabled: false + runtimePath: /run/k3s/containerd/containerd.sock + +bottlerocket: + enabled: false + runtimePath: /run/dockershim.sock + +containerd: + enabled: false + path: /var/run/containerd/containerd.sock + +crio: + enabled: false + path: /var/run/crio/crio.sock + +admissionwebhook: + type: ClusterIP + +crdwebhook: + enabled: true + type: ClusterIP diff --git a/index.yaml b/index.yaml index 63b757153..cef0e2341 100755 --- a/index.yaml +++ b/index.yaml @@ -2326,6 +2326,27 @@ entries: - assets/nats/nats-0.10.0.tgz version: 0.10.0 neuvector: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: NeuVector + catalog.cattle.io/release-name: neuvector + apiVersion: v1 + appVersion: 4.4.4 + created: "2022-02-23T16:15:47.730445764-08:00" + description: Helm chart for NeuVector's core services + digest: 3acc84eae24466ea0e60c6044059173c693aeeb049484bbcb8730874efef589f + home: https://neuvector.com + icon: https://avatars2.githubusercontent.com/u/19367275?s=200&v=4 + keywords: + - security + kubeVersion: '>=1.13.0-0' + maintainers: + - email: support@neuvector.com + name: becitsthere + name: neuvector + urls: + - assets/neuvector/neuvector-1.9.100.tgz + 
version: 1.9.100 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: NeuVector From 23424c071c452691988d1bbc20e605ec61266a51 Mon Sep 17 00:00:00 2001 From: David Marchant Date: Thu, 24 Feb 2022 15:12:41 +0000 Subject: [PATCH 12/13] Update ondat-operator package dir --- .../ondat-operator/generated-changes/patch/Chart.yaml.patch | 4 ++-- packages/ondat-operator/package.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/ondat-operator/generated-changes/patch/Chart.yaml.patch b/packages/ondat-operator/generated-changes/patch/Chart.yaml.patch index 1768a065f..328d56589 100644 --- a/packages/ondat-operator/generated-changes/patch/Chart.yaml.patch +++ b/packages/ondat-operator/generated-changes/patch/Chart.yaml.patch @@ -3,8 +3,8 @@ @@ -19,3 +19,8 @@ sources: - https://github.com/ondat - version: 0.5.2 -+kubeVersion: "1.18 - 1.22" + version: 0.5.4 ++kubeVersion: ">= 1.19" +annotations: + catalog.cattle.io/certified: partner # Enables the "partner" badge in the UI for easier identification + catalog.cattle.io/release-name: ondat-operator # Your chart's name in kebab-case, this is used for deployment diff --git a/packages/ondat-operator/package.yaml b/packages/ondat-operator/package.yaml index 8519de4ab..d73db6621 100644 --- a/packages/ondat-operator/package.yaml +++ b/packages/ondat-operator/package.yaml @@ -1,2 +1,2 @@ -url: https://github.com/ondat/charts/releases/download/ondat-operator-0.5.2/ondat-operator-0.5.2.tgz +url: https://github.com/ondat/charts/releases/download/ondat-operator-0.5.4/ondat-operator-0.5.4.tgz packageVersion: 00 From fffb72a0658809063746d2dc465812d22c80905c Mon Sep 17 00:00:00 2001 From: David Marchant Date: Thu, 24 Feb 2022 16:43:30 +0000 Subject: [PATCH 13/13] Result of 'make chart' --- .../ondat-operator/ondat-operator-0.5.400.tgz | Bin 0 -> 19382 bytes .../ondat-operator/0.5.400/Chart.yaml | 26 + .../ondat-operator/0.5.400/LICENSE | 21 + .../ondat-operator/0.5.400/README.md | 271 ++++++ .../ondat-operator/0.5.400/app-readme.md | 75 ++ .../ondat-operator/0.5.400/ci/std-values.yaml | 5 + .../0.5.400/crds/storageoscluster_crd.yaml | 424 +++++++++ .../ondat-operator/0.5.400/questions.yml | 177 ++++ .../0.5.400/templates/NOTES.txt | 51 ++ .../0.5.400/templates/_helpers.tpl | 67 ++ .../0.5.400/templates/cleanup.yaml | 315 +++++++ .../0.5.400/templates/config-maps.yaml | 75 ++ .../0.5.400/templates/namespaces.yaml | 22 + .../0.5.400/templates/operator.yaml | 87 ++ .../ondat-operator/0.5.400/templates/psp.yaml | 29 + .../0.5.400/templates/rbac.yaml | 840 ++++++++++++++++++ .../0.5.400/templates/secrets.yaml | 19 + .../0.5.400/templates/service-account.yaml | 13 + .../0.5.400/templates/services.yaml | 42 + .../templates/storageoscluster_cr.yaml | 52 ++ .../validating-webhook-configuration.yaml | 31 + .../ondat-operator/0.5.400/values.yaml | 144 +++ index.yaml | 30 + 23 files changed, 2816 insertions(+) create mode 100644 assets/ondat-operator/ondat-operator-0.5.400.tgz create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/Chart.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/LICENSE create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/README.md create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/app-readme.md create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/ci/std-values.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/crds/storageoscluster_crd.yaml create mode 100644 
charts/ondat-operator/ondat-operator/0.5.400/questions.yml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/NOTES.txt create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/_helpers.tpl create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/cleanup.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/config-maps.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/namespaces.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/operator.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/psp.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/rbac.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/secrets.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/service-account.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/services.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/storageoscluster_cr.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/templates/validating-webhook-configuration.yaml create mode 100644 charts/ondat-operator/ondat-operator/0.5.400/values.yaml diff --git a/assets/ondat-operator/ondat-operator-0.5.400.tgz b/assets/ondat-operator/ondat-operator-0.5.400.tgz new file mode 100644 index 0000000000000000000000000000000000000000..7bda536b890df5fdd07550f5283d90d4aae25ba0 GIT binary patch literal 19382 zcmV)hK%>7OiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMZ%cHB0yFgm~a6gchqw(U%ly7`jnon$^mYCGDoB`ryI_G~A| zLX+rjj7We(fGv08eCxc!dBgK0XH@~vdP|5w*CD)qO|x)Hg)U2H zJe<-j3a;dghH9}|GEF|HvN#nJ7E2YREaNj7(@dr^k7Y|m$}%Z9%jDjk*E(TJ=2^(F z=MI+>uBZ7Z2xVfh?akbvw}Yp_lkIKqbJPC)^QS*G`2UQ?Ia7DM0j%Kv&!6u;Yx4iS z=TGkW|EG8!kV7`6d8~<Gexu{BSuoH6pIKKT9UcUGoo3N##A#Ec<{x6)30&cT#_5y>^(JT>%WD>$W)eu|m?lgO z$ipulZUV!SLj}fwJd9*`#WMHJ!E*>L0gjkn3tpBB%@bAv^d1!Zwy%&DcnCtpm``#f z1(1no!ZgcN;JKneMJPKMa>`gL71uJGSEzJLmckJsnobJI27ae`99xPdN8@9uPcx>N z(9oeU&Xs2C^v9NjUI3Q0T{y5jVd)S)CVI+>5fU|(c^na%Yw5e9Q%25BpHI$R(?N3$ zVgrCfA`QrhrLmkROz2>7ftJNpN<-`p7fRDOZf_1bNPv|bFOq30fw#a&rVJY}uw;Em zeR4hJ;S{Q5Y|JvOf{?Dco|53q2%$mI8QWX@yV_-_a?Ug~h-?FJH<$Uth{XH~hmb2q zC`mKsvkzQpE+!?U0$;}^>~_Q{7G^L}Ms5WlB|7#`j{r=>gyu9>fDu#tjE#>QJdmc` z0C(J-MhO>$8YT)=@x{O{nKz1M&|4S?&Gi(T2py@^<^(zcxC^jmZtFE>7eJm;r9eL3 z0Hm4TBv%@?52^`Fj<)c=Bp}<;5J;QEQcO$_QrpbH3|CEQ#-c-B5c7y3sfBCyGA#=jMfE<>^OT1)j^}2%^|od-+lu*U%a+|r z<9xz}sY zfkm%o-S|X7p9%o@l*K9G zNt(%-CFx8?c?d*CH6S;gP{x2YLz;FZ7c6E8 z(=cyF&a(X5Fl)!|Yy$@|RZeO&jMB9KM&Za&@FY}(Q^2z%C{K_GRu%NX!U}VxW#SkS zf`pY)`FXs^sf>zmmqyB8Q$Z(Y$L!v@Y0CY?6tv~c_PrDA22b%#sQBBR;Rg-OG2Yjj zhEtZk-D?B;&z#Od9=ZBbg^F*5iu=@7r~%&#cC2hr1Adyx83(O;Ri^=PS{AOs-VJtw z9Y3WyoU&-KjWeeB&+YJAdY_rn4b0R!|BBC;U`m~4a>UjE8{6_JlyCHWw+a{*{9(pt zEIVTp&?D%&y@s{#M_AR*T-^mEnWnhlx(#dv+fZZ?#darywhkOeg!NNr9mp|}F0EM6 zCv--6jQnAo$pp$=0w&+(qn{o*`F@&@0wWot@g>_T`vtJK9-EyL{$5TqP@*vk zC!oTowH6B^wr#a+2iw7}*}`R}X}lIRQ?z6CZCecI=_I2OdzsTLS`N;Z^e0fXrFWj; zSYrtc2UrnTxCI;AV$3KC6nRQ=VY(C*GHbETk?ScF-UB05yRC;Sr-p)*&Zx-KL4zBC zm=Nl?Z2%*#1x@&tEmH0HPO$T3u)Bd}tzrKuRP?RmlZ)Ybpg-u_+Za~Ze|Dcg-EP=_ zo<7^#zPJB;iszSKd=QRg2|6n$gmY0=*l zQ7K31+sjiByn*BW^2>8moQac{>3)Fvz1zwYf@f3XRUHAXv+@waoTlWHYOzftZ~Z z>eZC$ntsrsaCwVrJo%O?_Us8^B7{8$+F8;DAnTe6ZFeOPcwqa42VVUEXUDtZpk){} z!b_N@Ab1X1Y%=!m!w=&rkG#DyzL}Acp><qGdnjn8vUaMoR@#6;h$!c< zLHaGTX?yBdr>=EIRm#G_Dl5iv5x!HuC0~I~>oXChk_-Kf=N+@_cXYta}ff&2*?YWc~C#g@r5E%03F72SlO@=#u)>A&R7f^ssqv|G9z;2SO6uV 
zcPPWFBJW$T>=l%;9tSpaoVOUDqNeity~;<9D&F(4yBVsuex+CBcCv7wkxCtVsYy8$ z$Ik-nQ;_CyY;L#mj8tGpb~HYg`qT(@-h#Z)D^Q1x5`aQd1Y@>UGZZHB1b^C5ch|yF zCVNA%pZklQfS3Ci=TrPVW;0v_OU9V7R4T4z))-^2(6zQ)jb6)GOW8-(2?i9;>ToLp zj~#GXwJ~&iBE6*3nob(Mo$UrsgYD%#Y>>33lN)yMf=*zGI2tW(nQU`audfv+qZ;qc zmetZE^ZHoq#hi;muQ?=Wz3#HQvbq8Ex|Wpfms?R~#e+)4+LFTi-VO?*g*`QCOsGRd z;ep&jcx|(JT=6bl#6p=*Y5B~$-nCF#wPWaNxoPieKAv~eMc0pRN;+=uZJ`o0$Y_jT z`YtbA-95GG_TS#)YN(*=vVpePb3+oCo~DuRvx_Xuo?y(vYj7wiw|wo3mc}>YQP}&& zezJ^ysw21=yK2MNCQPff$H=;1Hfs5g+Xm3gt6U#}+iBa#p*?vb-wn$B?nzr%dMesqcSk}U$@MGKb zy|LvN89@DFc^ZZ?U&RC7l#O@yFr`Xe%Pea2u&r~aw&)y!kX;YF3VEgSPIYtu(>drh+mUc*sI z6G(VENM)w`M6r}+=$tp(?P>&t=wi>t*HcpiuG^yNH z6Rbt)EmvC?l#bvQslAJWTnb+^e2d6OOg&zSbRDaku6UF2yohh8t;Ze)JQOG1PExV% zYpm8?EztuCP+eEWkbyipvH8p%BMw%M#zbZm-i{?9l>=K&Nym$UEgMI)Xcwon5ZkiR zL{_3k3g(2;8%S)u*5Ttt92+94lqhb7t+qhP-wDdfp{<9vQi5?w*oe!*)+#+Vv$eUr z6`LdSdp36{?8-w&Sdr7MOzerpifZUk#}}l3DKM>QKrW$>j~9v5W#SO7ze7%1|{H!3dmXVv`Ai-Z6TJ`lI25&q> zrzIPf@UV>OdANM2^}#He;L33BWNJUzkSa50B2~$u3ry`8Y7zOm#uzt_OKsB*8r`aF zr3%>PxQLJ~f@YhMvUOitJWr!~>X<7R7%*2-kxZ|z}vR$FsG^b=E zLqjtr_ohaNMlkNlc`Yn=E#1rn@M0%Ew;BKbxqt4T`)A|l{|^8F|Np9oxc&g50RWC| BL literal 0 HcmV?d00001 diff --git a/charts/ondat-operator/ondat-operator/0.5.400/Chart.yaml b/charts/ondat-operator/ondat-operator/0.5.400/Chart.yaml new file mode 100644 index 000000000..88f648916 --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Ondat Operator + catalog.cattle.io/release-name: ondat-operator +apiVersion: v2 +appVersion: v2.6.0 +description: Cloud Native storage for containers +home: https://ondat.io +icon: https://docs.ondat.io/images/generic/Ondat_logo.svg +keywords: +- storage +- block-storage +- volume +- operator +kubeVersion: '>= 1.19' +maintainers: +- email: david@ondat.io + name: DavidMarchant +- email: richard.kovacs@ondat.io + name: mhmxs +- email: angelos.perivolaropoulos@ondat.io + name: aeroniero33 +name: ondat-operator +sources: +- https://github.com/ondat +version: 0.5.400 diff --git a/charts/ondat-operator/ondat-operator/0.5.400/LICENSE b/charts/ondat-operator/ondat-operator/0.5.400/LICENSE new file mode 100644 index 000000000..4bb9ee44c --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 StorageOS + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/README.md b/charts/ondat-operator/ondat-operator/0.5.400/README.md new file mode 100644 index 000000000..007025e0f --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/README.md @@ -0,0 +1,271 @@ +# Ondat Operator Helm Chart + +> **Note**: This chart requires Helm 3 and defaults to StorageOS v2. To upgrade +> from a previous chart or from StorageOS version 1.x to 2.x, please contact +> support for assistance. + +StorageOS is a cloud native, software-defined storage platform that transforms +commodity server or cloud based disk capacity into enterprise-class persistent +storage for containers. StorageOS volumes offer high throughput, low latency +and consistent performance, and are therefore ideal for deploying databases, +message queues, and other mission-critical stateful solutions. StorageOS +Project edition also offers ReadWriteMany volumes that are concurrently +accessible by multiple applications. + +The Ondat Operator installs and manages StorageOS within a cluster. Cluster +nodes may contribute local or attached disk-based storage into a distributed +pool, which is then available to all cluster members via a global namespace. + +Volumes are available across the cluster so if an application container gets +moved to another node it has immediate access to re-attach its data. + +StorageOS is extremely lightweight - minimum requirements are a reserved CPU +core and 2GB of free memory. There are minimal external dependencies, and no +custom kernel modules. + +After StorageOS is installed, please register for a free personal license to +enable 1TiB of capacity and HA with synchronous replication by following the +instructions [here](https://docs.ondat.io/docs/operations/licensing). For +additional capacity, features and support plans contact sales@ondat.io. + +## Highlighted Features + +* High Availability - synchronous replication insulates you from node failure. +* Delta Sync - replicas out of sync due to transient failures only transfer + changed blocks. +* Multiple AccessModes - dynamically provision ReadWriteOnce or ReadWriteMany + volumes. +* Rapid Failover - quickly detects node failure and automates recovery actions + without administrator intervention. +* Data Encryption - both in transit and at rest. +* Scalability - disaggregated consensus means no single scheduling point of + failure. +* Thin provisioning - only consume the space you need in a storage pool. +* Data reduction - transparent inline data compression to reduce the amount of + storage used in a backing store as well as reducing the network bandwidth + requirements for replication. +* Flexible configuration - all features can be enabled per volume, using PVC + and StorageClass labels. +* Multi-tenancy - fully supports standard Namespace and RBAC methods. +* Observability & instrumentation - Log streams for observability and + Prometheus support for instrumentation. +* Deployment flexibility - scale up or scale out storage based on application + requirements. Works with any infrastructure – on-premises, VM, bare metal + or cloud. + +## About StorageOS + +StorageOS is a software-defined cloud native storage platform delivering +persistent storage for Kubernetes. StorageOS is built from the ground-up with +no legacy restrictions to give enterprises working with cloud native workloads +a scalable storage platform with no compromise on performance, availability or +security. For additional information, visit www.ondat.io. 
+ +This chart installs an Ondat Cluster Operator which helps deploy and +configure a StorageOS cluster on Kubernetes. + +## Prerequisites + +- Helm 3 +- Kubernetes 1.18+ +- Privileged mode containers (enabled by default) +- Etcd cluster + +Refer to the [StorageOS prerequisites +docs](https://docs.ondat.io/docs/prerequisites/) for more information. + +## Installing the chart + + + +```console +# Add ondat charts repo. +$ helm repo add ondat https://charts.ondat.io +# Install the chart in a namespace. +$ kubectl create namespace ondat-operator +$ helm install my-ondat ondat/ondat-operator \ + --namespace ondat-operator \ + --set cluster.kvBackend.address=<etcd-address>:2379 \ + --set cluster.admin.password=<password> +``` + +This will install the Ondat cluster operator in the `ondat-operator` +namespace and deploy StorageOS with a minimal configuration. The etcd address +(kvBackend) and admin password are mandatory values to install the chart. + +The password must be at least 8 characters long and the default username is +`storageos`; both can be changed in the same way as the values above. Find more information +about installing etcd in our [etcd +docs](https://docs.ondat.io/docs/prerequisites/etcd/). + +To avoid passing the password as a flag, install the chart with a values file. +Create a values.yaml file and pass the file name with the `--values` flag. + +```yaml +cluster: + kvBackend: + address: <etcd-address>:2379 + admin: + password: <password> +``` + +```console +$ helm install my-ondat ondat/ondat-operator \ + --namespace ondat-operator \ + --values <values-file> +``` +> **Tip**: List all releases using `helm list -A` + +## Creating a StorageOS cluster manually + +The Helm chart supports a subset of StorageOSCluster custom resource parameters. +For advanced configurations, you may wish to create the cluster resource +manually and only use the Helm chart to install the Operator. + +To disable auto-provisioning the cluster with the Helm chart, set +`cluster.create` to false: + +```yaml +cluster: + ... + create: false +``` + +Create a secret to store the StorageOS cluster secrets: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: "storageos-api" + namespace: <storageos-cluster-namespace> + labels: + app: "storageos" +type: "kubernetes.io/storageos" +data: + # echo -n '<value>' | base64 + username: c3RvcmFnZW9z + password: c3RvcmFnZW9z +``` + +Create a `StorageOSCluster` custom resource and reference the above secret in the +`secretRefName` field. + +```yaml +apiVersion: "storageos.com/v1" +kind: "StorageOSCluster" +metadata: + name: "example-storageos" + namespace: <storageos-cluster-namespace> +spec: + secretRefName: "storageos-api" + kvBackend: + address: "etcd-client.etcd.svc.cluster.local:2379" + # address: '10.42.15.23:2379,10.42.12.22:2379,10.42.13.16:2379' # You can set ETCD server IPs. + storageClassName: "storageos" +``` + + +Learn more about advanced configuration options +[here](https://github.com/storageos/cluster-operator/blob/master/README.md#storageoscluster-resource-configuration). + +To check cluster status, run: + +```console +$ kubectl get storageoscluster --namespace <storageos-cluster-namespace> +NAME READY STATUS AGE +example-storageos 3/3 Running 4m +``` + +All the events related to this cluster are logged as part of the cluster object +and can be viewed by describing the object. + +```console +$ kubectl describe storageoscluster example-storageos --namespace <storageos-cluster-namespace> +Name: example-storageos +Namespace: default +Labels: +... +... 
+Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning ChangedStatus 1m (x2 over 1m) storageos-operator 0/3 StorageOS nodes are functional + Normal ChangedStatus 35s storageos-operator 3/3 StorageOS nodes are functional. Cluster healthy +``` + +## Configuration + +The following table lists the configurable parameters of the StorageOSCluster +Operator chart and their default values. + +Parameter | Description | Default +--------- | ----------- | ------- +`operator.image.repository` | StorageOS Operator container image repository | `storageos/operator` +`operator.image.tag` | StorageOS Operator container image tag | `v2.5.0` +`operator.image.pullPolicy` | StorageOS Operator container image pull policy | `IfNotPresent` +`podSecurityPolicy.enabled` | If true, create & use PodSecurityPolicy resources | `false` +`podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` +`cluster.create` | If true, auto-create the StorageOS cluster | `true` +`cluster.name` | Name of the storageos deployment | `storageos` +`cluster.namespace` | Namespace to install the StorageOS cluster into | `kube-system` +`cluster.createNamespace` | If true, create the namespace used by the cluster | `true` +`cluster.secretRefName` | Name of the secret containing StorageOS API credentials | `storageos-api` +`cluster.admin.username` | Username to authenticate to the StorageOS API with | `storageos` +`cluster.admin.password` | Password to authenticate to the StorageOS API with | +`cluster.sharedDir` | The path shared into the kubelet container when running kubelet in a container | +`cluster.kvBackend.address` | List of etcd targets, in the form ip[:port], separated by commas | +`cluster.kvBackend.backend` | Key-Value store backend name | `etcd` +`cluster.kvBackend.tlsSecretName` | Name of the secret containing kv backend tls cert | +`cluster.kvBackend.tlsSecretNamespace` | Namespace of the secret containing kv backend tls cert | +`cluster.nodeSelectorTerm.key` | Key of the node selector term used for pod placement | +`cluster.nodeSelectorTerm.value` | Value of the node selector term used for pod placement | +`cluster.toleration.key` | Key of the pod toleration parameter | +`cluster.toleration.value` | Value of the pod toleration parameter | +`cluster.disableTelemetry` | If true, no telemetry data will be collected from the cluster | `false` +`cluster.storageClassName` | Name of the StorageClass to be created | `storageos` +`cluster.images.apiManager.repository` | StorageOS API Manager container image repository | +`cluster.images.apiManager.tag` | StorageOS API Manager container image tag | +`cluster.images.csiV1ExternalAttacherV3.repository` | CSI v1 External Attacher v3 image repository | +`cluster.images.csiV1ExternalAttacherV3.tag` | CSI v1 External Attacher v3 image tag | +`cluster.images.csiV1ExternalProvisioner.repository` | CSI v1 External Provisioner image repository | +`cluster.images.csiV1ExternalProvisioner.tag` | CSI v1 External Provisioner image tag | +`cluster.images.csiV1ExternalResizer.repository` | CSI v1 External Resizer image repository | +`cluster.images.csiV1ExternalResizer.tag` | CSI v1 External Resizer image tag | +`cluster.images.csiV1LivenessProbe.repository` | CSI v1 Liveness Probe image repository | +`cluster.images.csiV1LivenessProbe.tag` | CSI v1 Liveness Probe image tag | +`cluster.images.csiV1NodeDriverRegistrar.repository` | CSI v1 Node Driver Registrar image repository | +`cluster.images.csiV1NodeDriverRegistrar.tag` | CSI v1 Node 
Driver Registrar image tag | +`cluster.images.init.repository` | StorageOS init container image repository | +`cluster.images.init.tag` | StorageOS init container image tag | +`cluster.images.node.repository` | StorageOS Node container image repository | +`cluster.images.node.tag` | StorageOS Node container image tag | + +## Deleting a StorageOS Cluster + +Deleting the `StorageOSCluster` custom resource object would delete the +storageos cluster and its associated resources. + +In the above example, + +```console +$ kubectl delete storageoscluster example-storageos --namespace <storageos-cluster-namespace> +``` + +would delete the custom resource and the cluster. + +## Uninstalling the Chart + +To uninstall/delete the storageos cluster operator deployment: + +```console +$ helm uninstall <release-name> --namespace ondat-operator +``` + +If the chart was installed with cluster auto-provisioning enabled, chart +uninstall will clean up the installed StorageOS cluster resources as well. + +Learn more about configuring the StorageOS Operator on +[GitHub](https://github.com/storageos/operator). diff --git a/charts/ondat-operator/ondat-operator/0.5.400/app-readme.md b/charts/ondat-operator/ondat-operator/0.5.400/app-readme.md new file mode 100644 index 000000000..94c18184d --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/app-readme.md @@ -0,0 +1,75 @@ +# Ondat Operator + +StorageOS is a cloud native, software-defined storage platform that transforms +commodity server or cloud based disk capacity into enterprise-class persistent +storage for containers. StorageOS volumes offer high throughput, low latency +and consistent performance, and are therefore ideal for deploying databases, +message queues, and other mission-critical stateful solutions. StorageOS +Project edition also offers ReadWriteMany volumes that are concurrently +accessible by multiple applications. + +The Ondat Operator installs and manages StorageOS within a cluster. Cluster +nodes may contribute local or attached disk-based storage into a distributed +pool, which is then available to all cluster members via a global namespace. + +Volumes are available across the cluster so if an application container gets +moved to another node it has immediate access to re-attach its data. + +StorageOS is extremely lightweight - minimum requirements are a reserved CPU +core and 2GB of free memory. There are minimal external dependencies, and no +custom kernel modules. + + +After StorageOS is installed, please register for a free personal license to +enable 1TiB of capacity and HA with synchronous replication by following the +instructions [here](https://docs.ondat.io/docs/operations/licensing). For +additional capacity, features and support plans contact sales@ondat.io. + +## Highlighted Features + +* High Availability - synchronous replication insulates you from node failure. +* Delta Sync - replicas out of sync due to transient failures only transfer + changed blocks. +* Multiple AccessModes - dynamically provision ReadWriteOnce or ReadWriteMany + volumes. +* Rapid Failover - quickly detects node failure and automates recovery actions + without administrator intervention. +* Data Encryption - both in transit and at rest. +* Scalability - disaggregated consensus means no single scheduling point of + failure. +* Thin provisioning - only consume the space you need in a storage pool. +* Data reduction - transparent inline data compression to reduce the amount of + storage used in a backing store as well as reducing the network bandwidth + requirements for replication. 
+* Flexible configuration - all features can be enabled per volume, using PVC + and StorageClass labels. +* Multi-tenancy - fully supports standard Namespace and RBAC methods. +* Observability & instrumentation - Log streams for observability and + Prometheus support for instrumentation. +* Deployment flexibility - scale up or scale out storage based on application + requirements. Works with any infrastructure – on-premises, VM, bare metal + or cloud. + +## About StorageOS + +StorageOS is a software-defined cloud native storage platform delivering +persistent storage for Kubernetes. StorageOS is built from the ground-up with +no legacy restrictions to give enterprises working with cloud native workloads +a scalable storage platform with no compromise on performance, availability or +security. For additional information, visit www.ondat.io. + +## Installation + +StorageOS requires an etcd cluster in order to function. Find out more about +setting up an etcd cluster in our [etcd +docs](https://docs.ondat.io/docs/prerequisites/etcd/). + +By default, a minimal configuration of StorageOS is installed. To set advanced +configurations, disable the default installation of the StorageOS cluster +and create a custom StorageOSCluster resource; see the documentation +[here](https://github.com/ondat/charts/blob/main/charts/ondat-operator/README.md#creating-a-storageos-cluster-manually). + +Newly installed StorageOS clusters require a license to function. For +instructions on applying our free developer license, or obtaining a commercial +license, please see our documentation at +https://docs.ondat.io/docs/reference/licence/. diff --git a/charts/ondat-operator/ondat-operator/0.5.400/ci/std-values.yaml b/charts/ondat-operator/ondat-operator/0.5.400/ci/std-values.yaml new file mode 100644 index 000000000..eb6ed76f7 --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/ci/std-values.yaml @@ -0,0 +1,5 @@ +podSecurityPolicy: + enabled: true +cluster: + # Disable cluster creation in CI, should install the operator only. + create: false diff --git a/charts/ondat-operator/ondat-operator/0.5.400/crds/storageoscluster_crd.yaml b/charts/ondat-operator/ondat-operator/0.5.400/crds/storageoscluster_crd.yaml new file mode 100644 index 000000000..e718ce545 --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/crds/storageoscluster_crd.yaml @@ -0,0 +1,424 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + labels: + app: storageos + app.kubernetes.io/component: operator + name: storageosclusters.storageos.com +spec: + group: storageos.com + names: + kind: StorageOSCluster + listKind: StorageOSClusterList + plural: storageosclusters + shortNames: + - stos + singular: storageoscluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Ready status of the storageos nodes. + jsonPath: .status.ready + name: ready + type: string + - description: Status of the whole cluster. + jsonPath: .status.phase + name: status + type: string + - jsonPath: .metadata.creationTimestamp + name: age + type: date + name: v1 + schema: + openAPIV3Schema: + description: StorageOSCluster is the Schema for the storageosclusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: StorageOSClusterSpec defines the desired state of StorageOSCluster + properties: + csi: + description: CSI defines the configurations for CSI. + properties: + deploymentStrategy: + type: string + deviceDir: + type: string + driverRegisterationMode: + type: string + driverRequiresAttachment: + type: string + enable: + type: boolean + enableControllerExpandCreds: + type: boolean + enableControllerPublishCreds: + type: boolean + enableNodePublishCreds: + type: boolean + enableProvisionCreds: + type: boolean + endpoint: + type: string + kubeletDir: + type: string + kubeletRegistrationPath: + type: string + pluginDir: + type: string + registrarSocketDir: + type: string + registrationDir: + type: string + version: + type: string + type: object + debug: + description: Debug is to set debug mode of the cluster. + type: boolean + disableFencing: + description: "Disable Pod Fencing. With StatefulSets, Pods are only re-scheduled if the Pod has been marked as killed. In practice this means that failover of a StatefulSet pod is a manual operation. \n By enabling Pod Fencing and setting the `storageos.com/fenced=true` label on a Pod, StorageOS will enable automated Pod failover (by killing the application Pod on the failed node) if the following conditions exist: \n - Pod fencing has not been explicitly disabled. - StorageOS has determined that the node the Pod is running on is offline. StorageOS uses Gossip and TCP checks and will retry for 30 seconds. At this point all volumes on the failed node are marked offline (irrespective of whether fencing is enabled) and volume failover starts. - The Pod has the label `storageos.com/fenced=true` set. - The Pod has at least one StorageOS volume attached. - Each StorageOS volume has at least 1 healthy replica. \n When Pod Fencing is disabled, StorageOS will not perform any interaction with Kubernetes when it detects that a node has gone offline. Additionally, the Kubernetes permissions required for Fencing will not be added to the StorageOS role. Deprecated: Not used any more, fencing is enabled/disabled by storageos.com/fenced label on pod." + type: boolean + disableScheduler: + description: 'Disable StorageOS scheduler extender. Deprecated: Not used any more, scheduler is always enabled on Kubernetes.' + type: boolean + disableTCMU: + description: "Disable TCMU can be set to true to disable the TCMU storage driver. This is required when there are multiple storage systems running on the same node and you wish to avoid conflicts. Only one TCMU-based storage system can run on a node at a time. \n Disabling TCMU will degrade performance. Deprecated: Not used any more." + type: boolean + disableTelemetry: + description: Disable Telemetry. + type: boolean + enablePortalManager: + description: EnablePortalManager enables Portal Manager. + type: boolean + environment: + additionalProperties: + type: string + description: Environment contains environment variables that are passed to StorageOS. 
+ type: object + forceTCMU: + description: "Force TCMU can be set to true to ensure that TCMU is enabled or cause StorageOS to abort startup. \n At startup, StorageOS will automatically fallback to non-TCMU mode if another TCMU-based storage system is running on the node. Since non-TCMU will degrade performance, this may not always be desired. Deprecated: Not used any more." + type: boolean + images: + description: Images defines the various container images used in the cluster. + properties: + apiManagerContainer: + type: string + csiClusterDriverRegistrarContainer: + type: string + csiExternalAttacherContainer: + type: string + csiExternalProvisionerContainer: + type: string + csiExternalResizerContainer: + type: string + csiLivenessProbeContainer: + type: string + csiNodeDriverRegistrarContainer: + type: string + hyperkubeContainer: + type: string + initContainer: + type: string + kubeSchedulerContainer: + type: string + nfsContainer: + type: string + nodeContainer: + type: string + nodeManagerContainer: + type: string + portalManagerContainer: + type: string + upgradeGuardContainer: + type: string + type: object + ingress: + description: 'Ingress defines the ingress configurations used in the cluster. Deprecated: Not used any more, please create your ingress for dashboard on your own.' + properties: + annotations: + additionalProperties: + type: string + type: object + enable: + type: boolean + hostname: + type: string + tls: + type: boolean + type: object + join: + description: 'Join is the join token used for service discovery. Deprecated: Not used any more.' + type: string + k8sDistro: + description: "K8sDistro is the name of the Kubernetes distribution where the operator is being deployed. It should be in the format: `name[-1.0]`, where the version is optional and should only be appended if known. Suitable names include: `openshift`, `rancher`, `aks`, `gke`, `eks`, or the deployment method if using upstream directly, e.g `minishift` or `kubeadm`. \n Setting k8sDistro is optional, and will be used to simplify cluster configuration by setting appropriate defaults for the distribution. The distribution information will also be included in the product telemetry (if enabled), to help focus development efforts." + type: string + kvBackend: + description: KVBackend defines the key-value store backend used in the cluster. + properties: + address: + type: string + backend: + type: string + required: + - address + type: object + namespace: + description: 'Namespace is the kubernetes Namespace where storageos resources are provisioned. Deprecated: StorageOS uses namespace of storageosclusters.storageos.com resource.' + type: string + nodeManagerFeatures: + additionalProperties: + type: string + description: Node manager feature list with optional configurations. + type: object + nodeSelectorTerms: + description: NodeSelectorTerms is to set the placement of storageos pods using node affinity requiredDuringSchedulingIgnoredDuringExecution. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + pause: + description: 'Pause is to pause the operator for the cluster. Deprecated: Not used any more, operator is always running.' + type: boolean + resources: + description: Resources is to set the resource requirements of the storageos containers. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + secretRefName: + description: SecretRefName is the name of the secret object that contains all the sensitive cluster configurations. + type: string + secretRefNamespace: + description: 'SecretRefNamespace is the namespace of the secret reference. Deprecated: StorageOS uses namespace of storageosclusters.storageos.com resource.' + type: string + service: + description: Service is the Service configuration for the cluster nodes. 
+ properties: + annotations: + additionalProperties: + type: string + type: object + externalPort: + type: integer + internalPort: + type: integer + name: + type: string + type: + type: string + required: + - name + - type + type: object + sharedDir: + description: 'SharedDir is the shared directory to be used when the kubelet is running in a container. Typically: "/var/lib/kubelet/plugins/kubernetes.io~storageos". If not set, defaults will be used.' + type: string + storageClassName: + description: StorageClassName is the name of default StorageClass created for StorageOS volumes. + type: string + tlsEtcdSecretRefName: + description: TLSEtcdSecretRefName is the name of the secret object that contains the etcd TLS certs. This secret is shared with etcd, therefore it's not part of the main storageos secret. + type: string + tlsEtcdSecretRefNamespace: + description: 'TLSEtcdSecretRefNamespace is the namespace of the etcd TLS secret object. Deprecated: StorageOS uses namespace of storageosclusters.storageos.com resource.' + type: string + tolerations: + description: Tolerations is to set the placement of storageos pods using pod toleration. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + required: + - kvBackend + - secretRefName + type: object + status: + description: StorageOSClusterStatus defines the observed state of StorageOSCluster + properties: + conditions: + description: Conditions is a list of status of all the components of StorageOS. + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + members: + description: Members is the list of StorageOS nodes in the cluster. + properties: + ready: + description: Ready are the storageos cluster members that are ready to serve requests. The member names are the same as the node IPs. + items: + type: string + type: array + unready: + description: Unready are the storageos cluster nodes not ready to serve requests. + items: + type: string + type: array + type: object + nodeHealthStatus: + additionalProperties: + description: NodeHealth contains health status of a node. + properties: + directfsInitiator: + type: string + director: + type: string + kv: + type: string + kvWrite: + type: string + nats: + type: string + presentation: + type: string + rdb: + type: string + type: object + type: object + nodes: + items: + type: string + type: array + phase: + description: Phase is the phase of the StorageOS cluster. + type: string + ready: + description: Ready is the ready status of the StorageOS control-plane pods. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/ondat-operator/ondat-operator/0.5.400/questions.yml b/charts/ondat-operator/ondat-operator/0.5.400/questions.yml new file mode 100644 index 000000000..1a5ed56b2 --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/questions.yml @@ -0,0 +1,177 @@ +categories: +- storage +labels: + io.rancher.certified: partner + io.cattle.role: cluster +rancher_min_version: 2.4.0 +questions: +- variable: k8sDistro + default: rancher + description: "Kubernetes Distribution is used to fine-tune configuration for + specific Kubernetes distributions. It is also included in anonymized + telemetry data so that we can focus development effort most effectively. + Example values: rancher, openshift" + type: string + label: Kubernetes Distribution + +# Operator image configuration. +- variable: defaultImage + default: true + description: "Use default Docker images" + label: Use Default Images + type: boolean + show_subquestion_if: false + group: "Container Images" + subquestions: + - variable: operator.image.pullPolicy + default: IfNotPresent + description: "Operator Image pull policy" + type: enum + label: Operator Image pull policy + options: + - IfNotPresent + - Always + - Never + - variable: operator.image.repository + default: "storageos/operator" + description: "StorageOS operator image name" + type: string + label: StorageOS Operator Image Name + - variable: operator.image.tag + default: "v2.5.0" + description: "StorageOS Operator image tag" + type: string + label: StorageOS Operator Image Tag + +# Default minimal cluster configuration. +- variable: cluster.create + default: true + type: boolean + description: "Install StorageOS cluster with minimal configurations" + label: "Install StorageOS cluster" + show_subquestion_if: true + group: "StorageOS Cluster" + subquestions: + + # Cluster metadata. + - variable: cluster.name + default: "storageos" + description: "Name of the StorageOS cluster deployment" + type: string + label: Cluster Name + - variable: cluster.namespace + default: "storageos" + description: "Namespace of the StorageOS cluster deployment" + type: string + label: Cluster Namespace + - variable: cluster.createNamespace + default: true + description: "If true, create the namespace for the cluster deployment" + type: boolean + label: Create Cluster Namespace + + + # Node container image. + - variable: cluster.images.node.repository + default: "storageos/node" + description: "StorageOS node container image name" + type: string + label: StorageOS Node Container Image Name + - variable: cluster.images.node.tag + default: "v2.5.0" + description: "StorageOS Node container image tag" + type: string + label: StorageOS Node Container Image Tag + + # Telemetry. + - variable: cluster.disableTelemetry + default: false + type: boolean + description: "Disable telemetry data collection. See https://docs.storageos.com/docs/reference/telemetry for more information." + label: Disable Telemetry + + # Credentials. + - variable: cluster.admin.username + default: "admin" + description: "Username of the StorageOS administrator account" + type: string + label: Username + - variable: cluster.admin.password + default: "" + description: "Password of the StorageOS administrator account. Must be at + least 8 characters long" + type: password + label: Password + + # KV store backend. 
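+  # As an illustration only: the address list is passed verbatim to the
+  # StorageOSCluster resource, so a three-node etcd cluster might be entered
+  # as "10.1.0.1:2379,10.1.0.2:2379,10.1.0.3:2379" (example IPs).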
+  - variable: cluster.kvBackend.address
+    required: true
+    default: ""
+    description: "List of etcd targets, in the form ip:port, separated by
+      commas. Prefer multiple direct endpoints over a single load-balanced
+      endpoint. See https://docs.storageos.com/docs/prerequisites/etcd/ for more
+      information."
+    type: string
+    label: External etcd address(es)
+  - variable: cluster.kvBackend.tls
+    default: false
+    type: boolean
+    description: "Enable etcd TLS"
+    label: "TLS should be configured for external etcd to protect configuration data (Optional)."
+  - variable: cluster.kvBackend.tlsSecretName
+    required: false
+    default: ""
+    description: "Name of the secret that contains the etcd TLS certs. This secret is typically shared with etcd."
+    type: string
+    label: External etcd TLS secret name
+    show_if: "cluster.kvBackend.tls=true"
+  - variable: cluster.kvBackend.tlsSecretNamespace
+    required: false
+    default: ""
+    description: "Namespace of the secret that contains the etcd TLS certs. This secret is typically shared with etcd."
+    type: string
+    label: External etcd TLS secret namespace
+    show_if: "cluster.kvBackend.tls=true"
+
+  # Node Selector Term.
+  - variable: cluster.nodeSelectorTerm.key
+    required: false
+    default: ""
+    description: "Key of the node selector term match expression used to select the nodes to install StorageOS on, e.g. `node-role.kubernetes.io/worker`"
+    type: string
+    label: Node selector term key
+  - variable: cluster.nodeSelectorTerm.value
+    required: false
+    default: ""
+    description: "Value of the node selector term match expression used to select the nodes to install StorageOS on."
+    type: string
+    label: Node selector term value
+
+  # Pod tolerations.
+  - variable: cluster.toleration.key
+    required: false
+    default: ""
+    description: "Key of pod toleration with operator 'Equal' and effect 'NoSchedule'"
+    type: string
+    label: Pod toleration key
+  - variable: cluster.toleration.value
+    required: false
+    default: ""
+    description: "Value of pod toleration with operator 'Equal' and effect 'NoSchedule'"
+    type: string
+    label: Pod toleration value
+
+  # Shared Directory
+  - variable: cluster.sharedDir
+    required: false
+    default: "/var/lib/kubelet/plugins/kubernetes.io~storageos"
+    description: "Shared Directory should be set if running kubelet in a container. This should be the path shared into the kubelet container, typically: '/var/lib/kubelet/plugins/kubernetes.io~storageos'. If not set, defaults will be used."
+    type: string
+    label: Shared Directory
+
+  # Storage class.
+  - variable: cluster.storageClassName
+    default: "storageos"
+    description: "Name of the default StorageOS StorageClass"
+    type: string
+    label: StorageClass Name
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/NOTES.txt b/charts/ondat-operator/ondat-operator/0.5.400/templates/NOTES.txt
new file mode 100644
index 000000000..e7e2ac3ac
--- /dev/null
+++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/NOTES.txt
@@ -0,0 +1,51 @@
+{{- if .Values.cluster.create }}
+
+As you enabled automatic cluster creation, your StorageOS cluster is spinning
+up in the {{ .Values.cluster.namespace }} namespace.
+
+{{- else }}
+
+StorageOS Operator deployed.
+
+As you disabled automatic cluster creation, you can deploy a StorageOS cluster
+by creating a custom StorageOSCluster resource:
+
+1. Create a secret containing StorageOS cluster credentials. This secret
+contains the API username and password that will be used to authenticate to the
+StorageOS cluster.
Base64 encode the username and password that you want to use +for your StorageOS cluster. + +apiVersion: v1 +kind: Secret +metadata: + name: storageos-api + namespace: storageos + labels: + app: storageos +type: kubernetes.io/storageos +data: + # echo -n '' | base64 + username: c3RvcmFnZW9z + password: c3RvcmFnZW9z + +2. Create a StorageOS custom resource that references the secret created +above (storageos-api in the above example). They must share a namespace. +When the resource is created, the cluster will be deployed. + +apiVersion: storageos.com/v1 +kind: StorageOSCluster +metadata: + name: example-storageos + namespace: storageos +spec: + secretRefName: storageos-api + storageClassName: storageos + kvBackend: + address: + +Newly installed StorageOS clusters require a license to function. For +instructions on applying our free developer license, or obtaining a commercial +license, please see our documentation at +https://docs.storageos.com/docs/reference/licence/. + +{{- end }} diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/_helpers.tpl b/charts/ondat-operator/ondat-operator/0.5.400/templates/_helpers.tpl new file mode 100644 index 000000000..cbe6e7116 --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/_helpers.tpl @@ -0,0 +1,67 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "storageos.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "storageos.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "storageos.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "storageos.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "storageos.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Validate the admin username to be of minimum length +*/}} +{{- define "validate-username" -}} +{{ $length := len .Values.cluster.admin.username }} +{{- if ge $length 3 -}} +{{ .Values.cluster.admin.username }} +{{- else -}} +{{- fail "Invalid username. Must be at least 3 characters." -}} +{{- end -}} +{{- end -}} + +{{/* +Validate the admin password to be of minimum length +*/}} +{{- define "validate-password" -}} +{{ $length := len .Values.cluster.admin.password }} +{{- if ge $length 8 -}} +{{ .Values.cluster.admin.password }} +{{- else -}} +{{- fail "Invalid password. Must be at least 8 characters." 
-}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/cleanup.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/cleanup.yaml
new file mode 100644
index 000000000..cb1180f64
--- /dev/null
+++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/cleanup.yaml
@@ -0,0 +1,315 @@
+
+# ClusterRole, ClusterRoleBinding and ServiceAccounts have hook-failed in
+# hook-delete-policy to make it easy to rerun the whole setup even after a
+# failure; otherwise the rerun fails with an existing-resource error.
+# Hook delete policy before-hook-creation ensures any other leftover resources
+# from a previous run get deleted when run again.
+# The Job resources will not be deleted, to help investigate the failure.
+# Since the resources created by the operator are not managed by the chart, each
+# of them must be individually deleted in separate jobs.
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: storageos-cleanup
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
+    "helm.sh/hook-weight": "1"
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: storageos:cleanup
+  annotations:
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
+    "helm.sh/hook-weight": "1"
+rules:
+# Using apiGroup "apps" for daemonsets fails and the permission error indicates
+# that it's in group "extensions". Not sure if it's a Job-specific behavior,
+# because the daemonsets deployed by the operator use "apps" apiGroup.
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+- apiGroups:
+  - extensions
+  resources:
+  - daemonsets
+  - deployments
+  verbs:
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets
+  - deployments
+  - daemonsets
+  verbs:
+  - delete
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  - rolebindings
+  - clusterroles
+  - clusterrolebindings
+  verbs:
+  - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - delete
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  - secrets
+  - services
+  - configmaps
+  verbs:
+  - delete
+- apiGroups:
+  - storageos.com
+  resources:
+  - storageosclusters
+  verbs:
+  - get
+  - patch
+  - delete
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: storageos:cleanup
+  annotations:
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
+    "helm.sh/hook-weight": "2"
+subjects:
+- name: storageos-cleanup
+  kind: ServiceAccount
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  name: storageos:cleanup
+  kind: ClusterRole
+  apiGroup: rbac.authorization.k8s.io
+
+---
+
+{{- if .Values.cluster.create }}
+
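+# As a sketch (assuming the default values, where the cluster is named
+# "storageos" and lives in the "storageos" namespace), the Job below
+# effectively runs:
+#   kubectl -n storageos delete storageoscluster storageos --ignore-not-found=true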
+# Delete the CR
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: "storageos-storageoscluster-cleanup"
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-delete-policy": "hook-succeeded, before-hook-creation"
+    "helm.sh/hook-weight": "3"
+spec:
+  template:
+    spec:
+      serviceAccountName: storageos-cleanup
+      containers:
+      - name: "storageos-storageoscluster-cleanup"
+        image: "{{ $.Values.cleanup.images.kubectl.repository }}:{{ $.Values.cleanup.images.kubectl.tag }}"
+        command:
+        - kubectl
+        - -n
+        - {{ .Values.cluster.namespace }}
+        - delete
+        - storageoscluster
+        - {{ .Values.cluster.name }}
+        - --ignore-not-found=true
+      restartPolicy: Never
+  backoffLimit: 4
+---
+
+# Wait for the operator to appropriately delete resources based on CR deletion
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: "storageos-cleanup-wait"
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-delete-policy": "hook-succeeded, before-hook-creation"
+    "helm.sh/hook-weight": "4"
+spec:
+  template:
+    spec:
+      serviceAccountName: storageos-cleanup
+      containers:
+      - name: "storageos-cleanup-wait"
+        image: "{{ $.Values.cleanup.images.kubectl.repository }}:{{ $.Values.cleanup.images.kubectl.tag }}"
+        command:
+        - "/bin/bash"
+        - "-c"
+        args:
+        - 'while [ -n "$(kubectl get pods -n {{ .Values.cluster.namespace }} -l app=storageos --ignore-not-found)" ]; do echo "Pods still deleting"; sleep 5; done'
+      restartPolicy: Never
+  backoffLimit: 4
+
+---
+
+{{- end }}
+
+# Separation between pre- & post-delete hooks
+# The storageoscluster CR must be deleted before the operator, so the operator
+# can handle cluster tear down.
+# Some resources must be deleted after the operator, otherwise the operator
+# will re-create them.
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: storageos-cleanup
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": post-delete
+    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
+    "helm.sh/hook-weight": "1"
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: storageos:cleanup
+  annotations:
+    "helm.sh/hook": post-delete
+    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
+    "helm.sh/hook-weight": "1"
+rules:
+# Using apiGroup "apps" for daemonsets fails and the permission error indicates
+# that it's in group "extensions". Not sure if it's a Job-specific behavior,
+# because the daemonsets deployed by the operator use "apps" apiGroup.
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+- apiGroups:
+  - extensions
+  resources:
+  - daemonsets
+  - deployments
+  verbs:
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets
+  - deployments
+  - daemonsets
+  verbs:
+  - delete
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  - rolebindings
+  - clusterroles
+  - clusterrolebindings
+  verbs:
+  - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - delete
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  - secrets
+  - services
+  - configmaps
+  verbs:
+  - delete
+- apiGroups:
+  - storageos.com
+  resources:
+  - storageosclusters
+  verbs:
+  - get
+  - patch
+  - delete
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: storageos:cleanup
+  annotations:
+    "helm.sh/hook": post-delete
+    "helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
+    "helm.sh/hook-weight": "2"
+subjects:
+- name: storageos-cleanup
+  kind: ServiceAccount
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  name: storageos:cleanup
+  kind: ClusterRole
+  apiGroup: rbac.authorization.k8s.io
+
+---
+
+# Delete some misc operator files that aren't cleaned up otherwise.
+# Needs to be done afterwards in a post-delete hook, as otherwise the operator
+# will sometimes recreate them before it's destroyed.
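+# As a sketch (assuming the release is installed in the "storageos"
+# namespace), the Job below effectively runs:
+#   kubectl -n storageos delete configmap/operator \
+#     configmap/storageos-api-manager-leader \
+#     secret/storageos-operator-webhook secret/storageos-webhook \
+#     --ignore-not-found=true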
+apiVersion: batch/v1 +kind: Job +metadata: + name: "storageos-operator-data-cleanup" + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": "hook-succeeded, before-hook-creation" + "helm.sh/hook-weight": "3" +spec: + template: + spec: + serviceAccountName: storageos-cleanup + containers: + - name: "storageos-operator-data-cleanup" + image: "{{ $.Values.cleanup.images.kubectl.repository }}:{{ $.Values.cleanup.images.kubectl.tag }}" + command: + - kubectl + - -n + - {{ .Release.Namespace }} + - delete + - configmap/operator + - configmap/storageos-api-manager-leader + - secret/storageos-operator-webhook + - secret/storageos-webhook + - --ignore-not-found=true + restartPolicy: Never + backoffLimit: 4 diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/config-maps.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/config-maps.yaml new file mode 100644 index 000000000..5b95a0400 --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/config-maps.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +data: + operator_config.yaml: | + apiVersion: config.storageos.com/v1 + kind: OperatorConfig + health: + healthProbeBindAddress: :8081 + metrics: + bindAddress: 127.0.0.1:8080 + webhook: + port: 9443 + leaderElection: + leaderElect: true + resourceName: storageos-operator + webhookCertRefreshInterval: 15m + webhookServiceName: storageos-operator-webhook + webhookSecretRef: storageos-operator-webhook + validatingWebhookConfigRef: storageos-operator-validating-webhook +kind: ConfigMap +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos-operator + namespace: {{ .Release.Namespace }} +--- + +apiVersion: v1 +data: + {{- if and .Values.cluster.images.apiManager.repository .Values.cluster.images.apiManager.tag }} + RELATED_IMAGE_API_MANAGER: "{{ .Values.cluster.images.apiManager.repository }}:{{ .Values.cluster.images.apiManager.tag }}" + {{- end }} + {{- if and .Values.cluster.images.csiV1ExternalAttacherV3.repository .Values.cluster.images.csiV1ExternalAttacherV3.tag }} + RELATED_IMAGE_CSIV1_EXTERNAL_ATTACHER_V3: "{{ .Values.cluster.images.csiV1ExternalAttacherV3.repository }}:{{ .Values.cluster.images.csiV1ExternalAttacherV3.tag }}" + {{- end }} + {{- if and .Values.cluster.images.csiV1ExternalProvisioner.repository .Values.cluster.images.csiV1ExternalProvisioner.tag }} + RELATED_IMAGE_CSIV1_EXTERNAL_PROVISIONER: "{{ .Values.cluster.images.csiV1ExternalProvisioner.repository }}:{{ .Values.cluster.images.csiV1ExternalProvisioner.tag }}" + {{- end }} + {{- if and .Values.cluster.images.csiV1ExternalResizer.repository .Values.cluster.images.csiV1ExternalResizer.tag }} + RELATED_IMAGE_CSIV1_EXTERNAL_RESIZER: "{{ .Values.cluster.images.csiV1ExternalResizer.repository }}:{{ .Values.cluster.images.csiV1ExternalResizer.tag }}" + {{- end }} + {{- if and .Values.cluster.images.csiV1LivenessProbe.repository .Values.cluster.images.csiV1LivenessProbe.tag }} + RELATED_IMAGE_CSIV1_LIVENESS_PROBE: "{{ .Values.cluster.images.csiV1LivenessProbe.repository }}:{{ .Values.cluster.images.csiV1LivenessProbe.tag }}" + {{- end }} + {{- if and .Values.cluster.images.csiV1NodeDriverRegistrar.repository .Values.cluster.images.csiV1NodeDriverRegistrar.tag }} + RELATED_IMAGE_CSIV1_NODE_DRIVER_REGISTRAR: "{{ .Values.cluster.images.csiV1NodeDriverRegistrar.repository 
}}:{{ .Values.cluster.images.csiV1NodeDriverRegistrar.tag }}"
+  {{- end }}
+  {{- if and .Values.cluster.images.init.repository .Values.cluster.images.init.tag }}
+  RELATED_IMAGE_STORAGEOS_INIT: "{{ .Values.cluster.images.init.repository }}:{{ .Values.cluster.images.init.tag }}"
+  {{- end }}
+  {{- if and .Values.cluster.images.node.repository .Values.cluster.images.node.tag }}
+  RELATED_IMAGE_STORAGEOS_NODE: "{{ .Values.cluster.images.node.repository }}:{{ .Values.cluster.images.node.tag }}"
+  {{- end }}
+  {{- if and .Values.cluster.images.nodeManager.repository .Values.cluster.images.nodeManager.tag }}
+  RELATED_IMAGE_NODE_MANAGER: "{{ .Values.cluster.images.nodeManager.repository }}:{{ .Values.cluster.images.nodeManager.tag }}"
+  {{- end }}
+  {{- if and .Values.cluster.images.portalManager.repository .Values.cluster.images.portalManager.tag }}
+  RELATED_IMAGE_PORTAL_MANAGER: "{{ .Values.cluster.images.portalManager.repository }}:{{ .Values.cluster.images.portalManager.tag }}"
+  {{- end }}
+  {{- if and .Values.cluster.images.upgradeGuard.repository .Values.cluster.images.upgradeGuard.tag }}
+  RELATED_IMAGE_UPGRADE_GUARD: "{{ .Values.cluster.images.upgradeGuard.repository }}:{{ .Values.cluster.images.upgradeGuard.tag }}"
+  {{- end }}
+kind: ConfigMap
+metadata:
+  labels:
+    app: {{ template "storageos.name" . }}
+    app.kubernetes.io/component: operator
+    chart: {{ template "storageos.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  name: storageos-related-images
+  namespace: {{ .Release.Namespace }}
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/namespaces.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/namespaces.yaml
new file mode 100644
index 000000000..b0e6a9daf
--- /dev/null
+++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/namespaces.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.cluster.createNamespace }}
+
+# Don't attempt to create the ns if the user has specified the same ns
+# for both the release and the StorageOS cluster.
+# Otherwise it would fail, which could be confusing UX for them.
+{{- if not (eq .Release.Namespace .Values.cluster.namespace) }}
+
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: {{ .Values.cluster.namespace }}
+  labels:
+    app: {{ template "storageos.name" . }}
+    app.kubernetes.io/component: operator
+    control-plane: storageos-operator
+    chart: {{ template "storageos.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+
+{{- end }}
+
+{{- end }}
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/operator.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/operator.yaml
new file mode 100644
index 000000000..b4b1ee5f3
--- /dev/null
+++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/operator.yaml
@@ -0,0 +1,87 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ template "storageos.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "storageos.name" . }}
+    app.kubernetes.io/component: operator
+    control-plane: storageos-operator
+    chart: {{ template "storageos.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: {{ template "storageos.name" . }}
+      app.kubernetes.io/component: operator
+      control-plane: storageos-operator
+      release: {{ .Release.Name }}
+  template:
+    metadata:
+      labels:
+        app: {{ template "storageos.name" .
}} + app.kubernetes.io/component: operator + control-plane: storageos-operator + release: {{ .Release.Name }} + spec: + containers: + - args: + - --config=operator_config.yaml + command: + - /manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: storageos-related-images + image: "{{ .Values.operator.image.repository }}:{{ .Values.operator.image.tag }}" + imagePullPolicy: {{ .Values.operator.image.pullPolicy }} + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 250m + memory: 200Mi + requests: + cpu: 10m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /operator_config.yaml + name: storageos-operator + subPath: operator_config.yaml + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=10 + image: quay.io/brancz/kube-rbac-proxy:v0.10.0 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + securityContext: + runAsNonRoot: true + serviceAccountName: {{ template "storageos.serviceAccountName" . }} + terminationGracePeriodSeconds: 10 + volumes: + - configMap: + name: storageos-operator + name: storageos-operator diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/psp.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/psp.yaml new file mode 100644 index 000000000..5d7170aea --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/psp.yaml @@ -0,0 +1,29 @@ +{{- if .Values.podSecurityPolicy.enabled }} + +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "storageos.fullname" . }}-psp + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "storageos.name" . }} + chart: {{ template "storageos.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: +{{- if .Values.podSecurityPolicy.annotations }} +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + volumes: + - '*' + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + +{{- end }} diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/rbac.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/rbac.yaml new file mode 100644 index 000000000..5f38b50df --- /dev/null +++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/rbac.yaml @@ -0,0 +1,840 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- + +# Role for storageos operator +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: storageos:operator + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + - configmaps/status + - endpoints + - endpoints/status + - events + - namespaces + - persistentvolumeclaims + - persistentvolumeclaims/status + - persistentvolumes + - pods/binding + - pods/status + - replicationcontrollers + - secrets + - serviceaccounts + - services + - services/finalizers + - services/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - api.storageos.com + resources: + - nodes + - volumes + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - api.storageos.com + resources: + - nodes/status + - volumes/status + verbs: + - get + - patch + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - delete + - get + - patch +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - csi.storage.k8s.io + resources: + - csidrivers + - csistoragecapacities + verbs: + - create + - delete + - list + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - bind + - create + - delete + - get + - patch +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + resources: + - securitycontextconstraints + verbs: + - create + - delete + - get + - update + - use +- apiGroups: + - storage.k8s.io + resources: + - csidrivers + - csinodeinfos + - csinodes + - csistoragecapacities + - storageclasses + - volumeattachments + - volumeattachments/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - storageos.com + resources: + - storageosclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - storageos.com + resources: + - storageosclusters/finalizers + verbs: + - update +- apiGroups: + - storageos.com + resources: + - storageosclusters/status + verbs: + - get + - patch + - update +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:operator:api-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - endpoints/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - node + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - services/status + verbs: + - get + - patch + - update +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - api.storageos.com + resources: + - nodes + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - api.storageos.com + resources: + - nodes/status + verbs: + - get + - patch + - update +- apiGroups: + - api.storageos.com + resources: + - volumes + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - api.storageos.com + resources: + - volumes/status + verbs: + - get + - patch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - delete + - get + - list + - watch +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:operator:node-manager +rules: +- apiGroups: + - api.storageos.com + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - api.storageos.com + resources: + - volumes + verbs: + - get + - list + - watch +- apiGroups: + - storageos.com + resources: + - storageosclusters + verbs: + - get + - list + - watch +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:operator:portal-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - persistentvolumeclaims + - persistentvolumes + - pods + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - api.storageos.com + resources: + - nodes + - volumes + verbs: + - list + - watch +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + - volumeattachments + verbs: + - list + - watch +- apiGroups: + - storageos.com + resources: + - storageosclusters + verbs: + - list + - watch +- apiGroups: + - storageos.com + resources: + - storageosportals + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:operator:scheduler-extender +rules: +- apiGroups: + - events.k8s.io + resources: + - events + verbs: + - create + - patch +- apiGroups: + - scheduling.k8s.io + resources: + - priorityclasses + verbs: + - get + - list + - create + - update + - patch + - delete + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:proxy:operator +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + +--- +# Bind operator service account to storageos-operator role +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: storageos:operator + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +subjects: +- kind: ServiceAccount + name: {{ template "storageos.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: storageos:operator + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:operator:api-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: storageos:operator:api-manager +subjects: +- kind: ServiceAccount + name: storageos-operator + namespace: {{ .Release.Namespace }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:operator:node-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: storageos:operator:node-manager +subjects: +- kind: ServiceAccount + name: storageos-operator + namespace: storageos + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:operator:portal-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: storageos:operator:portal-manager +subjects: +- kind: ServiceAccount + name: storageos-operator + namespace: storageos + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:operator:scheduler-extender +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: storageos:operator:scheduler-extender +subjects: +- kind: ServiceAccount + name: storageos-operator + namespace: {{ .Release.Namespace }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: storageos:proxy:operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: storageos:proxy:operator +subjects: +- kind: ServiceAccount + name: storageos-operator + namespace: {{ .Release.Namespace }} + +{{- if .Values.podSecurityPolicy.enabled }} +--- + +# ClusterRole for using pod security policy. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: storageos:psp-user + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "storageos.fullname" . }}-psp + +--- + +# Bind pod security policy cluster role to the operator service account. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: storageos:psp-user + labels: + app: {{ template "storageos.name" . }} + app.kubernetes.io/component: operator + chart: {{ template "storageos.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: storageos:psp-user +subjects: + - kind: ServiceAccount + name: {{ template "storageos.serviceAccountName" . 
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/secrets.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/secrets.yaml
new file mode 100644
index 000000000..f3a3aba50
--- /dev/null
+++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/secrets.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.cluster.create }}
+
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .Values.cluster.secretRefName }}
+  namespace: {{ .Values.cluster.namespace }}
+  labels:
+    app: {{ template "storageos.name" . }}
+    app.kubernetes.io/component: operator
+    chart: {{ template "storageos.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+type: "kubernetes.io/storageos"
+data:
+  username: {{ include "validate-username" . | b64enc | quote }}
+  password: {{ include "validate-password" . | b64enc | quote }}
+
+{{- end }}
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/service-account.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/service-account.yaml
new file mode 100644
index 000000000..cf8fe0e95
--- /dev/null
+++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/service-account.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "storageos.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "storageos.name" . }}
+    app.kubernetes.io/component: operator
+    chart: {{ template "storageos.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+{{- end }}
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/services.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/services.yaml
new file mode 100644
index 000000000..deb93e39c
--- /dev/null
+++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/services.yaml
@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: {{ template "storageos.name" . }}
+    app.kubernetes.io/component: operator
+    control-plane: storageos-operator
+    chart: {{ template "storageos.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  name: storageos-operator
+  namespace: {{ .Release.Namespace }}
+spec:
+  ports:
+  - name: https
+    port: 8443
+    targetPort: https
+  selector:
+    app: {{ template "storageos.name" . }}
+    app.kubernetes.io/component: operator
+    control-plane: storageos-operator
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: {{ template "storageos.name" . }}
+    app.kubernetes.io/component: operator
+    chart: {{ template "storageos.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  name: storageos-operator-webhook
+  namespace: {{ .Release.Namespace }}
+spec:
+  ports:
+  - port: 443
+    targetPort: 9443
+  selector:
+    app: {{ template "storageos.name" . }}
+    app.kubernetes.io/component: operator
+    control-plane: storageos-operator
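The Secret template above stores the validated admin credentials base64-encoded (via b64enc), under the name and namespace taken from cluster.secretRefName and cluster.namespace. A quick sketch for checking the rendered value, assuming the chart defaults (storageos-api in the storageos namespace):

    # Decode the username stored by the secrets.yaml template above.
    kubectl get secret storageos-api -n storageos \
      -o jsonpath='{.data.username}' | base64 -d
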
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/storageoscluster_cr.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/storageoscluster_cr.yaml
new file mode 100644
index 000000000..182e79397
--- /dev/null
+++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/storageoscluster_cr.yaml
@@ -0,0 +1,52 @@
+{{- if .Values.cluster.create }}
+
+apiVersion: storageos.com/v1
+kind: StorageOSCluster
+metadata:
+  name: {{ .Values.cluster.name }}
+  namespace: {{ .Values.cluster.namespace }}
+spec:
+  secretRefName: {{ .Values.cluster.secretRefName }}
+  disableTelemetry: {{ .Values.cluster.disableTelemetry }}
+  storageClassName: {{ .Values.cluster.storageClassName }}
+
+
+  {{- if .Values.k8sDistro }}
+  k8sDistro: {{ .Values.k8sDistro }}
+  {{- end }}
+
+  {{- if .Values.cluster.sharedDir }}
+  sharedDir: {{ .Values.cluster.sharedDir }}
+  {{- end }}
+
+  kvBackend:
+    address: {{ required "kv backend address must be set" .Values.cluster.kvBackend.address }}
+    backend: {{ .Values.cluster.kvBackend.backend }}
+  {{- if .Values.cluster.kvBackend.tlsSecretName }}
+  tlsEtcdSecretRefName: {{ .Values.cluster.kvBackend.tlsSecretName }}
+  {{- end }}
+  {{- if .Values.cluster.kvBackend.tlsSecretNamespace }}
+  tlsEtcdSecretRefNamespace: {{ .Values.cluster.kvBackend.tlsSecretNamespace }}
+  {{- end }}
+
+  resources:
+{{ toYaml .Values.cluster.resources | indent 4 }}
+
+  {{- if .Values.cluster.nodeSelectorTerm.key }}
+  nodeSelectorTerms:
+  - matchExpressions:
+    - key: {{ .Values.cluster.nodeSelectorTerm.key }}
+      operator: In
+      values:
+      - "{{ .Values.cluster.nodeSelectorTerm.value }}"
+  {{- end }}
+
+  {{- if .Values.cluster.toleration.key }}
+  tolerations:
+  - key: {{ .Values.cluster.toleration.key }}
+    operator: "Equal"
+    value: {{ .Values.cluster.toleration.value }}
+    effect: "NoSchedule"
+  {{- end }}
+
+{{- end }}
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/templates/validating-webhook-configuration.yaml b/charts/ondat-operator/ondat-operator/0.5.400/templates/validating-webhook-configuration.yaml
new file mode 100644
index 000000000..f2fedc232
--- /dev/null
+++ b/charts/ondat-operator/ondat-operator/0.5.400/templates/validating-webhook-configuration.yaml
@@ -0,0 +1,31 @@
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+  creationTimestamp: null
+  labels:
+    app: {{ template "storageos.name" . }}
+    app.kubernetes.io/component: operator
+    chart: {{ template "storageos.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  name: storageos-operator-validating-webhook
+webhooks:
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: storageos-operator-webhook
+      namespace: {{ .Release.Namespace }}
+      path: /validate-storageoscluster
+  failurePolicy: Fail
+  name: cluster-validator.storageos.com
+  rules:
+  - apiGroups:
+    - storageos.com
+    apiVersions:
+    - v1
+    operations:
+    - CREATE
+    resources:
+    - storageosclusters
+  sideEffects: None
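Note that the StorageOSCluster template hard-fails rendering when cluster.kvBackend.address is unset, because of the `required` call. A dry-run sketch; the etcd endpoint shown is an assumed placeholder, not a recommendation:

    # Render the chart locally to validate the CR before installing.
    helm template ondat-operator assets/ondat-operator/ondat-operator-0.5.400.tgz \
      --set cluster.kvBackend.address=storageos-etcd.storageos-etcd.svc:2379
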
diff --git a/charts/ondat-operator/ondat-operator/0.5.400/values.yaml b/charts/ondat-operator/ondat-operator/0.5.400/values.yaml
new file mode 100644
index 000000000..4f4b38c98
--- /dev/null
+++ b/charts/ondat-operator/ondat-operator/0.5.400/values.yaml
@@ -0,0 +1,144 @@
+# Default values for storageos.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+name: ondat-operator
+
+k8sDistro: default
+
+serviceAccount:
+  create: true
+  name: storageos-operator
+
+podSecurityPolicy:
+  enabled: false
+  annotations:
+    {}
+    ## Specify pod annotations
+    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+    ##
+    # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+    # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+    # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+# Operator-specific configuration parameters.
+operator:
+  image:
+    repository: storageos/operator
+    tag: v2.6.0
+    pullPolicy: IfNotPresent
+
+# Cluster-specific configuration parameters.
+cluster:
+  # Set create to true if the operator should auto-create the StorageOS cluster.
+  create: true
+
+  # Name of the deployment.
+  name: storageos
+
+  # Namespace to install the StorageOS cluster into.
+  # This is opposed to the namespace of the operator, which is referred to
+  # with .Release.Namespace
+  namespace: storageos
+  # Set to false if you'd like to use a pre-existing namespace
+  createNamespace: true
+
+  # Name of the secret containing StorageOS API credentials.
+  secretRefName: storageos-api
+
+  # Default admin account.
+  admin:
+    # Username to authenticate to the StorageOS API with.
+    username: storageos
+
+    # Password to authenticate to the StorageOS API with. This must be at least
+    # 8 characters long.
+    password:
+
+  # sharedDir should be set if running kubelet in a container. This should
+  # be the path shared into the kubelet container, typically:
+  # "/var/lib/kubelet/plugins/kubernetes.io~storageos". If not set, defaults
+  # will be used.
+  sharedDir:
+
+  # Key-Value store backend.
+  kvBackend:
+    address:
+    backend: etcd
+    tlsSecretName:
+    tlsSecretNamespace:
+
+  # Resource requests and limits for the node container
+  resources: {}
+#    requests:
+#      cpu: 1
+#      memory: 2Gi
+#    limits:
+#      cpu:
+#      memory:
+
+  # Node selector terms to install StorageOS on.
+  nodeSelectorTerm:
+    key:
+    value:
+
+  # Pod toleration for the StorageOS pods.
+  toleration:
+    key:
+    value:
+
+  # To disable anonymous usage reporting across the cluster, set to true.
+  # Defaults to false. To help improve the product, data such as API usage and
+  # StorageOS configuration information is collected.
+  disableTelemetry: false
+
+  # The name of the StorageClass to be created
+  # Using a YAML anchor to allow deletion of the custom storageClass
+  storageClassName: storageos
+
+  images:
+    apiManager:
+      repository: storageos/api-manager
+      tag: v1.2.5
+    csiV1ExternalAttacherV3:
+      repository: quay.io/k8scsi/csi-attacher
+      tag: v3.1.0
+    csiV1ExternalProvisioner:
+      repository: storageos/csi-provisioner
+      tag: v2.1.1-patched
+    csiV1ExternalResizer:
+      repository: quay.io/k8scsi/csi-resizer
+      tag: v1.1.0
+    csiV1LivenessProbe:
+      repository: quay.io/k8scsi/livenessprobe
+      tag: v2.2.0
+    csiV1NodeDriverRegistrar:
+      repository: quay.io/k8scsi/csi-node-driver-registrar
+      tag: v2.1.0
+    init:
+      repository: storageos/init
+      tag: v2.1.1
+    # nodeContainer is the StorageOS node image to use, available from the
+    # [Docker Hub](https://hub.docker.com/r/storageos/node/).
+    node:
+      repository: storageos/node
+      tag: v2.6.0
+    nodeManager:
+      repository: storageos/node-manager
+      tag: v0.0.2
+    portalManager:
+      repository: storageos/portal-manager
+      tag: v1.0.1
+    upgradeGuard:
+      repository: storageos/upgrade-guard
+      tag: v0.0.2
+
+# The following is used for cleaning up unmanaged cluster resources when
+# auto-install is enabled.
+cleanup:
+  images:
+    kubectl:
+      repository: bitnami/kubectl
+      tag: 1.18.2
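Any of the defaults above can be overridden at install time. One possible invocation, with the password and etcd address shown purely as placeholders (the password must be at least 8 characters, per the comment above):

    # Illustrative install overriding a few values.yaml defaults.
    helm upgrade --install ondat-operator \
      assets/ondat-operator/ondat-operator-0.5.400.tgz \
      --set cluster.admin.password='substitute-8plus-chars' \
      --set cluster.kvBackend.address=storageos-etcd.storageos-etcd.svc:2379 \
      --set cluster.disableTelemetry=true
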
diff --git a/index.yaml b/index.yaml
index 63b757153..deee68c6e 100755
--- a/index.yaml
+++ b/index.yaml
@@ -2698,6 +2698,36 @@ entries:
     - assets/nutanix-csi-storage/nutanix-csi-storage-2.3.100.tgz
     version: 2.3.100
   ondat-operator:
+  - annotations:
+      catalog.cattle.io/certified: partner
+      catalog.cattle.io/display-name: Ondat Operator
+      catalog.cattle.io/release-name: ondat-operator
+    apiVersion: v2
+    appVersion: v2.6.0
+    created: "2022-02-24T15:13:07.677580962Z"
+    description: Cloud Native storage for containers
+    digest: edfbee79757a2403fab03bcb3f220a205ac31c95330045a215c9a49d2c03c65a
+    home: https://ondat.io
+    icon: https://docs.ondat.io/images/generic/Ondat_logo.svg
+    keywords:
+    - storage
+    - block-storage
+    - volume
+    - operator
+    kubeVersion: '>= 1.19'
+    maintainers:
+    - email: david@ondat.io
+      name: DavidMarchant
+    - email: richard.kovacs@ondat.io
+      name: mhmxs
+    - email: angelos.perivolaropoulos@ondat.io
+      name: aeroniero33
+    name: ondat-operator
+    sources:
+    - https://github.com/ondat
+    urls:
+    - assets/ondat-operator/ondat-operator-0.5.400.tgz
+    version: 0.5.400
   - annotations:
       catalog.cattle.io/certified: partner
       catalog.cattle.io/display-name: Ondat Operator