From efa28b810db95e22136a15e8ffb1a157c36a0f03 Mon Sep 17 00:00:00 2001 From: nicholasSSUSE Date: Tue, 24 Sep 2024 16:10:45 -0300 Subject: [PATCH] release chart: longhorn - version: 104.2.0+up1.7.1 --- assets/longhorn/longhorn-104.2.0+up1.7.1.tgz | Bin 0 -> 31917 bytes charts/longhorn/104.2.0+up1.7.1/.helmignore | 21 + charts/longhorn/104.2.0+up1.7.1/Chart.yaml | 40 + charts/longhorn/104.2.0+up1.7.1/README.md | 50 + charts/longhorn/104.2.0+up1.7.1/app-readme.md | 27 + .../longhorn/104.2.0+up1.7.1/questions.yaml | 986 ++++++++++++++++++ .../104.2.0+up1.7.1/templates/NOTES.txt | 5 + .../104.2.0+up1.7.1/templates/_helpers.tpl | 66 ++ .../templates/clusterrole.yaml | 77 ++ .../templates/clusterrolebinding.yaml | 49 + .../templates/daemonset-sa.yaml | 175 ++++ .../templates/default-setting.yaml | 244 +++++ .../templates/deployment-driver.yaml | 132 +++ .../templates/deployment-ui.yaml | 182 ++++ .../104.2.0+up1.7.1/templates/ingress.yaml | 37 + ...king-image-data-source-network-policy.yaml | 27 + .../backing-image-manager-network-policy.yaml | 27 + .../instance-manager-networking.yaml | 27 + .../manager-network-policy.yaml | 35 + .../recovery-backend-network-policy.yaml | 17 + .../ui-frontend-network-policy.yaml | 46 + .../webhook-network-policy.yaml | 33 + .../templates/postupgrade-job.yaml | 56 + .../templates/preupgrade-job.yaml | 64 ++ .../templates/priorityclass.yaml | 9 + .../104.2.0+up1.7.1/templates/psp.yaml | 66 ++ .../templates/registry-secret.yaml | 13 + .../templates/serviceaccount.yaml | 40 + .../templates/servicemonitor.yaml | 40 + .../104.2.0+up1.7.1/templates/services.yaml | 47 + .../templates/storageclass.yaml | 56 + .../templates/tls-secrets.yaml | 16 + .../templates/uninstall-job.yaml | 57 + .../104.2.0+up1.7.1/templates/userroles.yaml | 53 + .../templates/validate-install-crd.yaml | 35 + .../templates/validate-psp-install.yaml | 7 + charts/longhorn/104.2.0+up1.7.1/values.yaml | 524 ++++++++++ index.yaml | 44 + release.yaml | 2 + 39 files changed, 3432 
insertions(+) create mode 100644 assets/longhorn/longhorn-104.2.0+up1.7.1.tgz create mode 100644 charts/longhorn/104.2.0+up1.7.1/.helmignore create mode 100644 charts/longhorn/104.2.0+up1.7.1/Chart.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/README.md create mode 100644 charts/longhorn/104.2.0+up1.7.1/app-readme.md create mode 100644 charts/longhorn/104.2.0+up1.7.1/questions.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/NOTES.txt create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/_helpers.tpl create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/clusterrole.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/clusterrolebinding.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/daemonset-sa.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/default-setting.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/deployment-driver.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/deployment-ui.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/ingress.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/network-policies/backing-image-data-source-network-policy.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/network-policies/backing-image-manager-network-policy.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/network-policies/instance-manager-networking.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/network-policies/manager-network-policy.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/network-policies/recovery-backend-network-policy.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/network-policies/ui-frontend-network-policy.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/network-policies/webhook-network-policy.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/postupgrade-job.yaml create mode 100644 
charts/longhorn/104.2.0+up1.7.1/templates/preupgrade-job.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/priorityclass.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/psp.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/registry-secret.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/serviceaccount.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/servicemonitor.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/services.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/storageclass.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/tls-secrets.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/uninstall-job.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/userroles.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/validate-install-crd.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/templates/validate-psp-install.yaml create mode 100644 charts/longhorn/104.2.0+up1.7.1/values.yaml diff --git a/assets/longhorn/longhorn-104.2.0+up1.7.1.tgz b/assets/longhorn/longhorn-104.2.0+up1.7.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..12d950b91485076957200b2902a5bd6f9aafaad2 GIT binary patch literal 31917 zcmV)QK(xOfiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0POwkb{jXcFbdD#msf#JXP(#|HT7ZHp5cDH*Q-c!qKR#BiE?sg zC$ns@8zd1m8$AG;661KUa~tRO&Xt^n!bhXO$ReO>Py2^2ULL$W+<*T3@K1aD&-M-v z|Ah9oj!Dgv35ofi_HNx)xpRMz2PX-WSW=dVeg~ltOB}OFFT_&Dq(|8<&Lj(HA|#IE zKCtVh*tJEkvJna9gbT`& zKKlBy$j%7 zFHI($E3&v|JQDp*kW)OcJ?aF0Z##kRuX;=7G*q9d2nFo~C>e{6;@=;jhG`-( zO$djE$PClCkG!}4e~-aB2o)!bU$9eR4#6@W5`fx@!0n0!B$?2Jv@H#yBMC_`!wH@c z-nO2YV!n2zHH&AoZAnRl6e|jkaCk+NNkC_Kl4G9FuD!$FL2vIbS*mBzmzMncaoEps zoDp#+3c&L9|K*DpFJ6?_|7QpL59|MZJde-`8RIOLNHYKkXeV$wkI?0m3N73JarE|0 zFlKy)r4sLB8WUCEgoH8X1kEw0coY+XBts*DQY-|C5KSaQ3zl&t$t;aANc~C7MmSaq 
z$0HO3=#s^RgAxMGCoCcY;W%d3BvR0LmLMhNgNURtTaXBaY?iWwBvK%eg;Rt@E)n1A znYWX_d4INpa1!M&PB@(tj!rZ<-s>PF9S)+8e)^|H?H!8<5+o*}WcdSLaR6EM-9s=AUy+&B#JqrJ{sd#5Wj`e=)Y0wm!UE=8seme#tw&^*tRKu zD9x(?T@lsYT>W99YrY}P`hC;|%`bmV#$yu7KI*<>!*EKXEGGF!mZ~sgMOQ>U0tUYT zIg1evLn1)(Iowkg0oE19*OS$%j*9pIDk4%DAqjvcx}6132k=qYnO5Lkf@m_~M2H@G ze4ZM))xCNqx;v=LugHPl_mWXF`CC=qG%II3v{_UESKQr~VGz1k3V^zPFw_olh7$WO-ai}V7 zseeUFXjSKyv|R3)F<8!2nU)>=Mpr>lr_>7iaY_=DB_bsuIDb4HClf}jMZUJVE=lSw ztOknQ4XPFH$kLQ?`AwEYG5Ma#_8g)sB3<)#tYVM|MgS|gqH;Bxv}j)Ub+5PAJ3LSd zr!e9PONkXOtNMC8Jac95jYmR31KVg56}F0)rqW^Wh2aQr4l0ws9f1aU@q49_hvqUe zZ4*69sXap}rO|3z#~38I6~#b(G&Yi}c(1fmhzAj15O9mZGAiB;%9@@Bz5RpUUPt#q z&|llJ-GwfCuVgdmEhE&n(6VBBf8WaK1gB!kWZArJAkQ!_ey=3*&|F5QZK9`1^|H6u zJAgMaos)zJG2m>p(i%|6*a}MWjmW)`%Qg$GE+f{K@pH585M~`qNg}3nY*v1TGdUe_ zcC%PTy7Qwnmtt2BUY|PFXm2)U$54(@7QQ3P}<@=&PJd!b}if+T=RA-e^3Iv4H7r z3c&9^l00+Xq}Mbi#_bE#W8<|Tz8052RB|H^Ey&r`SfCVhJR_2Du`bBBS7R|ywS{pq zTQL~Z5TlS~iCh;X{5BAd6@Wf^=GuSaAd!BaG%O&SnJRF|6iNAPtHN zWH$Yy0z8t9F(OpplJ!VsF1kj;tSxG^NVFvL36wCba- zn*Cjm%IMweA)4uFmFApQ%lh!;;>Z8XG$=X26)-{#KElWb1-Mr-{fo(RltVW0DAjxuJQ;I47Zu7w8(eMayCfKbL(y z^!W4!&r*fb$DclR6Fei`es_e!t1ML$k&16Q%hGONjmqbLe$=b)FKqON3Tbu|eP>%c zLS}*>Z0l+L3(;gF%6(G+QewN16Rg#_BvK}%WI$uX9Lt*2vn*jnBsGQc9Mf1Kvr}B; zl%UpuDSD|=je5%r&>>gRFw9ehF<@CCh*^Rm?&W{HtyjHPB#=gZGdrDC3&K;CB~wbRrGkfhLkkfj0wql9raaJ^4gw@`CN%31&Pe%sa zih3q4ac7K$tS4YgI^j&R8J0A3UNgkWjLmV3q6}~td-evWv*k;$r@PWg!njtm&9=Du z(`nyYlbx|Su?{YA0uAQ&%_XO^J~D7Ros^UJ>4f8mz(mvbWUIK9l<+a*Go@+@;nttuoyP^fsKA)qf;9W;M;F) zk+q0g|AF71L)*?2y|WX2MuDGpfV`lyThE}dAgTv7j6p>Hqm!bgvhmCibw zkt7o6ijcGd){M5If$4(@h6S*O7#*|21SGl2M#_jmoH1oNaNy&`hJHH(?1C|QLb;a3 z%?9c5@C+T9?V$}CM31}Ax=&QIc8{rOw61R4+jO~A``#YUx+oY&{H>Zd{{D5F_MF?d zYTomumr>{x!=myypwlEu8BL^KtmZeI{l7dis_j;(L*zuYE|L8~f@7rT= z*&Y4&`-@W$Y`Xd-<`d$R;T@hie@z}3j|2%hkq8UHLW-4Y;?@QY2uXm{D?vWyBqE8V zI35B7uccK&#)*9##|y)|o_ivRtPXAoE0nM^RMFzGVDM=>fM*Fz1=Ze#XH@<;r7`&d zV4o0}Ceg;YI@`c<0_aGvuCk=Ow8nTuV$@Y{d;ZZ44J0ri0tMm~SlQ??9V;V8i*>E$ 
zfL8v*uxCNiQQ@3JHClTGMMM;pwM#|yl?sHTNb{a&TPPB07$><3`9}4GL`nq_9_?%# zXo}^upgDkP0I`}=CZzVe7v~NYsJlDId^e_}Jg~jnE4sl3SWau&i+mqk4;@uJNK-6| zg%DNRHT-fuk%Ak>8d^*nT0+egpcS@3a*-ojZVVY8P?PU&DU%5uu?Ut*fpw+?$8x%` zcshR(;AFi{mkpeNMhlCJ7K{L6$j0(0k@RRhrpYqY3e3*QNW1+-jtfjaoa9Egrjo`0 zyh(cPH5HCB$;{q?*XDrmkR(!>IBe|B?YLmo5dj*7AdEPPv7~db%%_+~dUphcIK?51 zem!sP@9hCI8Q|x0!rfX&lSz(2BU&?>&{;O~dI}RMZ`jO8Qd80vLgXeSB+@J-nZ$8T zp`F~=8Y$(52(=7p?tp?>B590fj4N#&htR>9QqFQ%YVu`*G*RQ=xlfF@3SS%_+1f3L zp-n+(+|Gi8NpF~p5UH0rK*MB)_$Omx}Ec}QBcH3G^QkutY@ic zNY!)EGhaM6=vpVs?NMtw!cV1;bb>IjST4iXGeVCR$xLBSNW{?K}lwq$5TlL%pS&G=Q!uvXqYoCZID z9A}yI9G?Ct72vVaFC=VSBJ8B02aL=eDPuWI-lb%K|+>9Ww+-wrc$6087tue z)5o%zr-Ql)W!^RNhmyIuSj<1{b$3w8v@z?gRk=n9%?wx^pCyv;IgV?)Fr!H(Nx2Kk z3WQBmOkwtEos|>qXl%qMcRfVPU^9}yM@<9#MytG+Z<^Lj?Z<{SvY?W}J`%S07%c;I zqJmoOsk;>rpbaKQH?ba(yn|}U72?JMQdnp;*Vlf1Zg`7tV4=!oj>y337-zB9wrN2j z=G-!Xk9OyC*cK#FGilM33Wz{yYC-o-uOSYczZW!gwNw|r)8O9F8I?=BV|d_>x>A9@ zvKz0V##&vdHF-+dmUhkF;+qR1l`Y1SHsmouyhsP}dZ&B48S8cE|OU{PsIGd|I$ z)*70F#ov!*)C=-;}{fw2&jq*=bIXjJm#8~5|yx}586V)P-;4kLg={}enBK!5Sd3|eY6X#4cJM2 zf_>DkE$>Yq{V&sEfwD@`xFBP1y$(5r8L`wE-t1@4@dU)Ff!gef<6KG9rUC+77U;1W zTC;Ptqf5gcggHtUvLqp)UF4`*WOnRPVd0ToPTYKzv9766B6QroP zdS-D84H+CZnPfxV-;Mn;u!a*~Df10>%j}~C!uB9tOn8ZE5%{eIkxzhtWKMWZaEekA zj28)IiQRmQ%R^umYftMlmVkKDLr366w|a-R|2(wRT8_EQ9S@G^9yo~XAtDdu&~CJu znw@%k9bzL4T+J&=;6EM}8M zuXtEh)qVB9{~IE$Jm0XQz<+fuTBT95_Db&JQT1EiNfjfSvGW zGzVTdT|~z2a4gnRGiM`w<;1Zk5`@gcE8Pc-lto6ySv(}5x#-jNFfr9+hDnjTVYQNE zPu`!f>tw*96C)aIl-=#ngvLCtw2)=`7AAp`aNH@H`sfU%Rb1*u*0-vZXfbgZGn`~f zPTTWRQE6dwPDCp#w4JNUzBN_NHd&+vmrxVIih)h-T~4$*=(sL-TRT->lZPfk5*#VG zo=NYC5lz5|fQXMgzp(j!LDHBcRDgW`5mQ-X=vMW0$Nj!*G-QcV{YfIVi#DUNEV%c< zgYODmSOiqTXh67UmTZv51Scp< zeTq#;LbQqPd@!1-mJ78Mi_9{v3_PW&Mpkh>Rxq=42++Q@!0hJzmQb~r<;a4yP;n*w z!bT;$d~chUNj0cdC9LLD&&Ta0`YA|S8;Vm9w3>j`qz}%{X!dF42A_@mEYih={+NwX zizhQC<+U@q{mEwcQ;L%M$U@e6HOv-HgRoxZ=`A{|74g-$m;o$CPHF*YVusx;F)A%= zPHCGKYPiq_8#QePwqc!KOUH=Vb)xLid|@jgi6@vQxy3FdmYFt6;@C#$DtsXj4u!e= 
z>S`UOt5nQxmRu$5I_Z{sa$zKSLsyEMeBLId-q!<*|9J4;dDBHHj=;Dj^%fq8YxrpM zWZ0uup!d2i{;;njfo-Z4r?@xnf+N zW6EbxM#1wv(sV}DGHj3P$(r;*+5uyY_Mr<;*F0@&cK_OilHF%}T`NnP=D=UOI>VC! zZ&Z4NNISj42HYl*)yDEAZ;Hj#p2`50qKPCE?LF0Ts$Ptnn!0E9`--+B7y0RJY~;=9 zSW^4iYAOm_h^SENEuEz}1oOrO0h{<#HTxT}I-AW%M6uMn?%-^*E3(yl8chUaQ-iR| zo!mOE05nEN8aB4X%KMN@>K+_Um}NrhVU|r@;V`7P*5#f$B`KAEGR?Ug$j54HDuoS58q)d;Y>8jLOr4QNhGMfWNy*I+Q z#snyA)`l^}Yk78d&!m(4_F{0XkU1|P^M*}OOy(puSkNS*p|Yqzxv|H<1ep^%iDNba z&9HiQyIvBXC1chtgyjx!Lc{J3>b}Mj$Lfz$&KaDQ{}FRIKns=NAnhods6Q?_4)uZB zm`&a&gxksS{HT+$M-(}&FqsiKWnNgtasKTsd|GOdsAUvPT~2W1d47s+pDfAsJmcl@ z+Os8BJDApQ&3K%0R`B8;npc*+&yJk9GPthcZe+IEYe5A z5lQBhvt(v>d*=u1_SFWODHUIj>@=GYltTV>wQxVC33^MvdE%yh5&3ef-(?e`Dlan$ zXrzM$xdV(f~P| z6TN+s@stVTpKB9v639Tahf8ONN)o~IngF~U-PlE6R^ z2V{LnPBrv5bj`bgDp<2rj;+t}yYAfLe znzPj?>{u&_Y=if7%m8{U(&*}goX97(&~kD2X00{9iqAO46R}-j1Sgz~8W_+bDx9W}%70<1c?%wthsdXfq8b3}+ zqJtu|+txSoGY-kdV>Y3o8iR;PEv_O2#*UZN0{tta;Z=SD2()Mu>BO&Zw48j6g?x=^ z%;q`@ypwV|$C9`N>1*r_AXC>RUN>jB@}oPI6It&s-smmLa@FduALcu1mBuN5bLsSm7?sPE{U>hxXYn}vO85(xwPb?L$ZjGzAb(GFD%KEe^36WxlEDp{h>HJf-y z zZRz^y^X=%WgQS8hve@sVuKuBed%LCY2gUCPj(kV%YP05a>?S`Sc>468N8W{K%Shv0 zoVF7DmPwO0ZRz8^DG;ex@}Q>VK{uB`dON<~LPVI2`|U~ylOOlK1w`q#6)!OC-$GK@ z7>o9TA|Kps$ceV({gML1&4Bl3{rdC-<=mLLFIM!?L5mq<5`}JWw!A;P_nGqk?B8v2 zygyrKV(2m4#-vz6$uBw&%#|{&ljzv69D5fH#HjJ8W<`8|=C8G-J8@jBnTDdLkK1K0 zMqk^ON{4E5mkNxd7dU;ES1_Uc`mhW28iOPpa3d2*V||>OlG2dt+|$UYJxrfH9|PJ8$F<(r`r z7o}kWsSu0uFsueANSJYdugV1?)eJbAQ>^po9v(i^nwF}qtb39~mDUxizS-wS-nX@` z$U+~{)ECX_y`*B);N&`@}<%Os>QplQX&N~P>D*_pdPWgzn1(0Gn?YUfdUK$yn zLZby5Gkd!exN*R$^+2FcpVXj#{`{9upVaTqpJ5$=3>>@ViKA;k)<1N~+|{7qdb!42 zpVGpT>S)59n&dgKm8K+$5;4HvT0ELV&I-OxAXs0h;|s&DT`U(|-W!t%(_6Q2s?17C z97TOyt2p0eECL*=d14b~J<`QqgUb;J1!n`5?QTzrTgrLL`3_295+Xs280BPKd*f{0 zzMbqrGNH-M?kHv>dy#C!Lb27zBOW24@els#%ii)0gqBlb>8^WrO%Jb*T@xQT^|7z>dKYID3>2K zvlqi;TQYxp=)EAh*QL0gW`gKTB-wT1R}y-$1!-ikHfLEioPL=hby8-3;ecrYA@Ctfv}1T-dvgT4^Au6P*&*Oy25o3J~fw9}+?o^ii-@EmAr)DazQ zpXUzCXe!T{52BG zag-qAEH+7T6xy+iMG;73qX0(M#XuS5DFJv 
z#>zQ1omLyiy$&w9i0my7N7}RAuR5S|F~ZE`(VCI*#g{}fz%(0@{oFJ4mL+s`%2Sv` zo?jcI0cSHJrz8{hiUk8HSFvk7d*#bFYi@AW-HaltNuzCxT>D?$vWAQu1W0LS9d9*c zTpM!5o97lz9d?bdG~37?ND*?JnoSln4(6!Qlz4gfkUp~w!Q18{-9jv?4yCL>w-H|& zzHZAXuX&^wG9O%Em*2p#-Fmc#rhx8n$Tu?=Fe{m{@bLYA|L^}tsJ>vGfDOppLqnpa z?oS?i=f>_f%|$HCoSv8O)TgdOU=8(NMCL|6N@>8!m~fJWME%-*G}FUFlSwaRoG_iW zdA{GX85bcY#k6TInTB9$A~B5(2j(5M9*ngK!t8<7RJ}KwqBNNh0kKPeG>T~MyeQG7 z^XO52FOijDqEoo4sFi)MBPrD~4J(Md->1fgFI{kR!7{1kif<&^V1PUd(L~VaHwRsl z5zG)4n<>l{q`sHLA*0?@fD2z>-w`Yi00)mm<) zv7gx$B=8pJ%)ir9GygwKNt_ZcdNPf-(#DGU|Lo=di_-jme)ur|@8|jSY4<6b(^+5p z3202@A|V+b66)|uX}J^4hr(m;BV|5 zLp|`k_kMsxP*BoZZ|FB9d6tCe#WVPm&W71|Om9#($bpe0g1_p+s_SI$Nd;RatoysWe-_sB+5g+~y}hmnbaSM&f2xa0O@8{hi!(VDKX?1+ z)6d;M+dtGx^{0OI`Sa(lBX}t$u;<0rp~pGl`#|_rbp+un3+aG3^cxzzI3!8<#QlZ5 z9&4@-(C*Vt?$FZaw^b{JJ-eJlm`CV463?vV*Lk|@&&c91>oS_aI895wYVAb(^ajd0uxv6M0C>6#}t zm9GgWtj_V>?nVGeEOLp_txlklM9q6-yYM07acwu2O_8POx2fo%?cy$*iQFp>PNi47 zPdm0}I&wQt(p7Xg)=d&q=z}X%OOJ{#(iK`=qA*GYPSsw}WuvzK`d_2}8I7MS!*+pZ zx7Ptz=zlK`pT8{We=n3t_@MvY!-G@$gB5jrgu|X%2pOlp>a%=Tf7Q}(zjH;C$X&a8 z!D7;}-o$j4Nz4zu**yj{e}{nL?n^uP|aTO+4VaC!YPG|6~PM* zKgo?GiOw3YQPYPFbqWPtD$ImrGy4+eEjX7&2cbFPBU7yVv|DHa{nYLLvj7eK)J<6= z;Ib+>O{4yD_xnn7&Yi<#X80sYclM?ZC+cK^A$<4!!4Do+SpJ{?mbt6*@NiYh3Vz*D1U zvhdm8oQB`tIY1|t!yJd z)@K;Cra2S!2fOTLkk`=HMpBzJ8;!8dcME#QCNRs>k=DQTSThvVv12HxzW%&L$IUUG z-YOK${4vEVn7YGh)mI@)WU&Y3FSZ-2koCMojR<H{jLu+b5G(5EA(^3qwRBzci z0PaPp%`Ej;wD6)*(EqcsahFxiO4Py&52nsF+kH$t#(s`4H9_kd{_>e*8 z_c~N~Q7LGzfT+G{L}qOvsoSn)x6O%(uc~O$VWlf!*+b2ZE09rdwD4GG zp{K^+T8<`8snb{r&;jfYGOJt_zKus|)ygcBaMtjejHZlT6#|3x0OFWNONumh9Fz)z zB>^qKuE;uCyNY$A`dmd>>BoA8v6ze~s5_=uy=d$+k(_aX$rK~^t&Ls zkt2ax&67=tUhdMGG{5_0vBkKQR>$ApjHp|_zr+M-*-%FBye+%=`Un*)kpNFfB1>GU zlX1929C$M>{esFx&HC>gc7ecK*#KC+{_h{YsPX^5e2D+Jm&X@=I88+^=UhB|LA-4! 
zxgZuhO}&Y1{3z8KUi`4eOcE&xb-27aoU0>bmDy&(%8j|-govF2eH&Wqdaa)Id;VA7 zh1JxkdK5?qu(fNIf1Jy~D4p8Mt4j6{Jd2x}F1gbzmDFPnXA$lIme0tP5JfuUPHFqU zxIL`K;?pPB6{yfFhmiC*NtvKJc+Qn?r)1loaV~>w(fP!BNuu4wRz~mM4 zD+JmX8vqTw58aIiOap8^UZ%Jnu5JM}>-V)p+ai**nCY=Kw&i+tXoHXW4sZsz>mppJ zREMqcIchxb0BipmgLRRwt6NL)f{Jw)r1vj6{hrG9-`UMA!exrHx!-I_Ika~ywrzPINIalLv~ zrE;~_ex+8(5{uULbZOm#N?qr%URCv3r7B$OWCI}GS59z56C%Wbvk~#vsi~Cdx1_Li zxr6}o{at;B+^@y2b)80#dRAfZ-R0$A=)UOfj<*mw;1k{&M{^GbTxL1YZBLnyyAhf1 zR_dgjh07|b@6}qC9UE%SG2e~pD9;mHDaTogC+1#(Z)ilhK81wi#-@S^PeXGcxUNkZ zivUjM>^9Ab@`0V=daYZVumFzw1QF#84f#cOmrW0n%&TKGI6wLD?&$5Q2OJUryyk3H zWE+TRHXen_dyR`#gCSzAv8mzU==cuW(_wgZjh+p*(wg3s>!mF60NQrmou8a;kC007 z?jh@oLXG`P3wV(v+8KIh!imx3v|vr6E-`dGu3Pz*b%W z7X*-S%20s--W7BtLjUW3ArTXjBAVzMO1u9wAkB7r*?RJYq>^yJ0KTcAXSpXVYenUf zs=ml<6qrluH%ZUd?6_2zEor!(KM^FY>7x@6(u^<8xjxKMZ^O2c+&XH`$3VY%w^HKU zgZ>3Guw}rr$+_Y1bW>-T=Ogj)_xQLi$OdF-3+^=a8v1@zTS2Z-rPjy!owK$2^gW9UFkFyR{w_cfbYyXCf59ft1 zx@y+`t&_hA4%lzD<1PEqo^|}hdyS~n7Fv&a3JL!1hkFWGug^Y(>W=Pz-IRaU zC-hPdS|41MhnDxH@}iiK<=v{isw|DLBtyxuB$I_6bI#(JCX@H@$+E!=-@H$-zL7O1 zebn9G+xv63vWw_^=w^rMo;~QfExD!#edStG&5^9D6Y&;I`Y!~WlWJhfs=r_*_h(`AtjB{tba+H5Ea zEkbpS+DE?y{=`g}M2}Uk&IIbx1VTac7j?j}If8pa#TD#5ljt3bh;V(=rQ95fsJIGZT`+)X z?4^BL+Qv3JV}74tiIqL?(06!ai34rfFG05iWt#|^(=|gjmPd)CM`P`vafq7lfnZ}9 z;6%~@n<7gwZUB1+m>*@5eWNoxSCGb;WPwTl+y-3_);2LU2f5DYgbz5IQ?*#oWRl-f z?hrR6190%9UfCdZg_m|@Wm9+1ueUUzGaMgTt$sA=hi=z4J^}} z6xXY}(%jbeH9a_`HehjVvy_&UJop}@ERKUb1L;cjt^#$Ni0jOy`o_M?%Qp^T{XVeD zsa%d|3CecYTj;-5{Cs^-c3PV4FsV*MG`cmJey;|AcopkT-Z1fi2To9?u5SG;RNtpsOd=qe6rd>ZufbRtP z*TBLp#>LExO~<6ZovrE-ez22nuC`01a4plX_FY=h&hjoXjvlYqEs1F&^IMgkJVB4w zap=2l1Dqh=`hY%uWh$K%8oewwiTbbf)fKS${Z-VeY*5}HxtpDcA~I!Sd01#X+V%I< zmfQAI9u;VEJ5H$fZu!u>Y72QK>R**B*imms-xYWQoBhIQg1Z0NMUUYUm{3+4&HKNt zsisfeYp;KF>@vdoccgDT{WR>rCWFiK&4T|V5cv&VO|#NRW9gKvE%;^TV+)FwxVTN{ zH=Cd9;j*PU+lHl9&Etl?Z_w20iJjE(PKzN~hH}$4Bz@2&yAnb0qn`-bz%+r~-XU_M~ck5iHqz0L0va+8u!k|D(yS<$^p(R=|=VG(|yxFE`1T zw{r(>+W%$=S?Of{l_g|DPjemoJLtv<6;~^}0hGbo%62_#N<_THNrvNiaU`|C(BeZV 
zNn$6O=V4Q2lp+RI0&MCS=_Rx%vy*#;tF2|@cQ7?BNE(xb3P|zvBc`(Ad$aR{`z3>G zOl3PS+^UAQx(msYFyovg)?a)+Muw^46bw0;Nc>jlv}W3UI{?RwZ{m7DK+Q(?vSM?K7~`jLD+ zdcY!aRyYq!TS!AX`5f!yDkbo0^BB>O8WwV^I+F6s+xa{4H+ zxdB&dvdde$qrUlGd_B3BcdzK2HiBi{E;cuMHlFbV1;B&7wu0xGB0}r|MeTX0Ik0<~Wgr zL}$fA`p1L!u19&HYIcDz&6aj%Sp(bbNPe$`@uMk*7a@)bKP~w*^P+)Ep5+P$?!vt# zom$=4HoK;izaCje0;ozOTWgtlcJuJe7p>}c&~DTaRit>Sf1hu1-S zybkUjukW%6N%5rQc_<wdt~St=L)CcXB{l^lOWl`6)k7-!&WM_)q= zcheKt$6MVKy;HbRPpU0!y(h;kR`6*uv9a6l1)h+%`cjNiheB644;9Y@9Qx4ZSNCgq zdv~$VZWOmc#wRtK>sH}S)#q0G={~%>n&aQL@#~bP{B7JkZQH&JgXAr~DW3XpV=}`x zEslJY;oQ}jyeTVKCHlA1x9T9?A;zy`JQ2$<9;*WDUG zn{8vp+ie^y0e@E;J_h`cQDUxlu3grFtfKs-&;0vOZT<&)C1ap3{kv@jh*kOD5BK)U`5z7s zpFQM%xQ}N^{&&X)OS3=#hXk5uHp$w)aTW-h7RFw%*hl;8Cx5S^e~(GvtBn`av(tpi z<4TJ5JY5ZJ+%={m|J~$j_wz1(Ev8@Zps#~a{R_z)&`;>A$6>}}6wJ^*3Iqz6PK}sk zag2g%)cy0Psg&u5kVWKk7xLfscX!L#j{1LlzPGnK-+zMs;}z;2?Co{YKmQX&3_*5B zMJk$1{?jCKJWyW}^0m$5SWSdqlVY%LxPE7Z-ZWEZO_KYiY3@sj?jaXFki`(GSg;|j z=XKK*cX ze0=`?-Q~7hu4Z=gs(dOY;|c|Z4e^-B3&vzAoo#xJ;(Ji~WKgW-4QmzP^7Q3I|`WU69(J5t4TPTWln%ZXJi zx|CdR&VD$3cRCy@arVvWmMPYVaudz!5N}I0e_gdpM_rbiDh=FPcX2v+b9Q|6p+V)S zEZ?=wRI*yHgSaQl+u`o7LhGX6)Yz?hV5CDk?5!+Y7xTJCZiTfsS=(XmizA}RE)Q`@yaK!$v+F5E6bel3hU%8zf_dPt9dcj@tK zNW5`&{2EwaC^^2tLF?9NP{B}d}DAiR(E>LqRS`qprB z&b?nJ^QHG+zVx$v|1YE4xBz^}Ih$irHv)hLw2U4Ot>N9qkGlkr3;x=FTIerz^q; zYFMd-j9`M5h9n~61W}2uX&j?W5G1DrX@t5^uj@ksF-B0xl0;twh$Mp=x`{c(2tO|UAbS|-phgGbc%~C0H2ac5E3p;g8iU^1l%iTXD( zaHMIl)mQYJ(<2TKpILJFHMv}ZhlZ#i4&#adiSeV!x7pn(<@3A zR}ADS-QL>lP~I@4A#V>kYW38%S8)r;37)-jF%BV) z6(jcly64@gt%}N6z)JUOm&kBe#$wmF;?&>0kjqNxE3&9Bd_@+8!XaZOeP3y*zlG^vuN>r4cDdEwq%+)NraEdUxzI9(CZj9oWy^r0WtWVjK1wDuxd}oX zPJJqtum6VcyHK`#eRvK7uCiJN1Patc>7zO30ne7*LC!Pp>hVgjpUN(dg zy*YpP?RV!F?>@XedUy2g=|%DW>wL3sS-etr8Qk}09|q?awVNPag>|>yZ*!;XQvGac zeYNk}bBySNNxY9f<(#TfsWN1n;y&*!Enj4_Ki*Et`?K5aqEzbN(GJS{vxnW2JKQ}f zk$2;rlM|w{`u6InzG~Rr-qzvg6J0(F7lss1@x7OXcH5?tuzg!Qk8%k+k ziRJzj?J8?S8FoDnP5Z$hoq%2FiPL19Hx zL$GZwa5AwMLN0SL*6!yZ-bj@XF1r6|fJR?|@Jvk1Q?Vbiw=zYU)v?1**8~e4m>guxA 
z>K4tbuKvlDBt>6ck;M-B3ao9^e^o&Mmv7rgaIMP~0dEcbt?QPIrcc!?^c%Wp{&zN- zhERT6bnYyzTIeg!VqzpcxEJfds1Fuz@L^c<5zA@8q2ewZwmM?BTLG4Omkj4%F0BIL zYM#x~n9N9`i91Y5NXIm+hj-~z4EtEuO2K7f)vc22zgfxhA6WP*n+i!b8xal*V#Dys z*f56Kq1ALYWD8c2+xMN+6qT{ay#n=HfnQpNJv^H~HS50-@;cu-C1SUjHyF}vXy18I zD7t9U7*?$R`v)&e=l>60?jPY39WjBwPi$L>IDP(`HyovLEuAF*B!PD9tqnUb~#xk$7- z2Jer_!tMQ{4IaT2bS|A#M1^8fI~%jXaB|3033m;W{5>HbB34H6Gh|3T{iANaJ9|Lbe~ z#b=fLfBvF${_Eh`i{s4IJTZx{EF;%KU*cl zq1{>)DTXB#k_GoIZULN9al&aWJ*O1+nu&SrQy3}o`v=)i4o{2vWg*8;kF&Wb<7k}&>o=U4H_v%*npcuDY zU+ni6Kh8tE3{fjhuRv@2Bt7`ONYy{iQzQRVCS;aQIF1N-Q0}A z>us7Yw`p^m4<|}IN!#)BRq=EBw6rHiNsvN~tP2FY(X%7b9>d&o6$ zcey6&XuNU4i8^3kDC>kpJk>3?YElGiDA;UAqnt-<@uhFgT3r@CcUhKfH zF{UwzYD|qnbN^PBMv0L3YG_p2{=?fDjTa<W%A^aAuudMVlLzbMk6@iV?6drvJhk?p z_Ns+3-cA;gNwoJZv+w=B1ZLg*7wPtL|NW^&{O|LZrSrf0hcEUX{6F{d)Lz&(7!Jx2KiaLb zJKrA>iJeqn(a_kwt0PlVk>1`bQe#i!IA+%a`~FmfI5t;PnF5^rE2Er5C!D3nIK?9x zQ%P;m$-C3b4;M#2T0kVK78(9y_~Gc}?b*AIwn9xXjuiy@<=No4_+oH&lB3{mN(5Dk zXEEuc3-$6SSwObbH#Es^8orLjx142ZQ{61JY3yPsCmITRMmAhbFs75YIE7ykyH2h# zkB$as9Y0k){Ll&FIwtz&7Z9c{%bc>vD}hXWgu@=rikbz-+U?hDq*}adf=f{RhDk>8`G6gb0yjsejHy;XfVngD6Lz9Rm zlT|bvnPT@yMivv}2C{@)(y5NUeeOaJb`u+jY@|33#;1^*`^Y&&l2sm)H9NARA!CNE z5BGOvJJqcJ)?sBX<+=0tue}!s<@~=d_n$rF|GSrGg+CN_N=PvQ=vwa6h6^EKMX8Fe zHQlK9wI`!<&lk@PL@c}_TrZ=)2$pngPkQAqrHu_etE#df|MNb_zh?d$!D-~?-S7V% zJbPBQ|6lGsod3I@XW50VcEwwwV%&O>y@g()DqK@JE$8(&Xjk)Q*Jk{-4&bHt8LYb6 zwgshXZ{z#7Bv#v7!CKx3W{M?DZi;)!kzJH;qIj{dz6+C+2(FHYYfVWK615!7Fkkf1 z*Pps-=)3*y`B5gP7pCNeE!o}a=A6>)cjx4l6FCg9rtvP3damij6gvQAMHhVFN@S7V`Uen%DpBT>!Rn{eNDz|DV4&c<}$-%d^ao zxSM+}8_)gUQEq_h#gfH^?6!FUO4k_HmMR?kN2O~G{TkK1wEG#9rnCWflUZP$Az44G zKxshg3Ken&l*hxX@TG>MAvfNgWY1e?FxJhSR~m|%GKI8w<&l8$uJD%@mH(Dc-TE&X z#}l%(@V}M$A6`DMod0^r|9MYOt!z7X!6D0hh#s5VqX*GqQ{mD+`V9n#5zqSbvB3^v z5%NGs{n3l7Kr0VTSsS-(MuS=B<5Eh;CIS;E5Wwf+9;ZS`NN_NlQng3fQ_kj8p-y-o z>Ab>zo@Cv>IA+&BK#0%jO^OrYhd?Q#FpM#s*-*u90~Up1=<{cNb*YXVG7%?lSw#Bi zY&Ihi#Zp60F~>6^y>nms*2?qo1xaHXV$s~B-*Hp=m}QA{ttir`qIeQM}c2Ec6|rD=>A{x-D}b)WCRqH5LO!G|vw-bMIE7P^i8M 
z8Z$cKSmIIabw7XC+qocoTr8RY!S-eH8)4LH3kvROO0Hp1H0YM&LP zBzklj_`kD}bre+MgGU~`JWfhUp(0V_wsBzKa0-7QvF;>&(fTa#pP_N z38J&C`EQnx#qSro*K{o9mjCd{Sk_CdQtf_#f?&*exFQGSoAY*P8zA!BoN` z|HJ!WuW~YKYwf$n#>Eu#Le0VpuNC9U&(SQrXoT;V@|&r9-_V+39y4f5K9wqGX)VH~ z*GtjPQXORCRW>gwZF!7Tcu^_n|Jm5M+mHB#jSPo*FFcrxr&FBxfBHOV9n*y$&5WSW z`skxRx=!JZ4;lR1UWW=VDg~8I!Z@jU(};{FAC?k_(-}#mH$}adw)#I^J_R-9pQ?$z zLAHdpGK)l1ZYZ);)dz|7Ah8}K)`P^lM~OA3!u{o*kC+zaH%EJ>36yKhGoM zH=RiII3@{t95Z&6rKr11P3fYpr=oOGx2vyr--GkvkHCZZ)tF^Tg!->gL__I)5JcLK zzJ_5_OYVMC8|&EBxfM5rWBjtW1F~-IgIyhyd>a`12F9lKE@92xw6+c>U%&P#C}A6R zSXW-Z-kH(bHXiGDEZSg<&p~%szqJc5Hg%KQpf;~xai|sQ4JsKjm`HmD9d(`J*%*1pgh*ZS4<{5f(;*ZNd%nt>)D&(BrcI~trBBU5-K=@?D1_?|3m zJC7wBO2v4n_r#R@|CLZF6j3Ik2ahCC<1dZXS08rJSLB8YDU|p(FbfzxCjW9pplbz8 zY{FjDy|8~BUz`YpIYDqVGnw?z0K$Bb(NQ3wcgQ#=B4uz)MO8uJ6mw}pjcif%O-`xM zAA7KRt?Zuv<4Z5uTYZ-4f2l~V4tSUP-`?}*FDv@rvxofO_ww9@{`YA~PVq-xI=9D- z@~ih97N}dyIlf!THr{>WAdWGOQTK96%=uvyYWz^dh=9i2jR6%f=xA`}Tw6}Cn9zDO z7!CxYi6=6wJV`kV2~o19*DXqk=D)x6a918T|9evs&*&s!oNT*|mGgi9d1?Ru*}>t1 z{dX_VBQ(HL5}pVo8PemAuBRkHqm0I|kfu1i!V@BTok!?W%>v`w5=cx*9HU9hMmm`j zO(r{#en`?efy>#v_c)0-0{BABIW^>pVioY&K&F`r&wpBFaUlH=%MD{?+JrdZSOeeeQANy0xlieJU zcD7`k2S)BbLOAo(BFS<_S$E3bccxmg(ecX+;ZMNHO6U zL_k?85- z2Z##vQ^Fu9m~e{{iT?R`DrGAAySw&a!zgAG_z@fgBNDP15kZ!?3cF8wozA02NPDo* zF^fp2ay>_U;0fk_h1Cqx=vH;#nf;O!4t7#gZ!R9CQZ{>&GXMd>_sCd;8D+vXfVi z)T?K`mw(woM25X5ijTc||Mg0JKiF4%^%h?d1b2>o9APp2xPv~18IRTfW12*>M5;e0 zoTTc1e0}v11j@%zd_|*=Xo>|IkzC-ANXbVfIbrWv&DGNFW5!nLDdzDSSArp+A{4X^ zC@54~>_VC7BZ@Fl`V19{QcA=k)~;dnnsM~)_oq9MacMRqNkk&_ev~CLQ`2LT5h2m} z&~UDxYBE!4sVj=O;#kS3g_eEqotTx(Z(33N`CmGnp~PImq(~~CchFA*79ne`p@F$& zCl|z(D;dOY$dZtxQtaBhd3IA41@?zIf42KX+k-$-ecU_J>r9D4M}xCor*ou4Ehi$2 z#Pg#OYU`%10CDYE-9o<#K3~_8Y9PUFL378pvLM3_gy$V|&+z>1yDWqzfl!yblcL_nJjLmI*LZ}B4MP!gfX*Hao! 
z)jWv+%?L}S@d^242VJX)A~}n)kSINpT-)*7L2y)8=@>u)J_9s@af~ND#n>>`>X%}O zr%!?9o<7|vQa3B~0sX>8nia>G#yvDtg2sW73_X3SWX{v4pw`DRyKaKiL;qkI3UQ)^ zm4lH9MGDK3samFJQcy}5M;{e;g)&A#f=aduO0f*5C`i%QpSo~< z9KAo=K?$DIi4wSyp`$bvFbsyBh~oR)Lj0)ckrdwrA&)+)Q9E-S3{WH<^*RtnIk1wD zpr2xzT;*b8nR59gpXD@BYp7J-)yi6BKwiBEn^t)4+-%sGSL?o$}^@~*tHLa?X3*k0_A^icI}r|a~__C$1R5c%7_5jv9)Vo+_O~w-#^@Y zxmV8rcK8tgbx)7dbtjBdeg4mAtQ{}6T1ma@I6hJ3=a^HSFj}YNG7=PazBklFuS@b^8abGVQ z9fT(O68dhwap~Ids+g-P%6(dr*bGiyxlVE|fOltxyJh_v8 zT^bB-*&)5VeY>$WJLKyPt=Xerivp(3T@0+D!!5`4)}7kYsAjZrMB5E%M%Nt9_h;+% z`u*9R3}Z%bJA@l|WJ^Owi^q9xH!~~YBkK)nCC+6n%2aCK#ekQ|b<5GelOAu0GlX{% z>2_>U4Bc6eCyGHwYmui|^)9%fK%!f+!j?O_B_m7 zWr;{hNXL{!JKlz-AVq%zY&W*0X??4A!9;Z=-i()if0WvmDDn>{eoy5_ELgmZKe(cD zT~TB_=o*a;r)m+#_f+1EU@8-=%20c~z24zLYw2{CeZL)-go5^T!i^1OQz&R)0kmU@ zyHSTUvDAeO#Sw(ccClX3-KJ?bdy`> zGE2NV#e9Fim1z6oDzl3r6x_v}dMaUgMRf{_CYdIzJ!hSPL`75uoHUTS)b=cc~zJjZ6+5H|G0@p3j`SD=){QDCZ zyLi~!M~tHwE19}EmCBq}C67;>rhA*-^ElsLALv5Jenjst-c)z$FWw+FM%aRMrPrHo zCMMWb?s}W^77vSU!MjqTRUTN-uEXXg%H}@nAT$Vns`T9p4K&3d^sX3Tn`UurLKkfG zYL!&YtOO_2?KOCc(&frFOb$_j%w;_KD9kJq(%liKXkDoZH*J~-La(29PJA7d?MS~W z7{s(#M_di5hNPxHy>3lJNUN?HemK?*nET8is;{Y5Q>=-(X@6NfN#k@BM&WGY&P>#5 zq^ce=HOw?CO>VjTh@M$f#*_<*B%RsCgaK?F!K!CJ z<5!4|3o*V^4AJrVX+C%E!Q)#GI~yTYowzH4qzvO$beB4S%eY%_emwsp>Q{p@;w&Yp_akGe(sIDWY9hu zIxjBrJ``zknnWq1R=bc)X&cJ;RlPJv#w6pUhaR7&dQm_>ci+7p{@mR`KX;GM>|aMe znm@ih9+)@({r=+g=kAkkeIM%J@16b)6#bdZu_NlWc~SmKVnhvq_7veR&MsM*AonWv} zmPJbyMJz$4ILgc4hN+yvXhm_Pn}Ph2CLNNSk${-K8K~#y;MPC_10ql$UV-U~9zz0K z#Ng5Cs^VE zmQ#N~-AH1Ik^5L2JzEA+f-2rt%Z4Hjq$Gi3rc)-Q=Jew7Nd@Ze&N1JO=_o(C+ufx9 zwPtP&?Dn*H!$KTWxoD(Ep+G%NOjZ_Mu7{4w$vxn#YoQR_+^l~&eZj0jriw|e&WzJX zEmFWySTEfviyU+5gXVeNO$bYpYz9AB>fKTvpsr>7u6;2gLIz|!W?Xhl1L2olMt;-d zAsfr1MAD=2m?otmu&NyFT)!c65+hX{sq#UgLUBm+!eumGy6*HeOH`Tw!^ z?ayu8NdDiS^H-o)_1#!|B17BBCgr+Zl`X%t>j%q{FTw=6}CJ zcjGlPd`Yx+uPXT^rUrun8jVK3@ag$Yyva}02SR(H{p`jM{SYrA-Sd{E!98ti{ft*! 
z6{3$s?lDWB=M;GXuEt!T<)YSz;W@r;BR$}dE~PE>r7)soTrqyhWd)sqp#7p;Rter> zv;D9hU5|TzDNL{drJ7BIDTJ!=&RbLg2>rpeW@&TQYTt5*g@=x-JcI=7)`BZ{^1IUy zAua;ca;@2%n-ZZMS;3dQ0AbQAP9KiPx6wN@Wuc`XJREP*4`?E9rIHI?J@w46o^;fj zh34x?cZ|Dm;ofQ%3CF?F>Z!uSbse_az@j+N&E=3us`cj)jhwEDt|4ynvmNY(sVFo>j0!iN&^1}P=jm}kY3cUMu`MS4CKz2B>Uq{n}FaJj^?7c z$X)&We%z9krxPrccDu;y+H_9{LcF+jm!SbrczX;58I`nZ7ZZYc$x0Z-ied! zQJcHBqrCs-N$pgGbl`pK5OeUMc{Wk@4x_QF)+Tv3rOw&FCcj-DX9M7GYvoL#z?l4m zut$@hjx3p!MugP_<6A{_uceT$_=G#1d1bW+>2S(6$N3&ItC<9Kd#SAjscvct96Qx4Ww)$9eg2CmRvB1lv81qtb+{cT_1aghp11Q< zf>&{(tLj+Q!d&vI>!pJx%(Y=zqn3Xz>|ZMitx8($Fr6!o=r9n(1Ig((ax#!0OX+W` zci?z@HV8fdkv~zDX(j5*`INs9XS>zLA%G z+c1)LAytEjX(?$hgjl*AfZy|e&sb+?C6$7GL2#IOJbfvGJ^c3DO%$VwyXgE({irs# zsE}F6N1C}$dpueL-@fI`3=l;`FtX_B?9IH#sjsxv3sqPGVZqAApm2z5;6wwUVBUh7 zcd|)olqU@Y5~fY%sPkI70o}Zj3t8vBLalz$=Pb4cVlrya;tOqMGf_&ShB?<)Y{AtH zv#~!Si8j##>%KULFSwVY$JGdck}pT$`>8_epZYj{;OhMwTiX&DxAQgxrj5T1af8P? zsz$WKBMTBy0bVhoDxPX7A-ghQFm@7v=zr7OJ2E&{^$K0x)%$&08vj|Jzvb5-L@gA6 zH7CUNgbEKCiA;B#Wa6QRg}YSljfS)cdIwJ1#g%)ruI@x}Yz*lu(uyt-_8OP+@F(k2f# zC)ceGpHM%+%mH^Euc~MSp#y}$_-@z(09L)`i>0$3ul%M~qK3?-LMtT-e~?S3t(j(7 zi}D~rH7!Lg%t9*ooGj)!Olo5T`YZrP`&hdbxnY+0v9~cz_jdYiW12b|KL;%Ai7uVw z%OkKKl?TkMZhDs&mIgaupS)2p(IpbxsifWTEbH`om#~^g@{Tr18c9I#1fVZZF>iD3 zz#=79)CVwbaVbn{1d+Pq6h1$NCYeIJGt_Jnqla89_F%-GPN4uK>5zmS8HeSxAb_{@ z5s|=LX%!rfvW14_bS+=Zm#};nx>1gV_w43CZTKXFD&DIjZAz^$;+mP-RLC;6W$zX9 ztyyTkMqm({z3pOffwr0iOkzYt0Ta+i5{iNs?q6fZCIgY(oyQ z&oG>4H;2K@cB}YL;fd$cUTnh?kjZezU?b)d+%eb)l)6`G1P8~9#j?I}On4$H$goA8 z&W$~%c3vGbgdj9u85n!LKC;|+$KZ;=VWCcas z!~3NOT!+_JYNE^MbR=W><&+#woAFN>Rc8hp@g3DabUpM1O|dDEq!d%oA}PG*dWsqi zzXC@n_Tw^)wD0M;uIE_~T$R1ABw_JvBpVmkaM`n@gdV`9U(kImA1p$W#AWNYsKgs< ziAHH|Z6)`w7UmUHy0{olwHb;_2?HLidpMNSp|6`abfX^9XghCh(Ga%3Yeg)cTl@(}v#?C{c7-w{g9ina6%0>6-wgZa6Kxmr0UH6n3#FM~W>&yzfg1|ef!&Du5bHT`8*8a}xV?-$ zejc$;Pd~gne)sALd;XridwJ>IFM}eSE@BUYwpDzk9_#9iP5tPft&u9=(3? 
zoc;9v&BwPdPWB(pD~trUz?NFH(~^af7EI?)T-JS&J+De#z7hm>A&@yE4sYq6OS?2 z;!Hf>7Sp*)0lo<@gZAw{{L@CqZJ|}s7$cO=kN6;<%Ktw%s2`i zK9|#}fT@DSGord7qs%X&^~HWlQE|BaU%9z^zmlgEPNIZLiWCcqf6BALgPwv$u*ulo z%b>tp_CmUYJr21WJsG2y%j+5-Ec4Xiso-^E1iJ!1!Id@_BlfHzv4^oP_W<1D&QOqx zB}e<*{fcv>JwV(LOGr_0aqrB%DWfee0BR8!!=@PP7W_>P7z%y+*z7I1JQ!N5nLJhv zi@E-oLF7ZUYog;x@HYKi{{EK?m~UDM`tApT39pc0l_$H!or+CZ+GdTaM?G8GQ}l9y zFMIc`5$88O4oeKH8-j%8RmHB1tVLQg3CfFkhYHXi;~+@bODe0GU-w%Y*Fh~~dF`_S zwj9m{FNfFH*CV-@oei+u`-5Prz)vebhnNQb8S0zkrSVFzD{e7MOp5XV>bQY9PxHqu zSjU8;n3)D8MK8sA%!yiWna6goegf3(Pys3@y0idnGHI%+Yg?Y69!QPBdw?)0uK8+n z6Wah{2NqQ8Q=gZ*v{5r_Bbw(LrNv_G@nYtP9nHC#p})nEQ+7V-G()dm!g6p3SAb`) zI}#tQ*sG^OPMv)}TpD48iiz@QE{2~FqG_9r-B&ky!>9@^VVr!D_53;MLzLbv+c#;@ zIYyG%zlbKkf0cFK-5FFwm7P0?KCf=hFTJ#gB_FAjqI(Ng3%0DD2K1Ku4=NtHcbg^d zhF3xfC?V;uAg+!j(tCazg*RFD>RkfKq3m12*0b=LkQ;)m-%=__TXF_gCF*@z<~!0g z&G;HQ zG&GdlN!~Be8Lj+GQws$uQ$-DVwG#-HA@tmv)1{G6(*gm+@T)BQ9E;!Yj!l;RrWDgWd)Ia9zCrteMn@z=Ghu8^p-OT18iESkz4ncqQ!@^)klJ=Hf z=+;#!Y`tP~R=4(l!BwtM=!$HQ7GMXYJz=@n$PUEEBleWcSvLWAI|bX#(#NQ_^n^pRpSHYU~ugIAKhYEUGF`_diD_T?~I~#QO_XlKPCxuj44Th$2 z{OVJ($?{|+lxF22%DP_a`bFptT4N?Hp?;ve&@827Al&**rP3hVi!5WQJa(SB!>${O;+D?J10&{oM- zfk_;zsUD<=MS}@fvKSn)!AoBA%KhVoG1|C)eBwsA|HDg%Z17y1H#7H-Q^N}}IM^2o z1@ymV&L8Rdlg7;Y6aBm)5L>Jtf0ZMLjVDalABtBgMm~a9xW;vJY;*H^12Df^tvO=WOr7O zEqEmi-WM*v$}kQouZ|kC*qW{vm+Zld>!mPq;dGPgU_AsYUI7-V{W&L>hsUVQ;6v7q zz=_mM#~6k0a0xjDe*0prpH-?8^!u)I%;3wqR)!Ms0G&aDQy2!}Fqm ztYu!YMiEkPh|Fbpy?5jGZ4L``2$4`dl?$Pp1n`{x5K!aDMsyN^Ug^xY{Yu?a;2CWJ zpMmbN?>T8({>`(&4`%~tdp3AHJ{x!k#7evL2l58&Ks8iYBgKALPc476JIQ_&s} zgEo90NanP`nwx4Qo1qYZ+Z1ShJMl=GTU03K+~Usl@p#N;sJ<|NByQG2$Z`?ijVL0p0;@JO3@YGjl4^1;F{x&(-xMz zlGnq+Xhn>~fUX1>(Y=caVm-PJ5OM}wsW~?%_g0fN+yifq*_i$DoA22l*&o*v4K+ef z6#E=*j71~j)X2qGNdlT{97t#xHfOOtqZc;IftY#_U#^W@gcO{P?zZ6OBK4HS_?Rl0 zbmxsKAp66*!`-X8U=P=NX?;L4ff9ujOgE-QQ`K@=30BNyRT^<$r_z+$R<2pca(T&Z zH@z&j+N=z_Z=_a4nrar!6Xelmv5F$+YZRZ^&WrFNjF?DgsCgkdu`3BN9`6Xtqh zC&FAps!l7XLglA=S7bP%LGK0iP9LS<7AC~FFM!v3XX~u306ZSA=gdm0*{cteBRBW= 
zDKXr)*zCjPh($W>7D@4ZNv>RqV3S(d6Xniu5{e2z0%Ky*omHVfeLLPj42%pP>BZ6r zYo%6iMLpNaCZ))A(t?F1TW4I@pdV5sx=6ZPKzDmM8LH3UZZ}yU>GhH0_Ebag85%&O z8J~6L+>J3iqYgaBM({F82Fx=78x>BM z{?+G9d_d=qFn@b6uj{3K^369hS;i%5On;SP6A0jaCb|RMqv}?Hr+NdDU+3PW!5LGoU+T$_Y5kGYW=jMnO}{N`4lW zJ%r$H0Tn(#zR%@s4u3=GTMxHkb5UX957ynbwYDX_97GfHud^sA$pGgL}U^A5u@D{P|ry3 zo9;mTmldDcFGKvP>-t}T_`8KZ(!6joE#0Aj@KHg9PjZ)IHn_J$;SeFbsTE3pPRVY# z&vufNL+(^^QAx!7?l0~Wxw^*t1TVR*UvgP#bY^(N=hJ`s`v%~a@+~$6&QBYo03b)t zWI=gZsLq!RJc_PkpP=>kMiv*b5)Fh=8?%B@?l}LYFjm@Hs6u#vW`Ci)o5Q`cA)n%| z_oFcnzUn|ed!@~fqci2KK{jzZ&C5`FEApiu>9ZC1_ z{V%!U3sDQ>`1=JWusvZv|Mk~_;tMf&GC)0ZaL5MIzH-Ykc;e>&+uwesOK%pEdt%@W+HT)`yP9+R$Y|pzR<3?~xLqcHehYfG z)CRcpod7q{LIJCUxJ{x^U?(E^u?Kw2Xd%PQm z<^YcO&Y>9`<$X!kJ|brP*~^{np|*MR-7P5D$D6G(skh-x@?suNxvf`Tme{Z8Om9-& zf+MjePk!MMuAc3a1N*hA>|W=6v0z3n)mw1s>t`7?S_VfrdXoKAA2=0*@yr3rR z->1RpCgg;oGpar1dq-EZicMYE}uGTn9a_ zV-=4=FBU>knPcnAHPHLh{h`hAUK=KnW4DoF91-NQbO63U10N9Di3Flm4*ifg7ubPu+e;0hb~l zv%tu=*hwovi-JSRLllHRAoat~u4HI~7_~1PZm~Bf;jHPoa+vPWnhlDY!v`aF69DLn*#L?_{9pWybX^ZW4@(x}Nql6uf1?p|wKr-eTJpr9}i)r2>rbQ{g-4fskyoFv&4INSZlR|OykB1 z>WX&^_^vaV(BC0Sg%{VnsPUf@Bc}42#KRU?`Rm4s*(0v+qu#5agU7} zIjWI#k#HZlD(1orCq`f2^u@s8)kv@u7BeFlT`xbLAYtC=QcNJ4;(j?s_I!+g$-YbU zTN_IY{RjQVe8HwBC$RGW{|M8FZlkS?*`o(2qVcIz3jQ4T*CMT0^4z|r&$WoWgilTv= zPLK;Bg=G&WClh;+yvCMBUdl>jWeDJ!xwGy8$8V5<_B_N0oA(NwICw{EFTu4i)0-*w&hsR2`MfOt21zB}^zXua8Dl94G5%y8#>IRJoVMPqBd@Td?}kOnUN zoeRdxvX!6Krb{e$LsRio>0jbDx!1|UHkoUG{IB>TNIW_b`}7}aF=Jb5b^Kw zp>en|46>h(lAjlkZQBX3F4OeYegFf3j-4FUP8cTvzpN@*ESoJ+0z03~Q36-p2v~)e z_3+Vm<35zYp~rim+$eZKPuV!;Xyr(O0GrYW5D3~gUas}-4i(V+{Q3LPAHP40$Kz27 zk^= 1.21.0-0' + catalog.cattle.io/namespace: longhorn-system + catalog.cattle.io/permits-os: linux,windows + catalog.cattle.io/provides-gvr: longhorn.io/v1beta1 + catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0' + catalog.cattle.io/release-name: longhorn + catalog.cattle.io/type: cluster-tool + catalog.cattle.io/upstream-version: 1.7.1 +apiVersion: v1 +appVersion: v1.7.1 +description: Longhorn 
is a distributed block storage system for Kubernetes. +home: https://github.com/longhorn/longhorn +icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png +keywords: +- longhorn +- storage +- distributed +- block +- device +- iscsi +- nfs +kubeVersion: '>=1.21.0-0' +maintainers: +- email: maintainers@longhorn.io + name: Longhorn maintainers +name: longhorn +sources: +- https://github.com/longhorn/longhorn +- https://github.com/longhorn/longhorn-engine +- https://github.com/longhorn/longhorn-instance-manager +- https://github.com/longhorn/longhorn-share-manager +- https://github.com/longhorn/longhorn-manager +- https://github.com/longhorn/longhorn-ui +- https://github.com/longhorn/longhorn-tests +- https://github.com/longhorn/backing-image-manager +version: 104.2.0+up1.7.1 diff --git a/charts/longhorn/104.2.0+up1.7.1/README.md b/charts/longhorn/104.2.0+up1.7.1/README.md new file mode 100644 index 0000000000..adb190be3b --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/README.md @@ -0,0 +1,50 @@ +# Longhorn Chart + +> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only. + +> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version. + +> **Note**: Use Helm 3 when installing and upgrading Longhorn. Helm 2 is [no longer supported](https://helm.sh/blog/helm-2-becomes-unsupported/). + +## Source Code + +Longhorn is 100% open source software. Project source code is spread across a number of repos: + +1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine +2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager +3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager +4. 
Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager +5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager +6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui + +## Prerequisites + +1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.) +2. Kubernetes >= v1.21 +3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` has been installed in all nodes of the Kubernetes cluster. +4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already. + +## Upgrading to Kubernetes v1.25+ + +Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API. + +As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`. + +> **Note:** +> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).** +> +> If your charts get stuck in this state, you may have to clean up your Helm release secrets. +Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart. + +As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. 
Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards. + +## Uninstallation + +To prevent Longhorn from being accidentally uninstalled (which leads to data loss), we introduce a new setting, deleting-confirmation-flag. If this flag is **false**, the Longhorn uninstallation job will fail. Set this flag to **true** to allow Longhorn uninstallation. You can set this flag using the setting page in the Longhorn UI or `kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag` + +To prevent damage to the Kubernetes cluster, we recommend deleting all Kubernetes workloads using Longhorn volumes (PersistentVolume, PersistentVolumeClaim, StorageClass, Deployment, StatefulSet, DaemonSet, etc). + +From Rancher Cluster Explorer UI, navigate to Apps page, delete app `longhorn` then app `longhorn-crd` in Installed Apps tab. + +--- +Please see [link](https://github.com/longhorn/longhorn) for more information. diff --git a/charts/longhorn/104.2.0+up1.7.1/app-readme.md b/charts/longhorn/104.2.0+up1.7.1/app-readme.md new file mode 100644 index 0000000000..321e5193c4 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/app-readme.md @@ -0,0 +1,27 @@ +# Longhorn + +Longhorn is a lightweight, reliable and easy to use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn. + +Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups! + +**Important**: Please install Longhorn chart in `longhorn-system` namespace only. 
+ +**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version. + +[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md) + + +## Upgrading to Kubernetes v1.25+ + +Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API. + +As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`. + +> **Note:** +> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).** +> +> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets. + +Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart. + +As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards. 
\ No newline at end of file diff --git a/charts/longhorn/104.2.0+up1.7.1/questions.yaml b/charts/longhorn/104.2.0+up1.7.1/questions.yaml new file mode 100644 index 0000000000..bc31510d88 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/questions.yaml @@ -0,0 +1,986 @@ +categories: +- storage +namespace: longhorn-system +questions: +- variable: image.defaultImage + default: "true" + description: "Use default Longhorn images" + label: Use Default Images + type: boolean + show_subquestion_if: false + group: "Longhorn Images" + subquestions: + - variable: image.longhorn.manager.repository + default: rancher/mirrored-longhornio-longhorn-manager + description: "Repository for the Longhorn Manager image." + type: string + label: Longhorn Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.manager.tag + default: v1.7.1 + description: "Tag for the Longhorn Manager image." + type: string + label: Longhorn Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.engine.repository + default: rancher/mirrored-longhornio-longhorn-engine + description: "Repository for the Longhorn Engine image." + type: string + label: Longhorn Engine Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.engine.tag + default: v1.7.1 + description: "Tag for the Longhorn Engine image." + type: string + label: Longhorn Engine Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.ui.repository + default: rancher/mirrored-longhornio-longhorn-ui + description: "Repository for the Longhorn UI image." + type: string + label: Longhorn UI Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.ui.tag + default: v1.7.1 + description: "Tag for the Longhorn UI image." 
+ type: string + label: Longhorn UI Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.instanceManager.repository + default: rancher/mirrored-longhornio-longhorn-instance-manager + description: "Repository for the Longhorn Instance Manager image." + type: string + label: Longhorn Instance Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.instanceManager.tag + default: v1.7.1 + description: "Tag for the Longhorn Instance Manager image." + type: string + label: Longhorn Instance Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.shareManager.repository + default: rancher/mirrored-longhornio-longhorn-share-manager + description: "Repository for the Longhorn Share Manager image." + type: string + label: Longhorn Share Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.shareManager.tag + default: v1.7.1 + description: "Tag for the Longhorn Share Manager image." + type: string + label: Longhorn Share Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.backingImageManager.repository + default: rancher/mirrored-longhornio-backing-image-manager + description: "Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn Backing Image Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.backingImageManager.tag + default: v1.7.1 + description: "Tag for the Backing Image Manager image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn Backing Image Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.supportBundleKit.repository + default: rancher/mirrored-longhornio-support-bundle-kit + description: "Repository for the Longhorn Support Bundle Manager image." 
+ type: string + label: Longhorn Support Bundle Kit Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.supportBundleKit.tag + default: v0.0.42 + description: "Tag for the Longhorn Support Bundle Manager image." + type: string + label: Longhorn Support Bundle Kit Image Tag + group: "Longhorn Images Settings" + - variable: image.csi.attacher.repository + default: rancher/mirrored-longhornio-csi-attacher + description: "Repository for the CSI attacher image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Attacher Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.attacher.tag + default: v4.6.1 + description: "Tag for the CSI attacher image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Attacher Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.provisioner.repository + default: rancher/mirrored-longhornio-csi-provisioner + description: "Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Provisioner Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.provisioner.tag + default: v4.0.1 + description: "Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Provisioner Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.nodeDriverRegistrar.repository + default: rancher/mirrored-longhornio-csi-node-driver-registrar + description: "Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Node Driver Registrar Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.nodeDriverRegistrar.tag + default: v2.12.0 + description: "Tag for the CSI Node Driver Registrar image. 
When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Node Driver Registrar Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.resizer.repository + default: rancher/mirrored-longhornio-csi-resizer + description: "Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Driver Resizer Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.resizer.tag + default: v1.11.1 + description: "Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Driver Resizer Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.snapshotter.repository + default: rancher/mirrored-longhornio-csi-snapshotter + description: "Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Driver Snapshotter Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.snapshotter.tag + default: v7.0.2 + description: "Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Driver Snapshotter Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.livenessProbe.repository + default: rancher/mirrored-longhornio-livenessprobe + description: "Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Liveness Probe Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.livenessProbe.tag + default: v2.14.0 + description: "Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value." 
+ type: string + label: Longhorn CSI Liveness Probe Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.openshift.oauthProxy.repository + default: rancher/mirrored-longhornio-openshift-origin-oauth-proxy + description: "Repository for the OAuth Proxy image. This setting applies only to OpenShift users" + type: string + label: OpenShift OAuth Proxy Image Repository + group: "OpenShift Images" + - variable: image.openshift.oauthProxy.tag + default: 4.15 + description: "Tag for the OAuth Proxy image. This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later." + type: string + label: OpenShift OAuth Proxy Image Tag + group: "OpenShift Images" +- variable: privateRegistry.registryUrl + label: Private registry URL + description: "URL of a private registry. When unspecified, Longhorn uses the default system registry." + group: "Private Registry Settings" + type: string + default: "" +- variable: privateRegistry.registrySecret + label: Private registry secret name + description: "Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name." + group: "Private Registry Settings" + type: string + default: "" +- variable: privateRegistry.createSecret + default: "true" + description: "Setting that allows you to create a private registry secret." + type: boolean + group: "Private Registry Settings" + label: Create Secret for Private Registry Settings + show_subquestion_if: true + subquestions: + - variable: privateRegistry.registryUser + label: Private registry user + description: "User account used for authenticating with a private registry." + type: string + default: "" + - variable: privateRegistry.registryPasswd + label: Private registry password + description: "Password for authenticating with a private registry." 
+ type: password + default: "" +- variable: longhorn.default_setting + default: "false" + description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn." + label: "Customize Default Settings" + type: boolean + show_subquestion_if: true + group: "Longhorn Default Settings" + subquestions: + - variable: csi.kubeletRootDir + default: + description: "kubelet root directory. When unspecified, Longhorn uses the default value." + type: string + label: Kubelet Root Directory + group: "Longhorn CSI Driver Settings" + - variable: csi.attacherReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value (\"3\")." + label: Longhorn CSI Attacher replica count + group: "Longhorn CSI Driver Settings" + - variable: csi.provisionerReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value (\"3\")." + label: Longhorn CSI Provisioner replica count + group: "Longhorn CSI Driver Settings" + - variable: csi.resizerReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value (\"3\")." + label: Longhorn CSI Resizer replica count + group: "Longhorn CSI Driver Settings" + - variable: csi.snapshotterReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value (\"3\")." + label: Longhorn CSI Snapshotter replica count + group: "Longhorn CSI Driver Settings" + - variable: defaultSettings.backupTarget + label: Backup Target + description: "Endpoint used to access the backupstore. 
(Options: \"NFS\", \"CIFS\", \"AWS\", \"GCP\", \"AZURE\")" + group: "Longhorn Default Settings" + type: string + default: + - variable: defaultSettings.backupTargetCredentialSecret + label: Backup Target Credential Secret + description: "Name of the Kubernetes secret associated with the backup target." + group: "Longhorn Default Settings" + type: string + default: + - variable: defaultSettings.allowRecurringJobWhileVolumeDetached + label: Allow Recurring Job While Volume Is Detached + description: 'Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.' + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.snapshotMaxCount + label: Snapshot Maximum Count + description: 'Maximum snapshot count for a volume. The value should be between 2 to 250.' + group: "Longhorn Default Settings" + type: int + min: 2 + max: 250 + default: 250 + - variable: defaultSettings.createDefaultDiskLabeledNodes + label: Create Default Disk on Labeled Nodes + description: 'Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.' + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.defaultDataPath + label: Default Data Path + description: 'Default path for storing data on a host. The default value is "/var/lib/longhorn/".' + group: "Longhorn Default Settings" + type: string + default: "/var/lib/longhorn/" + - variable: defaultSettings.defaultDataLocality + label: Default Data Locality + description: 'Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.' 
group: "Longhorn Default Settings" + type: enum + options: + - "disabled" + - "best-effort" + default: "disabled" + - variable: defaultSettings.replicaSoftAntiAffinity + label: Replica Node Level Soft Anti-Affinity + description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default, false.' + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.replicaAutoBalance + label: Replica Auto Balance + description: 'Enabling this setting automatically rebalances replicas when an available node is discovered.' + group: "Longhorn Default Settings" + type: enum + options: + - "disabled" + - "least-effort" + - "best-effort" + default: "disabled" + - variable: defaultSettings.storageOverProvisioningPercentage + label: Storage Over Provisioning Percentage + description: "Percentage of storage that can be allocated relative to hard drive capacity. The default value is 100." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 100 + - variable: defaultSettings.storageMinimalAvailablePercentage + label: Storage Minimal Available Percentage + description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default, 25." + group: "Longhorn Default Settings" + type: int + min: 0 + max: 100 + default: 25 + - variable: defaultSettings.storageReservedPercentageForDefaultDisk + label: Storage Reserved Percentage For Default Disk + description: "The reserved percentage specifies the percentage of disk space that will not be allocated to the default disk on each new Longhorn node." + group: "Longhorn Default Settings" + type: int + min: 0 + max: 100 + default: 30 + - variable: defaultSettings.upgradeChecker + label: Enable Upgrade Checker + description: 'Upgrade Checker that periodically checks for new Longhorn versions. 
When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default.' + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.defaultReplicaCount + label: Default Replica Count + description: "Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is \"3\"." + group: "Longhorn Default Settings" + type: int + min: 1 + max: 20 + default: 3 + - variable: defaultSettings.defaultLonghornStaticStorageClass + label: Default Longhorn Static StorageClass Name + description: "Default Longhorn StorageClass. \"storageClassName\" is assigned to PVs and PVCs that are created for an existing Longhorn volume. \"storageClassName\" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is \"longhorn-static\"." + group: "Longhorn Default Settings" + type: string + default: "longhorn-static" + - variable: defaultSettings.backupstorePollInterval + label: Backupstore Poll Interval + description: "Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is \"300\". When the value is \"0\", polling is disabled." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 300 + - variable: defaultSettings.failedBackupTTL + label: Failed Backup Time to Live + description: "Number of minutes that Longhorn keeps a failed backup resource. When the value is \"0\", automatic deletion is disabled." 
+ group: "Longhorn Default Settings" + type: int + min: 0 + default: 1440 + - variable: defaultSettings.restoreVolumeRecurringJobs + label: Restore Volume Recurring Jobs + description: "Restore recurring jobs from the backup volume on the backup target and create recurring jobs if not exist during a backup restoration." + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.recurringSuccessfulJobsHistoryLimit + label: Cronjob Successful Jobs History Limit + description: "This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 1 + - variable: defaultSettings.recurringFailedJobsHistoryLimit + label: Cronjob Failed Jobs History Limit + description: 'Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.' + group: "Longhorn Default Settings" + type: int + min: 0 + default: 1 + - variable: defaultSettings.recurringJobMaxRetention + label: Maximum Retention Number for Recurring Job + description: "Maximum number of snapshots or backups to be retained." + group: "Longhorn Default Settings" + type: int + default: 100 + - variable: defaultSettings.supportBundleFailedHistoryLimit + label: SupportBundle Failed History Limit + description: "This setting specifies how many failed support bundles can exist in the cluster. Set this value to **0** to have Longhorn automatically purge all failed support bundles." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 1 + - variable: defaultSettings.autoSalvage + label: Automatic salvage + description: "Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). 
Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly + label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly + description: 'Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.' + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.disableSchedulingOnCordonedNode + label: Disable Scheduling On Cordoned Node + description: "Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.replicaZoneSoftAntiAffinity + label: Replica Zone Level Soft Anti-Affinity + description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes that don't belong to any Zone will be treated as in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=` in the Kubernetes node object to identify the zone. By default, true." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.replicaDiskSoftAntiAffinity + label: Replica Disk Level Soft Anti-Affinity + description: 'Allow scheduling on disks with existing healthy replicas of the same volume. By default, true.' 
+ group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.allowEmptyNodeSelectorVolume + label: Allow Empty Node Selector Volume + description: "Setting that allows scheduling of empty node selector volumes to any node." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.allowEmptyDiskSelectorVolume + label: Allow Empty Disk Selector Volume + description: "Setting that allows scheduling of empty disk selector volumes to any disk." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.nodeDownPodDeletionPolicy + label: Pod Deletion Policy When Node is Down + description: "Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed." + group: "Longhorn Default Settings" + type: enum + options: + - "do-nothing" + - "delete-statefulset-pod" + - "delete-deployment-pod" + - "delete-both-statefulset-and-deployment-pod" + default: "do-nothing" + - variable: defaultSettings.nodeDrainPolicy + label: Node Drain Policy + description: "Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained." + group: "Longhorn Default Settings" + type: enum + options: + - "block-for-eviction" + - "block-for-eviction-if-contains-last-replica" + - "block-if-contains-last-replica" + - "allow-if-replica-is-stopped" + - "always-allow" + default: "block-if-contains-last-replica" + - variable: defaultSettings.detachManuallyAttachedVolumesWhenCordoned + label: Detach Manually Attached Volumes When Cordoned + description: "Setting that allows automatic detaching of manually-attached volumes when a node is cordoned." + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.priorityClass + label: Priority Class + description: "PriorityClass for system-managed Longhorn components. 
This setting can help prevent Longhorn components from being evicted under Node Pressure. Longhorn system contains user deployed components (E.g, Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (E.g, instance manager, engine image, CSI driver, etc.) Note that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`. WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." + group: "Longhorn Default Settings" + type: string + default: "longhorn-critical" + - variable: defaultSettings.replicaReplenishmentWaitInterval + label: Replica Replenishment Wait Interval + description: "The interval in seconds determines how long Longhorn will at least wait to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 600 + - variable: defaultSettings.concurrentReplicaRebuildPerNodeLimit + label: Concurrent Replica Rebuild Per Node Limit + description: "Maximum number of replicas that can be concurrently rebuilt on each node. + WARNING: + - The old setting \"Disable Replica Rebuild\" is replaced by this setting. + - Different from relying on replica starting delay to limit the concurrent rebuilding, if the rebuilding is disabled, replica object replenishment will be directly skipped. + - When the value is 0, the eviction and data locality feature won't work. But this shouldn't have any impact to any current replica rebuild and backup restore." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 5 + - variable: defaultSettings.concurrentVolumeBackupRestorePerNodeLimit + label: Concurrent Volume Backup Restore Per Node Limit + description: "Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is \"0\", restoration of volumes using a backup is disabled." 
+ group: "Longhorn Default Settings" + type: int + min: 0 + default: 5 + - variable: defaultSettings.disableRevisionCounter + label: Disable Revision Counter + description: "Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the \"volume-head-xxx.img\" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.systemManagedPodsImagePullPolicy + label: System Managed Pod Image Pull Policy + description: "Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart." + group: "Longhorn Default Settings" + type: enum + options: + - "if-not-present" + - "always" + - "never" + default: "if-not-present" + - variable: defaultSettings.allowVolumeCreationWithDegradedAvailability + label: Allow Volume Creation with Degraded Availability + description: "Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.autoCleanupSystemGeneratedSnapshot + label: Automatically Cleanup System Generated Snapshot + description: "Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed." 
+ group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.autoCleanupRecurringJobBackupSnapshot + label: Automatically Cleanup Recurring Job Backup Snapshot + description: "Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit + label: Concurrent Automatic Engine Upgrade Per Node Limit + description: "Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is \"0\", Longhorn does not automatically upgrade volume engines to the new default engine image version." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 0 + - variable: defaultSettings.backingImageCleanupWaitInterval + label: Backing Image Cleanup Wait Interval + description: "Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 60 + - variable: defaultSettings.backingImageRecoveryWaitInterval + label: Backing Image Recovery Wait Interval + description: "Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to \"failed\" or \"unknown\"." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 300 + - variable: defaultSettings.guaranteedInstanceManagerCPU + label: Guaranteed Instance Manager CPU + description: "Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is \"12\". + WARNING: + - Value 0 means removing the CPU requests from spec of instance manager pods. 
+ - Considering the possible number of new instance manager pods in a further system upgrade, this integer value ranges from 0 to 40. + - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then. + - This global setting will be ignored for a node if the field \"InstanceManagerCPURequest\" on the node is set. + - After this setting is changed, all instance manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." + group: "Longhorn Default Settings" + type: int + min: 0 + max: 40 + default: 12 + - variable: defaultSettings.logLevel + label: Log Level + description: 'Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")' + group: "Longhorn Default Settings" + type: string + default: "Info" + - variable: defaultSettings.disableSnapshotPurge + label: Disable Snapshot Purge + description: "Setting that temporarily prevents all attempts to purge volume snapshots." + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.freezeFilesystemForSnapshot + description: "Setting that freezes the filesystem on the root partition before a snapshot is created." + group: "Longhorn Default Settings" + type: boolean + default: "false" +- variable: defaultSettings.kubernetesClusterAutoscalerEnabled + label: Kubernetes Cluster Autoscaler Enabled (Experimental) + description: "Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler. 
+ WARNING: + - Replica rebuilding could be expensive because nodes with reusable replicas could get removed by the Kubernetes Cluster Autoscaler." + group: "Longhorn Default Settings" + type: boolean + default: false +- variable: defaultSettings.orphanAutoDeletion + label: Orphaned Data Cleanup + description: "Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up." + group: "Longhorn Default Settings" + type: boolean + default: false +- variable: defaultSettings.storageNetwork + label: Storage Network + description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network. + WARNING: + - This setting should change after detaching all Longhorn volumes, as some of the Longhorn system component pods will get recreated to apply the setting. Longhorn will try to block this setting update when there are attached volumes." + group: "Longhorn Default Settings" + type: string + default: +- variable: defaultSettings.deletingConfirmationFlag + label: Deleting Confirmation Flag + description: "Flag that prevents accidental uninstallation of Longhorn." + group: "Longhorn Default Settings" + type: boolean + default: "false" +- variable: defaultSettings.engineReplicaTimeout + label: Timeout between Engine and Replica + description: "Timeout between the Longhorn Engine and replicas. Specify a value between \"8\" and \"30\" seconds. The default value is \"8\"." + group: "Longhorn Default Settings" + type: int + default: "8" +- variable: defaultSettings.snapshotDataIntegrity + label: Snapshot Data Integrity + description: "This setting allows users to enable or disable snapshot hashing and data integrity checking." 
+ group: "Longhorn Default Settings" + type: string + default: "disabled" +- variable: defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation + label: Immediate Snapshot Data Integrity Check After Creating a Snapshot + description: "Hashing snapshot disk files impacts the performance of the system. The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot." + group: "Longhorn Default Settings" + type: boolean + default: "false" +- variable: defaultSettings.snapshotDataIntegrityCronjob + label: Snapshot Data Integrity Check CronJob + description: "Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files." + group: "Longhorn Default Settings" + type: string + default: "0 0 */7 * *" +- variable: defaultSettings.removeSnapshotsDuringFilesystemTrim + label: Remove Snapshots During Filesystem Trim + description: "This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed and stops at the snapshot containing multiple children." + group: "Longhorn Default Settings" + type: boolean + default: "false" +- variable: defaultSettings.fastReplicaRebuildEnabled + label: Fast Replica Rebuild Enabled + description: "Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to \"enable\" or \"fast-check\"." + group: "Longhorn Default Settings" + type: boolean + default: false +- variable: defaultSettings.replicaFileSyncHttpClientTimeout + label: Timeout of HTTP Client to Replica File Sync Server + description: "In seconds. The setting specifies the HTTP client timeout to the file sync server." 
+ group: "Longhorn Default Settings" + type: int + default: "30" +- variable: defaultSettings.longGRPCTimeOut + label: Long gRPC Timeout + description: "Number of seconds that Longhorn allows for the completion of replica rebuilding and snapshot cloning operations." + group: "Longhorn Default Settings" + type: int + default: "86400" +- variable: defaultSettings.backupCompressionMethod + label: Backup Compression Method + description: "Setting that allows you to specify a backup compression method." + group: "Longhorn Default Settings" + type: string + default: "lz4" +- variable: defaultSettings.backupConcurrentLimit + label: Backup Concurrent Limit Per Backup + description: "Maximum number of worker threads that can concurrently run for each backup." + group: "Longhorn Default Settings" + type: int + min: 1 + default: 2 +- variable: defaultSettings.restoreConcurrentLimit + label: Restore Concurrent Limit Per Backup + description: "This setting controls how many worker threads per restore concurrently." + group: "Longhorn Default Settings" + type: int + min: 1 + default: 2 +- variable: defaultSettings.allowCollectingLonghornUsageMetrics + label: Allow Collecting Longhorn Usage Metrics + description: "Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses." + group: "Longhorn Default Settings" + type: boolean + default: true +- variable: defaultSettings.v1DataEngine + label: V1 Data Engine + description: "Setting that allows you to enable the V1 Data Engine." 
+ group: "Longhorn V1 Data Engine Settings" + type: boolean + default: true +- variable: defaultSettings.v2DataEngine + label: V2 Data Engine + description: "Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments. + WARNING: + - DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will block this setting update when there are attached volumes. + - When the V2 Data Engine is enabled, each instance-manager pod utilizes 1 CPU core. This high CPU usage is attributed to the spdk_tgt process running within each instance-manager pod. The spdk_tgt process is responsible for handling input/output (IO) operations and requires intensive polling. As a result, it consumes 100% of a dedicated CPU core to efficiently manage and process the IO requests, ensuring optimal performance and responsiveness for storage operations." + group: "Longhorn V2 Data Engine (Preview Feature) Settings" + type: boolean + default: false +- variable: defaultSettings.v2DataEngineHugepageLimit + label: V2 Data Engine + description: "This allows users to configure maximum huge page size (in MiB) for the V2 Data Engine." + group: "Longhorn V2 Data Engine (Preview Feature) Settings" + type: int + default: "2048" +- variable: defaultSettings.v2DataEngineLogLevel + label: V2 Data Engine + description: "Setting that allows you to configure the log level of the SPDK target daemon (spdk_tgt) of the V2 Data Engine." + group: "Longhorn V2 Data Engine (Preview Feature) Settings" + type: enum + options: + - "Disabled" + - "Error" + - "Warn" + - "Notice" + - "Info" + - "Debug" + default: "Notice" +- variable: defaultSettings.v2DataEngineLogFlags + label: V2 Data Engine + description: "Setting that allows you to configure the log flags of the SPDK target daemon (spdk_tgt) of the V2 Data Engine." 
+ group: "Longhorn V2 Data Engine (Preview Feature) Settings" + type: string + default: +- variable: defaultSettings.autoCleanupSnapshotWhenDeleteBackup + label: Auto Cleanup Snapshot When Delete Backup + description: "Setting that automatically cleans up the snapshot when the backup is deleted." + group: "Longhorn Default Settings" + type: boolean + default: false +- variable: defaultSettings.rwxVolumeFastFailover + label: RWX Volume Fast Failover (Experimental) + description: "Turn on logic to detect and move RWX volumes quickly on node failure." + group: "Longhorn Default Settings" + type: boolean + default: false +- variable: persistence.defaultClass + default: "true" + description: "Setting that allows you to specify the default Longhorn StorageClass." + label: Default Storage Class + group: "Longhorn Storage Class Settings" + required: true + type: boolean +- variable: persistence.reclaimPolicy + label: Storage Class Retain Policy + description: "Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: \"Retain\", \"Delete\")" + group: "Longhorn Storage Class Settings" + required: true + type: enum + options: + - "Delete" + - "Retain" + default: "Delete" +- variable: persistence.disableRevisionCounter + label: Default Storage Class Disable Revision Counter + description: "Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the volume-head-xxx.img file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. (Options: \"true\", \"false\")" + group: "Longhorn Storage Class Settings" + required: true + type: enum + options: + - "true" + - "false" + default: "true" +- variable: persistence.defaultClassReplicaCount + description: "Replica count of the default Longhorn StorageClass." 
+ label: Default Storage Class Replica Count + group: "Longhorn Storage Class Settings" + type: int + min: 1 + max: 10 + default: 3 +- variable: persistence.defaultDataLocality + description: "Data locality of the default Longhorn StorageClass. (Options: \"disabled\", \"best-effort\")" + label: Default Storage Class Data Locality + group: "Longhorn Storage Class Settings" + type: enum + options: + - "disabled" + - "best-effort" + default: "disabled" +- variable: persistence.recurringJobSelector.enable + description: "Setting that allows you to enable the recurring job selector for a Longhorn StorageClass." + group: "Longhorn Storage Class Settings" + label: Enable Storage Class Recurring Job Selector + type: boolean + default: false + show_subquestion_if: true + subquestions: + - variable: persistence.recurringJobSelector.jobList + description: 'Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)' + label: Storage Class Recurring Job Selector List + group: "Longhorn Storage Class Settings" + type: string + default: +- variable: persistence.defaultDiskSelector.enable + description: "Setting that allows you to enable the disk selector for the default Longhorn StorageClass." + group: "Longhorn Storage Class Settings" + label: Enable Storage Class Disk Selector + type: boolean + default: false + show_subquestion_if: true + subquestions: + - variable: persistence.defaultDiskSelector.selector + label: Storage Class Disk Selector + description: 'Disk selector for the default Longhorn StorageClass. Longhorn uses only disks with the specified tags for storing volume data. (Examples: "nvme,sata")' + group: "Longhorn Storage Class Settings" + type: string + default: +- variable: persistence.defaultNodeSelector.enable + description: "Setting that allows you to enable the node selector for the default Longhorn StorageClass." 
+ group: "Longhorn Storage Class Settings" + label: Enable Storage Class Node Selector + type: boolean + default: false + show_subquestion_if: true + subquestions: + - variable: persistence.defaultNodeSelector.selector + label: Storage Class Node Selector + description: 'Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")' + group: "Longhorn Storage Class Settings" + type: string + default: +- variable: persistence.backingImage.enable + description: "Setting that allows you to use a backing image in a Longhorn StorageClass." + group: "Longhorn Storage Class Settings" + label: Default Storage Class Backing Image + type: boolean + default: false + show_subquestion_if: true + subquestions: + - variable: persistence.backingImage.name + description: 'Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.' + label: Storage Class Backing Image Name + group: "Longhorn Storage Class Settings" + type: string + default: + - variable: persistence.backingImage.expectedChecksum + description: 'Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass. + WARNING: + - If the backing image name is not specified, setting this field is meaningless. + - It is not recommended to set this field if the data source type is \"export-from-volume\".' + label: Storage Class Backing Image Expected SHA512 Checksum + group: "Longhorn Storage Class Settings" + type: string + default: + - variable: persistence.backingImage.dataSourceType + description: 'Data source type of a backing image used in a Longhorn StorageClass. If the backing image exists in the cluster, Longhorn uses this setting to verify the image. If the backing image does not exist, Longhorn creates one using the specified data source type. 
+ WARNING: + - If the backing image name is not specified, setting this field is meaningless. + - As for backing image creation with data source type \"upload\", it is recommended to do it via UI rather than StorageClass here. Uploading requires file data sending to the Longhorn backend after the object creation, which is complicated if you want to handle it manually.' + label: Storage Class Backing Image Data Source Type + group: "Longhorn Storage Class Settings" + type: enum + options: + - "" + - "download" + - "upload" + - "export-from-volume" + default: "" + - variable: persistence.backingImage.dataSourceParameters + description: "Data source parameters of a backing image used in a Longhorn StorageClass. You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`) + WARNING: + - If the backing image name is not specified, setting this field is meaningless. + - Be careful of the quotes here." + label: Storage Class Backing Image Data Source Parameters + group: "Longhorn Storage Class Settings" + type: string + default: +- variable: persistence.removeSnapshotsDuringFilesystemTrim + description: "Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: \"ignored\", \"enabled\", \"disabled\")" + label: Default Storage Class Remove Snapshots During Filesystem Trim + group: "Longhorn Storage Class Settings" + type: enum + options: + - "ignored" + - "enabled" + - "disabled" + default: "ignored" +- variable: ingress.enabled + default: "false" + description: "Expose app using Layer 7 Load Balancer - ingress" + type: boolean + group: "Services and Load Balancing" + label: Expose app using Layer 7 Load Balancer + show_subquestion_if: true + subquestions: + - variable: ingress.host + default: "xip.io" + description: "Hostname of the Layer 7 load balancer." 
+ type: hostname + required: true + label: Layer 7 Load Balancer Hostname + - variable: ingress.path + default: "/" + description: "Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}." + type: string + required: true + label: Ingress Path + - variable: ingress.pathType + default: "ImplementationSpecific" + description: "Path type for the ingress. (Options: \"ImplementationSpecific\", \"Exact\", \"Prefix\")" + type: enum + options: + - "ImplementationSpecific" + - "Exact" + - "Prefix" + required: true + label: Ingress Path Type +- variable: service.ui.type + default: "Rancher-Proxy" + description: "Service type for Longhorn UI. (Options: \"ClusterIP\", \"NodePort\", \"LoadBalancer\", \"Rancher-Proxy\")" + type: enum + options: + - "ClusterIP" + - "NodePort" + - "LoadBalancer" + - "Rancher-Proxy" + label: Longhorn UI Service + show_if: "ingress.enabled=false" + group: "Services and Load Balancing" + show_subquestion_if: "NodePort" + subquestions: + - variable: service.ui.nodePort + default: "" + description: "NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767." + type: int + min: 30000 + max: 32767 + show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer" + label: UI Service NodePort number +- variable: enablePSP + default: "false" + description: "Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled." + label: Pod Security Policy + type: boolean + group: "Other Settings" +- variable: global.cattle.windowsCluster.enabled + default: "false" + description: "Setting that allows Longhorn to run on a Rancher Windows cluster." 
+ label: Rancher Windows Cluster + type: boolean + group: "Other Settings" +- variable: networkPolicies.enabled + description: "Setting that allows you to enable network policies that control access to Longhorn pods. + Warning: The Rancher Proxy will not work if this feature is enabled and a custom NetworkPolicy must be added." + group: "Other Settings" + label: Network Policies + default: "false" + type: boolean + subquestions: + - variable: networkPolicies.type + label: Network Policies for Ingress + description: "Distribution that determines the policy for allowing access for an ingress. (Options: \"k3s\", \"rke2\", \"rke1\")" + show_if: "networkPolicies.enabled=true&&ingress.enabled=true" + type: enum + default: "rke2" + options: + - "rke1" + - "rke2" + - "k3s" + - variable: defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU + label: Guaranteed Instance Manager CPU for V2 Data Engine + description: 'Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250". + WARNING: + - Specifying a value of 0 disables CPU requests for instance manager pods. You must specify an integer between 1000 and 8000. + - This is a global setting. Modifying the value triggers an automatic restart of the instance manager pods. Do not modify the value while volumes are still attached." + group: "Longhorn Default Settings' + type: int + min: 1000 + max: 8000 + default: 1250 diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/NOTES.txt b/charts/longhorn/104.2.0+up1.7.1/templates/NOTES.txt new file mode 100644 index 0000000000..cca7cd77b9 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/NOTES.txt @@ -0,0 +1,5 @@ +Longhorn is now installed on the cluster! + +Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized. 
+ +Visit our documentation at https://longhorn.io/docs/ diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/_helpers.tpl b/charts/longhorn/104.2.0+up1.7.1/templates/_helpers.tpl new file mode 100644 index 0000000000..3fbc2ac02f --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/_helpers.tpl @@ -0,0 +1,66 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "longhorn.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "longhorn.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{- define "longhorn.managerIP" -}} +{{- $fullname := (include "longhorn.fullname" .) -}} +{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{- define "secret" }} +{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }} +{{- end }} + +{{- /* +longhorn.labels generates the standard Helm labels. +*/ -}} +{{- define "longhorn.labels" -}} +app.kubernetes.io/name: {{ template "longhorn.name" . 
}} +helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- end -}} + + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{- define "registry_url" -}} +{{- if .Values.privateRegistry.registryUrl -}} +{{- printf "%s/" .Values.privateRegistry.registryUrl -}} +{{- else -}} +{{ include "system_default_registry" . }} +{{- end -}} +{{- end -}} + +{{- /* + define the longhorn release namespace +*/ -}} +{{- define "release_namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/clusterrole.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/clusterrole.yaml new file mode 100644 index 0000000000..c065f1726c --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/clusterrole.yaml @@ -0,0 +1,77 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: longhorn-role + labels: {{- include "longhorn.labels" . 
| nindent 4 }} +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - "*" +- apiGroups: [""] + resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps", "serviceaccounts"] + verbs: ["*"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] +- apiGroups: ["apps"] + resources: ["daemonsets", "statefulsets", "deployments"] + verbs: ["*"] +- apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["*"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets", "podsecuritypolicies"] + verbs: ["*"] +- apiGroups: ["scheduling.k8s.io"] + resources: ["priorityclasses"] + verbs: ["watch", "list"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "volumeattachments", "volumeattachments/status", "csinodes", "csidrivers"] + verbs: ["*"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"] + verbs: ["*"] +- apiGroups: ["longhorn.io"] + resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", "settings/status", + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + {{- if .Values.openshift.enabled }} + "engineimages/finalizers", "nodes/finalizers", "instancemanagers/finalizers", + {{- end }} + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", + "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", + "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status", + "recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status", + "supportbundles", 
"supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status", + "volumeattachments", "volumeattachments/status", "backupbackingimages", "backupbackingimages/status"] + verbs: ["*"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["*"] +- apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list"] +- apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["list", "watch"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + verbs: ["get", "list", "create", "patch", "delete"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings", "clusterrolebindings", "clusterroles"] + verbs: ["*"] +{{- if .Values.openshift.enabled }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: longhorn-ocp-privileged-role + labels: {{- include "longhorn.labels" . | nindent 4 }} +rules: +- apiGroups: ["security.openshift.io"] + resources: ["securitycontextconstraints"] + resourceNames: ["anyuid", "privileged"] + verbs: ["use"] +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/clusterrolebinding.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/clusterrolebinding.yaml new file mode 100644 index 0000000000..2e34f014ce --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/clusterrolebinding.yaml @@ -0,0 +1,49 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: longhorn-bind + labels: {{- include "longhorn.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: longhorn-role +subjects: +- kind: ServiceAccount + name: longhorn-service-account + namespace: {{ include "release_namespace" . 
}} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: longhorn-support-bundle + labels: {{- include "longhorn.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: longhorn-support-bundle + namespace: {{ include "release_namespace" . }} +{{- if .Values.openshift.enabled }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: longhorn-ocp-privileged-bind + labels: {{- include "longhorn.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: longhorn-ocp-privileged-role +subjects: +- kind: ServiceAccount + name: longhorn-service-account + namespace: {{ include "release_namespace" . }} +- kind: ServiceAccount + name: longhorn-ui-service-account + namespace: {{ include "release_namespace" . }} +- kind: ServiceAccount + name: default # supportbundle-agent-support-bundle uses default sa + namespace: {{ include "release_namespace" . }} +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/daemonset-sa.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/daemonset-sa.yaml new file mode 100644 index 0000000000..41800d9503 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/daemonset-sa.yaml @@ -0,0 +1,175 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-manager + name: longhorn-manager + namespace: {{ include "release_namespace" . }} +spec: + selector: + matchLabels: + app: longhorn-manager + template: + metadata: + labels: {{- include "longhorn.labels" . | nindent 8 }} + app: longhorn-manager + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + containers: + - name: longhorn-manager + image: {{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + privileged: true + command: + - longhorn-manager + - -d + {{- if eq .Values.longhornManager.log.format "json" }} + - -j + {{- end }} + - daemon + - --engine-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}" + - --instance-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}" + - --share-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}" + - --backing-image-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}" + - --support-bundle-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.supportBundleKit.repository }}:{{ .Values.image.longhorn.supportBundleKit.tag }}" + - --manager-image + - "{{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}" + - --service-account + - longhorn-service-account + {{- if .Values.preUpgradeChecker.upgradeVersionCheck}} + - --upgrade-version-check + {{- end }} + ports: + - containerPort: 9500 + name: manager + - containerPort: 9501 + name: conversion-wh + - containerPort: 9502 + name: admission-wh + - containerPort: 9503 + name: recov-backend + readinessProbe: + httpGet: + path: /v1/healthz + port: 9501 + scheme: HTTPS + volumeMounts: + - name: dev + mountPath: /host/dev/ + - name: proc + mountPath: /host/proc/ + - name: longhorn + mountPath: /var/lib/longhorn/ + mountPropagation: Bidirectional + - name: longhorn-grpc-tls + mountPath: /tls-files/ + {{- if .Values.enableGoCoverDir }} + - name: go-cover-dir + mountPath: /go-cover-dir/ + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{- if .Values.enableGoCoverDir }} + - name: GOCOVERDIR + value: /go-cover-dir/ + {{- end }} + - name: pre-pull-share-manager-image + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: {{ template "registry_url" . 
}}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }} + command: ["sh", "-c", "echo share-manager image pulled && sleep infinity"] + volumes: + - name: dev + hostPath: + path: /dev/ + - name: proc + hostPath: + path: /proc/ + - name: longhorn + hostPath: + path: /var/lib/longhorn/ + {{- if .Values.enableGoCoverDir }} + - name: go-cover-dir + hostPath: + path: /go-cover-dir/ + type: DirectoryOrCreate + {{- end }} + - name: longhorn-grpc-tls + secret: + secretName: longhorn-grpc-tls + optional: true + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote }} + {{- end }} + {{- if or .Values.global.tolerations .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if or .Values.global.tolerations .Values.longhornManager.tolerations }} +{{ default .Values.global.tolerations .Values.longhornManager.tolerations | toYaml | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornManager.nodeSelector }} +{{ default .Values.global.nodeSelector .Values.longhornManager.nodeSelector | toYaml | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: longhorn-service-account + updateStrategy: + rollingUpdate: + 
maxUnavailable: "100%" +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-manager + name: longhorn-backend + namespace: {{ include "release_namespace" . }} + {{- if .Values.longhornManager.serviceAnnotations }} + annotations: +{{ toYaml .Values.longhornManager.serviceAnnotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.service.manager.type }} + selector: + app: longhorn-manager + ports: + - name: manager + port: 9500 + targetPort: manager + {{- if .Values.service.manager.nodePort }} + nodePort: {{ .Values.service.manager.nodePort }} + {{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/default-setting.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/default-setting.yaml new file mode 100644 index 0000000000..315cdc6ec9 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/default-setting.yaml @@ -0,0 +1,244 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-default-setting + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . 
| nindent 4 }} +data: + default-setting.yaml: |- + {{- if not (kindIs "invalid" .Values.defaultSettings.backupTarget) }} + backup-target: {{ .Values.defaultSettings.backupTarget }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backupTargetCredentialSecret) }} + backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.allowRecurringJobWhileVolumeDetached) }} + allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.createDefaultDiskLabeledNodes) }} + create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.defaultDataPath) }} + default-data-path: {{ .Values.defaultSettings.defaultDataPath }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaSoftAntiAffinity) }} + replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaAutoBalance) }} + replica-auto-balance: {{ .Values.defaultSettings.replicaAutoBalance }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.storageOverProvisioningPercentage) }} + storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.storageMinimalAvailablePercentage) }} + storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.storageReservedPercentageForDefaultDisk) }} + storage-reserved-percentage-for-default-disk: {{ .Values.defaultSettings.storageReservedPercentageForDefaultDisk }} + {{- end }} + {{- if not (kindIs "invalid" 
.Values.defaultSettings.upgradeChecker) }} + upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.defaultReplicaCount) }} + default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.defaultDataLocality) }} + default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.defaultLonghornStaticStorageClass) }} + default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backupstorePollInterval) }} + backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.failedBackupTTL) }} + failed-backup-ttl: {{ .Values.defaultSettings.failedBackupTTL }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.restoreVolumeRecurringJobs) }} + restore-volume-recurring-jobs: {{ .Values.defaultSettings.restoreVolumeRecurringJobs }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit) }} + recurring-successful-jobs-history-limit: {{ .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.recurringJobMaxRetention) }} + recurring-job-max-retention: {{ .Values.defaultSettings.recurringJobMaxRetention }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.recurringFailedJobsHistoryLimit) }} + recurring-failed-jobs-history-limit: {{ .Values.defaultSettings.recurringFailedJobsHistoryLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.supportBundleFailedHistoryLimit) }} + support-bundle-failed-history-limit: {{ .Values.defaultSettings.supportBundleFailedHistoryLimit }} + {{- end }} + 
{{- if or (not (kindIs "invalid" .Values.defaultSettings.taintToleration)) (.Values.global.cattle.windowsCluster.enabled) }} + taint-toleration: {{ $windowsDefaultSettingTaintToleration := list }}{{ $defaultSettingTaintToleration := list -}} + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}} + {{- $windowsDefaultSettingTaintToleration = .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}} + {{- end -}} + {{- if not (kindIs "invalid" .Values.defaultSettings.taintToleration) -}} + {{- $defaultSettingTaintToleration = .Values.defaultSettings.taintToleration -}} + {{- end -}} + {{- $taintToleration := list $windowsDefaultSettingTaintToleration $defaultSettingTaintToleration }}{{ join ";" (compact $taintToleration) -}} + {{- end }} + {{- if or (not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector)) (.Values.global.cattle.windowsCluster.enabled) }} + system-managed-components-node-selector: {{ $windowsDefaultSettingNodeSelector := list }}{{ $defaultSettingNodeSelector := list -}} + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}} + {{ $windowsDefaultSettingNodeSelector = .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}} + {{- end -}} + {{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) -}} + {{- $defaultSettingNodeSelector = .Values.defaultSettings.systemManagedComponentsNodeSelector -}} + {{- end -}} + {{- $nodeSelector := list $windowsDefaultSettingNodeSelector $defaultSettingNodeSelector }}{{ join ";" (compact $nodeSelector) -}} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.priorityClass) }} + priority-class: {{ .Values.defaultSettings.priorityClass }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.autoSalvage) }} + 
auto-salvage: {{ .Values.defaultSettings.autoSalvage }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly) }} + auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.disableSchedulingOnCordonedNode) }} + disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaZoneSoftAntiAffinity) }} + replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaDiskSoftAntiAffinity) }} + replica-disk-soft-anti-affinity: {{ .Values.defaultSettings.replicaDiskSoftAntiAffinity }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.nodeDownPodDeletionPolicy) }} + node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.nodeDrainPolicy) }} + node-drain-policy: {{ .Values.defaultSettings.nodeDrainPolicy }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.detachManuallyAttachedVolumesWhenCordoned) }} + detach-manually-attached-volumes-when-cordoned: {{ .Values.defaultSettings.detachManuallyAttachedVolumesWhenCordoned }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaReplenishmentWaitInterval) }} + replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit) }} + concurrent-replica-rebuild-per-node-limit: {{ .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit }} + {{- end }} + {{- if not (kindIs "invalid" 
.Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit) }} + concurrent-volume-backup-restore-per-node-limit: {{ .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.disableRevisionCounter) }} + disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedPodsImagePullPolicy) }} + system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability) }} + allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot) }} + auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.autoCleanupRecurringJobBackupSnapshot) }} + auto-cleanup-recurring-job-backup-snapshot: {{ .Values.defaultSettings.autoCleanupRecurringJobBackupSnapshot }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit) }} + concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backingImageCleanupWaitInterval) }} + backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backingImageRecoveryWaitInterval) }} + backing-image-recovery-wait-interval: {{ .Values.defaultSettings.backingImageRecoveryWaitInterval }} + {{- end }} + {{- if not (kindIs "invalid" 
.Values.defaultSettings.guaranteedInstanceManagerCPU) }} + guaranteed-instance-manager-cpu: {{ .Values.defaultSettings.guaranteedInstanceManagerCPU }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.kubernetesClusterAutoscalerEnabled) }} + kubernetes-cluster-autoscaler-enabled: {{ .Values.defaultSettings.kubernetesClusterAutoscalerEnabled }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.orphanAutoDeletion) }} + orphan-auto-deletion: {{ .Values.defaultSettings.orphanAutoDeletion }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.storageNetwork) }} + storage-network: {{ .Values.defaultSettings.storageNetwork }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.deletingConfirmationFlag) }} + deleting-confirmation-flag: {{ .Values.defaultSettings.deletingConfirmationFlag }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.engineReplicaTimeout) }} + engine-replica-timeout: {{ .Values.defaultSettings.engineReplicaTimeout }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrity) }} + snapshot-data-integrity: {{ .Values.defaultSettings.snapshotDataIntegrity }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation) }} + snapshot-data-integrity-immediate-check-after-snapshot-creation: {{ .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityCronjob) }} + snapshot-data-integrity-cronjob: {{ .Values.defaultSettings.snapshotDataIntegrityCronjob }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim) }} + remove-snapshots-during-filesystem-trim: {{ .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.fastReplicaRebuildEnabled) }} + 
fast-replica-rebuild-enabled: {{ .Values.defaultSettings.fastReplicaRebuildEnabled }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaFileSyncHttpClientTimeout) }} + replica-file-sync-http-client-timeout: {{ .Values.defaultSettings.replicaFileSyncHttpClientTimeout }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.longGRPCTimeOut) }} + long-grpc-timeout: {{ .Values.defaultSettings.longGRPCTimeOut }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.logLevel) }} + log-level: {{ .Values.defaultSettings.logLevel }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backupCompressionMethod) }} + backup-compression-method: {{ .Values.defaultSettings.backupCompressionMethod }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backupConcurrentLimit) }} + backup-concurrent-limit: {{ .Values.defaultSettings.backupConcurrentLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.restoreConcurrentLimit) }} + restore-concurrent-limit: {{ .Values.defaultSettings.restoreConcurrentLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v1DataEngine) }} + v1-data-engine: {{ .Values.defaultSettings.v1DataEngine }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngine) }} + v2-data-engine: {{ .Values.defaultSettings.v2DataEngine }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineHugepageLimit) }} + v2-data-engine-hugepage-limit: {{ .Values.defaultSettings.v2DataEngineHugepageLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.allowEmptyNodeSelectorVolume) }} + allow-empty-node-selector-volume: {{ .Values.defaultSettings.allowEmptyNodeSelectorVolume }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.allowEmptyDiskSelectorVolume) }} + allow-empty-disk-selector-volume: {{ .Values.defaultSettings.allowEmptyDiskSelectorVolume }} + {{- end }} + 
{{- if not (kindIs "invalid" .Values.defaultSettings.allowCollectingLonghornUsageMetrics) }} + allow-collecting-longhorn-usage-metrics: {{ .Values.defaultSettings.allowCollectingLonghornUsageMetrics }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.disableSnapshotPurge) }} + disable-snapshot-purge: {{ .Values.defaultSettings.disableSnapshotPurge }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU) }} + v2-data-engine-guaranteed-instance-manager-cpu: {{ .Values.defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.snapshotMaxCount) }} + snapshot-max-count: {{ .Values.defaultSettings.snapshotMaxCount }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineLogLevel) }} + v2-data-engine-log-level: {{ .Values.defaultSettings.v2DataEngineLogLevel }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineLogFlags) }} + v2-data-engine-log-flags: {{ .Values.defaultSettings.v2DataEngineLogFlags }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.freezeFilesystemForSnapshot) }} + freeze-filesystem-for-snapshot: {{ .Values.defaultSettings.freezeFilesystemForSnapshot }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.autoCleanupSnapshotWhenDeleteBackup) }} + auto-cleanup-when-delete-backup: {{ .Values.defaultSettings.autoCleanupSnapshotWhenDeleteBackup }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.rwxVolumeFastFailover) }} + rwx-volume-fast-failover: {{ .Values.defaultSettings.rwxVolumeFastFailover}} + {{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/deployment-driver.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/deployment-driver.yaml new file mode 100644 index 0000000000..3ac582dcbc --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/deployment-driver.yaml @@ -0,0 +1,132 @@ 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: longhorn-driver-deployer + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + app: longhorn-driver-deployer + template: + metadata: + labels: {{- include "longhorn.labels" . | nindent 8 }} + app: longhorn-driver-deployer + spec: + initContainers: + - name: wait-longhorn-manager + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] + containers: + - name: longhorn-driver-deployer + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - longhorn-manager + - -d + - deploy-driver + - --manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}" + - --manager-url + - http://longhorn-backend:9500/v1 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + {{- if .Values.csi.kubeletRootDir }} + - name: KUBELET_ROOT_DIR + value: {{ .Values.csi.kubeletRootDir }} + {{- end }} + {{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }} + - name: CSI_ATTACHER_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}" + {{- end }} + {{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }} + - name: CSI_PROVISIONER_IMAGE + value: "{{ template "registry_url" . 
}}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}" + {{- end }} + {{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }} + - name: CSI_NODE_DRIVER_REGISTRAR_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}" + {{- end }} + {{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }} + - name: CSI_RESIZER_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}" + {{- end }} + {{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }} + - name: CSI_SNAPSHOTTER_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}" + {{- end }} + {{- if and .Values.image.csi.livenessProbe.repository .Values.image.csi.livenessProbe.tag }} + - name: CSI_LIVENESS_PROBE_IMAGE + value: "{{ template "registry_url" . 
}}{{ .Values.image.csi.livenessProbe.repository }}:{{ .Values.image.csi.livenessProbe.tag }}" + {{- end }} + {{- if .Values.csi.attacherReplicaCount }} + - name: CSI_ATTACHER_REPLICA_COUNT + value: {{ .Values.csi.attacherReplicaCount | quote }} + {{- end }} + {{- if .Values.csi.provisionerReplicaCount }} + - name: CSI_PROVISIONER_REPLICA_COUNT + value: {{ .Values.csi.provisionerReplicaCount | quote }} + {{- end }} + {{- if .Values.csi.resizerReplicaCount }} + - name: CSI_RESIZER_REPLICA_COUNT + value: {{ .Values.csi.resizerReplicaCount | quote }} + {{- end }} + {{- if .Values.csi.snapshotterReplicaCount }} + - name: CSI_SNAPSHOTTER_REPLICA_COUNT + value: {{ .Values.csi.snapshotterReplicaCount | quote }} + {{- end }} + {{- if .Values.enableGoCoverDir }} + - name: GOCOVERDIR + value: /go-cover-dir/ + volumeMounts: + - name: go-cover-dir + mountPath: /go-cover-dir/ + {{- end }} + + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornDriver.priorityClass }} + priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }} + {{- end }} + {{- if or .Values.global.tolerations .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if or .Values.global.tolerations .Values.longhornDriver.tolerations }} +{{ default .Values.global.tolerations .Values.longhornDriver.tolerations | toYaml | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml 
.Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornDriver.nodeSelector }} +{{ default .Values.global.nodeSelector .Values.longhornDriver.nodeSelector | toYaml | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: longhorn-service-account + securityContext: + runAsUser: 0 + {{- if .Values.enableGoCoverDir }} + volumes: + - name: go-cover-dir + hostPath: + path: /go-cover-dir/ + type: DirectoryOrCreate + {{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/deployment-ui.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/deployment-ui.yaml new file mode 100644 index 0000000000..22c443aeba --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/deployment-ui.yaml @@ -0,0 +1,182 @@ +{{- if .Values.openshift.enabled }} +{{- if .Values.openshift.ui.route }} +# https://github.com/openshift/oauth-proxy/blob/master/contrib/sidecar.yaml +# Create a proxy service account and ensure it will use the route "proxy" +# Create a secure connection to the proxy via a route +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-ui + name: {{ .Values.openshift.ui.route }} + namespace: {{ include "release_namespace" . }} +spec: + to: + kind: Service + name: longhorn-ui + tls: + termination: reencrypt +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-ui + name: longhorn-ui + namespace: {{ include "release_namespace" . }} + annotations: + service.alpha.openshift.io/serving-cert-secret-name: longhorn-ui-tls +spec: + ports: + - name: longhorn-ui + port: {{ .Values.openshift.ui.port | default 443 }} + targetPort: {{ .Values.openshift.ui.proxy | default 8443 }} + selector: + app: longhorn-ui +--- +{{- end }} +{{- end }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + app: longhorn-ui + name: longhorn-ui + namespace: {{ include "release_namespace" . }} +spec: + replicas: {{ .Values.longhornUI.replicas }} + selector: + matchLabels: + app: longhorn-ui + template: + metadata: + labels: {{- include "longhorn.labels" . | nindent 8 }} + app: longhorn-ui + spec: + serviceAccountName: longhorn-ui-service-account + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - longhorn-ui + topologyKey: kubernetes.io/hostname + containers: + {{- if .Values.openshift.enabled }} + {{- if .Values.openshift.ui.route }} + - name: oauth-proxy + image: {{ template "registry_url" . }}{{ .Values.image.openshift.oauthProxy.repository }}:{{ .Values.image.openshift.oauthProxy.tag }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: {{ .Values.openshift.ui.proxy | default 8443 }} + name: public + args: + - --https-address=:{{ .Values.openshift.ui.proxy | default 8443 }} + - --provider=openshift + - --openshift-service-account=longhorn-ui-service-account + - --upstream=http://localhost:8000 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - --openshift-sar={"namespace":"{{ include "release_namespace" . }}","group":"longhorn.io","resource":"setting","verb":"delete"} + volumeMounts: + - mountPath: /etc/tls/private + name: longhorn-ui-tls + {{- end }} + {{- end }} + - name: longhorn-ui + image: {{ template "registry_url" . 
}}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + - name : nginx-cache + mountPath: /var/cache/nginx/ + - name : nginx-config + mountPath: /var/config/nginx/ + - name: var-run + mountPath: /var/run/ + ports: + - containerPort: 8000 + name: http + env: + - name: LONGHORN_MANAGER_IP + value: "http://longhorn-backend:9500" + - name: LONGHORN_UI_PORT + value: "8000" + volumes: + {{- if .Values.openshift.enabled }} + {{- if .Values.openshift.ui.route }} + - name: longhorn-ui-tls + secret: + secretName: longhorn-ui-tls + {{- end }} + {{- end }} + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-config + - emptyDir: {} + name: var-run + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornUI.priorityClass }} + priorityClassName: {{ .Values.longhornUI.priorityClass | quote }} + {{- end }} + {{- if or .Values.global.tolerations .Values.longhornUI.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if or .Values.global.tolerations .Values.longhornUI.tolerations }} +{{ default .Values.global.tolerations .Values.longhornUI.tolerations | toYaml | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornUI.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornUI.nodeSelector }} +{{ default .Values.global.nodeSelector 
.Values.longhornUI.nodeSelector | toYaml | indent 8 }} + {{- end }} + {{- end }} +--- +kind: Service +apiVersion: v1 +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-ui + {{- if eq .Values.service.ui.type "Rancher-Proxy" }} + kubernetes.io/cluster-service: "true" + {{- end }} + name: longhorn-frontend + namespace: {{ include "release_namespace" . }} +spec: + {{- if eq .Values.service.ui.type "Rancher-Proxy" }} + type: ClusterIP + {{- else }} + type: {{ .Values.service.ui.type }} + {{- end }} + {{- if and .Values.service.ui.loadBalancerIP (eq .Values.service.ui.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.ui.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.ui.type "LoadBalancer") .Values.service.ui.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.ui.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + selector: + app: longhorn-ui + ports: + - name: http + port: 80 + targetPort: http + {{- if .Values.service.ui.nodePort }} + nodePort: {{ .Values.service.ui.nodePort }} + {{- else }} + nodePort: null + {{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/ingress.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/ingress.yaml new file mode 100644 index 0000000000..61175e827b --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/ingress.yaml @@ -0,0 +1,37 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: longhorn-ingress + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + app: longhorn-ingress + annotations: + {{- if .Values.ingress.secureBackends }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end }} + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - path: {{ default "" .Values.ingress.path }} + pathType: {{ default "ImplementationSpecific" .Values.ingress.pathType }} + backend: + service: + name: longhorn-frontend + port: + number: 80 +{{- if .Values.ingress.tls }} + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: {{ .Values.ingress.tlsSecret }} +{{- end }} +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/backing-image-data-source-network-policy.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/backing-image-data-source-network-policy.yaml new file mode 100644 index 0000000000..7204d63caa --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/backing-image-data-source-network-policy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: backing-image-data-source + namespace: {{ include "release_namespace" . 
}} +spec: + podSelector: + matchLabels: + longhorn.io/component: backing-image-data-source + policyTypes: + - Ingress + ingress: + - from: + - podSelector: + matchLabels: + app: longhorn-manager + - podSelector: + matchLabels: + longhorn.io/component: instance-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-data-source +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/backing-image-manager-network-policy.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/backing-image-manager-network-policy.yaml new file mode 100644 index 0000000000..119ebf08a1 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/backing-image-manager-network-policy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: backing-image-manager + namespace: {{ include "release_namespace" . 
}} +spec: + podSelector: + matchLabels: + longhorn.io/component: backing-image-manager + policyTypes: + - Ingress + ingress: + - from: + - podSelector: + matchLabels: + app: longhorn-manager + - podSelector: + matchLabels: + longhorn.io/component: instance-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-data-source +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/instance-manager-networking.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/instance-manager-networking.yaml new file mode 100644 index 0000000000..332aa2c2fe --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/instance-manager-networking.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: instance-manager + namespace: {{ include "release_namespace" . }} +spec: + podSelector: + matchLabels: + longhorn.io/component: instance-manager + policyTypes: + - Ingress + ingress: + - from: + - podSelector: + matchLabels: + app: longhorn-manager + - podSelector: + matchLabels: + longhorn.io/component: instance-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-data-source +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/manager-network-policy.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/manager-network-policy.yaml new file mode 100644 index 0000000000..6f94029a53 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/manager-network-policy.yaml @@ -0,0 +1,35 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: longhorn-manager + namespace: {{ include "release_namespace" . 
}} +spec: + podSelector: + matchLabels: + app: longhorn-manager + policyTypes: + - Ingress + ingress: + - from: + - podSelector: + matchLabels: + app: longhorn-manager + - podSelector: + matchLabels: + app: longhorn-ui + - podSelector: + matchLabels: + app: longhorn-csi-plugin + - podSelector: + matchLabels: + longhorn.io/managed-by: longhorn-manager + matchExpressions: + - { key: recurring-job.longhorn.io, operator: Exists } + - podSelector: + matchExpressions: + - { key: longhorn.io/job-task, operator: Exists } + - podSelector: + matchLabels: + app: longhorn-driver-deployer +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/recovery-backend-network-policy.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/recovery-backend-network-policy.yaml new file mode 100644 index 0000000000..37bf5f9bcf --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/recovery-backend-network-policy.yaml @@ -0,0 +1,17 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: longhorn-recovery-backend + namespace: {{ include "release_namespace" . }} +spec: + podSelector: + matchLabels: + longhorn.io/recovery-backend: longhorn-recovery-backend + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 9503 +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/ui-frontend-network-policy.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/ui-frontend-network-policy.yaml new file mode 100644 index 0000000000..6f37065980 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/ui-frontend-network-policy.yaml @@ -0,0 +1,46 @@ +{{- if and .Values.networkPolicies.enabled .Values.ingress.enabled (not (eq .Values.networkPolicies.type "")) }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: longhorn-ui-frontend + namespace: {{ include "release_namespace" . 
}} +spec: + podSelector: + matchLabels: + app: longhorn-ui + policyTypes: + - Ingress + ingress: + - from: + {{- if eq .Values.networkPolicies.type "rke1"}} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + {{- else if eq .Values.networkPolicies.type "rke2" }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: rke2-ingress-nginx + app.kubernetes.io/name: rke2-ingress-nginx + {{- else if eq .Values.networkPolicies.type "k3s" }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + app.kubernetes.io/name: traefik + ports: + - port: 8000 + protocol: TCP + - port: 80 + protocol: TCP + {{- end }} +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/webhook-network-policy.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/webhook-network-policy.yaml new file mode 100644 index 0000000000..3812e0ffa3 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/network-policies/webhook-network-policy.yaml @@ -0,0 +1,33 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: longhorn-conversion-webhook + namespace: {{ include "release_namespace" . }} +spec: + podSelector: + matchLabels: + longhorn.io/conversion-webhook: longhorn-conversion-webhook + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 9501 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: longhorn-admission-webhook + namespace: {{ include "release_namespace" . 
}} +spec: + podSelector: + matchLabels: + longhorn.io/admission-webhook: longhorn-admission-webhook + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 9502 +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/postupgrade-job.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/postupgrade-job.yaml new file mode 100644 index 0000000000..56efd38e9b --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/postupgrade-job.yaml @@ -0,0 +1,56 @@ +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation + name: longhorn-post-upgrade + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +spec: + activeDeadlineSeconds: 900 + backoffLimit: 1 + template: + metadata: + name: longhorn-post-upgrade + labels: {{- include "longhorn.labels" . | nindent 8 }} + spec: + containers: + - name: longhorn-post-upgrade + image: {{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - longhorn-manager + - post-upgrade + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: OnFailure + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote }} + {{- end }} + serviceAccountName: longhorn-service-account + {{- if or .Values.global.tolerations .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if or .Values.global.tolerations .Values.longhornManager.tolerations }} +{{ default .Values.global.tolerations .Values.longhornManager.tolerations | toYaml | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornManager.nodeSelector }} +{{ default .Values.global.nodeSelector .Values.longhornManager.nodeSelector | toYaml | indent 8 }} + {{- end }} + {{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/preupgrade-job.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/preupgrade-job.yaml new file mode 100644 index 0000000000..9f7a8a6aa6 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/preupgrade-job.yaml @@ -0,0 +1,64 @@ 
+{{- if and .Values.preUpgradeChecker.jobEnabled .Values.preUpgradeChecker.upgradeVersionCheck}} +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed + name: longhorn-pre-upgrade + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +spec: + activeDeadlineSeconds: 900 + backoffLimit: 1 + template: + metadata: + name: longhorn-pre-upgrade + labels: {{- include "longhorn.labels" . | nindent 8 }} + spec: + containers: + - name: longhorn-pre-upgrade + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + privileged: true + command: + - longhorn-manager + - pre-upgrade + volumeMounts: + - name: proc + mountPath: /host/proc/ + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: proc + hostPath: + path: /proc/ + restartPolicy: OnFailure + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + serviceAccountName: longhorn-service-account + {{- if or .Values.global.tolerations .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if or .Values.global.tolerations .Values.longhornManager.tolerations }} +{{ default .Values.global.tolerations .Values.longhornManager.tolerations | toYaml | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and 
.Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornManager.nodeSelector }} +{{ default .Values.global.nodeSelector .Values.longhornManager.nodeSelector | toYaml | indent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/priorityclass.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/priorityclass.yaml new file mode 100644 index 0000000000..208adc84a2 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/priorityclass.yaml @@ -0,0 +1,9 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: "longhorn-critical" + labels: {{- include "longhorn.labels" . | nindent 4 }} +description: "Ensure Longhorn pods have the highest priority to prevent any unexpected eviction by the Kubernetes scheduler under node pressure" +globalDefault: false +preemptionPolicy: PreemptLowerPriority +value: 1000000000 diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/psp.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/psp.yaml new file mode 100644 index 0000000000..a2dfc05bef --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/psp.yaml @@ -0,0 +1,66 @@ +{{- if .Values.enablePSP }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: longhorn-psp + labels: {{- include "longhorn.labels" . 
| nindent 4 }} +spec: + privileged: true + allowPrivilegeEscalation: true + requiredDropCapabilities: + - NET_RAW + allowedCapabilities: + - SYS_ADMIN + hostNetwork: false + hostIPC: false + hostPID: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + fsGroup: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - secret + - projected + - hostPath +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: longhorn-psp-role + labels: {{- include "longhorn.labels" . | nindent 4 }} + namespace: {{ include "release_namespace" . }} +rules: +- apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - longhorn-psp +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: longhorn-psp-binding + labels: {{- include "longhorn.labels" . | nindent 4 }} + namespace: {{ include "release_namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: longhorn-psp-role +subjects: +- kind: ServiceAccount + name: longhorn-service-account + namespace: {{ include "release_namespace" . }} +- kind: ServiceAccount + name: default + namespace: {{ include "release_namespace" . }} +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/registry-secret.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/registry-secret.yaml new file mode 100644 index 0000000000..3c6b1dc510 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/registry-secret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.privateRegistry.createSecret }} +{{- if .Values.privateRegistry.registrySecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.privateRegistry.registrySecret }} + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ template "secret" . 
}} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/serviceaccount.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/serviceaccount.yaml new file mode 100644 index 0000000000..b0d6dd505b --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/serviceaccount.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-service-account + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-ui-service-account + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.openshift.enabled }} + {{- if .Values.openshift.ui.route }} + {{- if not .Values.serviceAccount.annotations }} + annotations: + {{- end }} + serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"longhorn-ui"}}' + {{- end }} + {{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-support-bundle + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} \ No newline at end of file diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/servicemonitor.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/servicemonitor.yaml new file mode 100644 index 0000000000..3f32961332 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/servicemonitor.yaml @@ -0,0 +1,40 @@ +{{- if .Values.metrics.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: longhorn-prometheus-servicemonitor + namespace: {{ include "release_namespace" . }} + labels: + {{- include "longhorn.labels" . | nindent 4 }} + name: longhorn-prometheus-servicemonitor + {{- with .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app: longhorn-manager + namespaceSelector: + matchNames: + - {{ include "release_namespace" . }} + endpoints: + - port: manager + {{- with .Values.metrics.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/services.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/services.yaml new file mode 100644 index 0000000000..4c8c6bc687 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/services.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-conversion-webhook + name: longhorn-conversion-webhook + namespace: {{ include "release_namespace" . 
}} +spec: + type: ClusterIP + selector: + longhorn.io/conversion-webhook: longhorn-conversion-webhook + ports: + - name: conversion-webhook + port: 9501 + targetPort: conversion-wh +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-admission-webhook + name: longhorn-admission-webhook + namespace: {{ include "release_namespace" . }} +spec: + type: ClusterIP + selector: + longhorn.io/admission-webhook: longhorn-admission-webhook + ports: + - name: admission-webhook + port: 9502 + targetPort: admission-wh +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-recovery-backend + name: longhorn-recovery-backend + namespace: {{ include "release_namespace" . }} +spec: + type: ClusterIP + selector: + longhorn.io/recovery-backend: longhorn-recovery-backend + ports: + - name: recovery-backend + port: 9503 + targetPort: recov-backend diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/storageclass.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/storageclass.yaml new file mode 100644 index 0000000000..64be249f9b --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/storageclass.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-storageclass + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . 
| nindent 4 }} +data: + storageclass.yaml: | + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: longhorn + annotations: + storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }} + provisioner: driver.longhorn.io + allowVolumeExpansion: true + reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}" + volumeBindingMode: Immediate + parameters: + numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}" + staleReplicaTimeout: "30" + fromBackup: "" + {{- if .Values.persistence.defaultFsType }} + fsType: "{{ .Values.persistence.defaultFsType }}" + {{- end }} + {{- if .Values.persistence.defaultMkfsParams }} + mkfsParams: "{{ .Values.persistence.defaultMkfsParams }}" + {{- end }} + {{- if .Values.persistence.migratable }} + migratable: "{{ .Values.persistence.migratable }}" + {{- end }} + {{- if .Values.persistence.nfsOptions }} + nfsOptions: "{{ .Values.persistence.nfsOptions }}" + {{- end }} + {{- if .Values.persistence.backingImage.enable }} + backingImage: {{ .Values.persistence.backingImage.name }} + backingImageDataSourceType: {{ .Values.persistence.backingImage.dataSourceType }} + backingImageDataSourceParameters: {{ .Values.persistence.backingImage.dataSourceParameters }} + backingImageChecksum: {{ .Values.persistence.backingImage.expectedChecksum }} + {{- end }} + {{- if .Values.persistence.recurringJobSelector.enable }} + recurringJobSelector: '{{ .Values.persistence.recurringJobSelector.jobList }}' + {{- end }} + dataLocality: {{ .Values.persistence.defaultDataLocality | quote }} + {{- if .Values.persistence.defaultDiskSelector.enable }} + diskSelector: "{{ .Values.persistence.defaultDiskSelector.selector }}" + {{- end }} + {{- if .Values.persistence.defaultNodeSelector.enable }} + nodeSelector: "{{ .Values.persistence.defaultNodeSelector.selector }}" + {{- end }} + {{- if .Values.persistence.removeSnapshotsDuringFilesystemTrim }} + unmapMarkSnapChainRemoved: "{{ 
.Values.persistence.removeSnapshotsDuringFilesystemTrim }}" + {{- end }} + {{- if .Values.persistence.disableRevisionCounter }} + disableRevisionCounter: "{{ .Values.persistence.disableRevisionCounter }}" + {{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/tls-secrets.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/tls-secrets.yaml new file mode 100644 index 0000000000..74c43426de --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/tls-secrets.yaml @@ -0,0 +1,16 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ include "release_namespace" $ }} + labels: {{- include "longhorn.labels" $ | nindent 4 }} + app: longhorn +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/uninstall-job.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/uninstall-job.yaml new file mode 100644 index 0000000000..1ab46207c3 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/uninstall-job.yaml @@ -0,0 +1,57 @@ +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + name: longhorn-uninstall + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +spec: + activeDeadlineSeconds: 900 + backoffLimit: 1 + template: + metadata: + name: longhorn-uninstall + labels: {{- include "longhorn.labels" . | nindent 8 }} + spec: + containers: + - name: longhorn-uninstall + image: {{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - longhorn-manager + - uninstall + - --force + env: + - name: LONGHORN_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: Never + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote }} + {{- end }} + serviceAccountName: longhorn-service-account + {{- if or .Values.global.tolerations .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if or .Values.global.tolerations .Values.longhornManager.tolerations }} +{{ default .Values.global.tolerations .Values.longhornManager.tolerations | toYaml | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if or .Values.global.nodeSelector .Values.longhornManager.nodeSelector }} +{{ default .Values.global.nodeSelector .Values.longhornManager.nodeSelector | toYaml | indent 8 }} + {{- end }} + {{- end }} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/userroles.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/userroles.yaml new file mode 100644 index 0000000000..57a68e130c --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/userroles.yaml @@ -0,0 +1,53 @@ 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "longhorn-admin" + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: +- apiGroups: [ "longhorn.io" ] + resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", "settings/status", + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", + "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", "backupbackingimages", "backupbackingimages/status", + "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status", + "recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status", + "supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status", + "volumeattachments", "volumeattachments/status"] + verbs: [ "*" ] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "longhorn-edit" + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" +rules: +- apiGroups: [ "longhorn.io" ] + resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", "settings/status", + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", + "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", "backupbackingimages", "backupbackingimages/status", + "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status", + "recurringjobs", "recurringjobs/status", "orphans", 
"orphans/status", "snapshots", "snapshots/status", + "supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status", + "volumeattachments", "volumeattachments/status"] + verbs: [ "*" ] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "longhorn-view" + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: +- apiGroups: [ "longhorn.io" ] + resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", "settings/status", + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", + "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", "backupbackingimages", "backupbackingimages/status", + "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status", + "recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status", + "supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status", + "volumeattachments", "volumeattachments/status"] + verbs: [ "get", "list", "watch" ] diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/validate-install-crd.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/validate-install-crd.yaml new file mode 100644 index 0000000000..7bf81816d0 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/validate-install-crd.yaml @@ -0,0 +1,35 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "longhorn.io/v1beta1/BackingImageDataSource" false -}} +# {{- set $found "longhorn.io/v1beta1/BackingImageManager" false -}} +# {{- set $found 
"longhorn.io/v1beta1/BackingImage" false -}} +# {{- set $found "longhorn.io/v1beta2/BackupBackingImage" false -}} +# {{- set $found "longhorn.io/v1beta1/Backup" false -}} +# {{- set $found "longhorn.io/v1beta1/BackupTarget" false -}} +# {{- set $found "longhorn.io/v1beta1/BackupVolume" false -}} +# {{- set $found "longhorn.io/v1beta1/EngineImage" false -}} +# {{- set $found "longhorn.io/v1beta1/Engine" false -}} +# {{- set $found "longhorn.io/v1beta1/InstanceManager" false -}} +# {{- set $found "longhorn.io/v1beta1/Node" false -}} +# {{- set $found "longhorn.io/v1beta2/Orphan" false -}} +# {{- set $found "longhorn.io/v1beta1/RecurringJob" false -}} +# {{- set $found "longhorn.io/v1beta1/Replica" false -}} +# {{- set $found "longhorn.io/v1beta1/Setting" false -}} +# {{- set $found "longhorn.io/v1beta1/ShareManager" false -}} +# {{- set $found "longhorn.io/v1beta2/Snapshot" false -}} +# {{- set $found "longhorn.io/v1beta2/SupportBundle" false -}} +# {{- set $found "longhorn.io/v1beta2/SystemBackup" false -}} +# {{- set $found "longhorn.io/v1beta2/SystemRestore" false -}} +# {{- set $found "longhorn.io/v1beta2/VolumeAttachment" false -}} +# {{- set $found "longhorn.io/v1beta1/Volume" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." 
"" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} diff --git a/charts/longhorn/104.2.0+up1.7.1/templates/validate-psp-install.yaml b/charts/longhorn/104.2.0+up1.7.1/templates/validate-psp-install.yaml new file mode 100644 index 0000000000..0df98e3657 --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/templates/validate-psp-install.yaml @@ -0,0 +1,7 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +#{{- if .Values.enablePSP }} +#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}} +#{{- end }} +#{{- end }} +#{{- end }} \ No newline at end of file diff --git a/charts/longhorn/104.2.0+up1.7.1/values.yaml b/charts/longhorn/104.2.0+up1.7.1/values.yaml new file mode 100644 index 0000000000..6ee0d58b6a --- /dev/null +++ b/charts/longhorn/104.2.0+up1.7.1/values.yaml @@ -0,0 +1,524 @@ +# Default values for longhorn. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + # -- Toleration for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer. + tolerations: [] + # -- Node selector for nodes allowed to run user-deployed components such as Longhorn Manager, Longhorn UI, and Longhorn Driver Deployer. + nodeSelector: {} + cattle: + # -- Default system registry. + systemDefaultRegistry: "" + windowsCluster: + # -- Setting that allows Longhorn to run on a Rancher Windows cluster. + enabled: false + # -- Toleration for Linux nodes that can run user-deployed Longhorn components. + tolerations: + - key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" + # -- Node selector for Linux nodes that can run user-deployed Longhorn components. 
+ nodeSelector: + kubernetes.io/os: "linux" + defaultSetting: + # -- Toleration for system-managed Longhorn components. + taintToleration: cattle.io/os=linux:NoSchedule + # -- Node selector for system-managed Longhorn components. + systemManagedComponentsNodeSelector: kubernetes.io/os:linux + +networkPolicies: + # -- Setting that allows you to enable network policies that control access to Longhorn pods. + enabled: false + # -- Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1") + type: "k3s" + +image: + longhorn: + engine: + # -- Repository for the Longhorn Engine image. + repository: rancher/mirrored-longhornio-longhorn-engine + # -- Tag for the Longhorn Engine image. + tag: v1.7.1 + manager: + # -- Repository for the Longhorn Manager image. + repository: rancher/mirrored-longhornio-longhorn-manager + # -- Tag for the Longhorn Manager image. + tag: v1.7.1 + ui: + # -- Repository for the Longhorn UI image. + repository: rancher/mirrored-longhornio-longhorn-ui + # -- Tag for the Longhorn UI image. + tag: v1.7.1 + instanceManager: + # -- Repository for the Longhorn Instance Manager image. + repository: rancher/mirrored-longhornio-longhorn-instance-manager + # -- Tag for the Longhorn Instance Manager image. + tag: v1.7.1 + shareManager: + # -- Repository for the Longhorn Share Manager image. + repository: rancher/mirrored-longhornio-longhorn-share-manager + # -- Tag for the Longhorn Share Manager image. + tag: v1.7.1 + backingImageManager: + # -- Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value. + repository: rancher/mirrored-longhornio-backing-image-manager + # -- Tag for the Backing Image Manager image. When unspecified, Longhorn uses the default value. + tag: v1.7.1 + supportBundleKit: + # -- Repository for the Longhorn Support Bundle Manager image. 
+ repository: rancher/mirrored-longhornio-support-bundle-kit + # -- Tag for the Longhorn Support Bundle Manager image. + tag: v0.0.42 + csi: + attacher: + # -- Repository for the CSI attacher image. When unspecified, Longhorn uses the default value. + repository: rancher/mirrored-longhornio-csi-attacher + # -- Tag for the CSI attacher image. When unspecified, Longhorn uses the default value. + tag: v4.6.1 + provisioner: + # -- Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value. + repository: rancher/mirrored-longhornio-csi-provisioner + # -- Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value. + tag: v4.0.1 + nodeDriverRegistrar: + # -- Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value. + repository: rancher/mirrored-longhornio-csi-node-driver-registrar + # -- Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value. + tag: v2.12.0 + resizer: + # -- Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value. + repository: rancher/mirrored-longhornio-csi-resizer + # -- Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value. + tag: v1.11.1 + snapshotter: + # -- Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value. + repository: rancher/mirrored-longhornio-csi-snapshotter + # -- Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value. + tag: v7.0.2 + livenessProbe: + # -- Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value. + repository: rancher/mirrored-longhornio-livenessprobe + # -- Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value. + tag: v2.14.0 + openshift: + oauthProxy: + # -- Repository for the OAuth Proxy image. This setting applies only to OpenShift users. 
+      repository: rancher/mirrored-longhornio-openshift-origin-oauth-proxy
+      # -- Tag for the OAuth Proxy image. This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later. The latest stable version is 4.15.
+      tag: "4.15"
+  # -- Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI.
+  pullPolicy: IfNotPresent
+
+service:
+  ui:
+    # -- Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy")
+    type: ClusterIP
+    # -- NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767.
+    nodePort: null
+  manager:
+    # -- Service type for Longhorn Manager.
+    type: ClusterIP
+    # -- NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767.
+    nodePort: ""
+
+persistence:
+  # -- Setting that allows you to specify the default Longhorn StorageClass.
+  defaultClass: true
+  # -- Filesystem type of the default Longhorn StorageClass.
+  defaultFsType: ext4
+  # -- mkfs parameters of the default Longhorn StorageClass.
+  defaultMkfsParams: ""
+  # -- Replica count of the default Longhorn StorageClass.
+  defaultClassReplicaCount: 3
+  # -- Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort")
+  defaultDataLocality: disabled
+  # -- Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete")
+  reclaimPolicy: Delete
+  # -- Setting that allows you to enable live migration of a Longhorn volume from one node to another.
+  migratable: false
+  # -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the volume-head-xxx.img file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery.
+ disableRevisionCounter: "true" + # -- Set NFS mount options for Longhorn StorageClass for RWX volumes + nfsOptions: "" + recurringJobSelector: + # -- Setting that allows you to enable the recurring job selector for a Longhorn StorageClass. + enable: false + # -- Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`) + jobList: [] + backingImage: + # -- Setting that allows you to use a backing image in a Longhorn StorageClass. + enable: false + # -- Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image. + name: ~ + # -- Data source type of a backing image used in a Longhorn StorageClass. + # If the backing image exists in the cluster, Longhorn uses this setting to verify the image. + # If the backing image does not exist, Longhorn creates one using the specified data source type. + dataSourceType: ~ + # -- Data source parameters of a backing image used in a Longhorn StorageClass. + # You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`) + dataSourceParameters: ~ + # -- Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass. + expectedChecksum: ~ + defaultDiskSelector: + # -- Setting that allows you to enable the disk selector for the default Longhorn StorageClass. + enable: false + # -- Disk selector for the default Longhorn StorageClass. Longhorn uses only disks with the specified tags for storing volume data. (Examples: "nvme,sata") + selector: "" + defaultNodeSelector: + # -- Setting that allows you to enable the node selector for the default Longhorn StorageClass. + enable: false + # -- Node selector for the default Longhorn StorageClass. 
Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast") + selector: "" + # -- Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled") + removeSnapshotsDuringFilesystemTrim: ignored + +preUpgradeChecker: + # -- Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions. + jobEnabled: true + # -- Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled. + upgradeVersionCheck: true + +csi: + # -- kubelet root directory. When unspecified, Longhorn uses the default value. + kubeletRootDir: ~ + # -- Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3"). + attacherReplicaCount: ~ + # -- Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3"). + provisionerReplicaCount: ~ + # -- Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3"). + resizerReplicaCount: ~ + # -- Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3"). + snapshotterReplicaCount: ~ + +defaultSettings: + # -- Endpoint used to access the backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE") + backupTarget: ~ + # -- Name of the Kubernetes secret associated with the backup target. + backupTargetCredentialSecret: ~ + # -- Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run. + allowRecurringJobWhileVolumeDetached: ~ + # -- Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). 
When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster. + createDefaultDiskLabeledNodes: ~ + # -- Default path for storing data on a host. The default value is "/var/lib/longhorn/". + defaultDataPath: ~ + # -- Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume. + defaultDataLocality: ~ + # -- Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default. + replicaSoftAntiAffinity: ~ + # -- Setting that automatically rebalances replicas when an available node is discovered. + replicaAutoBalance: ~ + # -- Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100". + storageOverProvisioningPercentage: ~ + # -- Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25". + storageMinimalAvailablePercentage: ~ + # -- Percentage of disk space that is not allocated to the default disk on each new Longhorn node. + storageReservedPercentageForDefaultDisk: ~ + # -- Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default + upgradeChecker: ~ + # -- Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3". + defaultReplicaCount: ~ + # -- Default Longhorn StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. 
"storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is "longhorn-static". + defaultLonghornStaticStorageClass: ~ + # -- Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is "300". When the value is "0", polling is disabled. + backupstorePollInterval: ~ + # -- Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled. + failedBackupTTL: ~ + # -- Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration. + restoreVolumeRecurringJobs: ~ + # -- Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained. + recurringSuccessfulJobsHistoryLimit: ~ + # -- Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained. + recurringFailedJobsHistoryLimit: ~ + # -- Maximum number of snapshots or backups to be retained. + recurringJobMaxRetention: ~ + # -- Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles. + supportBundleFailedHistoryLimit: ~ + # -- Taint or toleration for system-managed Longhorn components. + # Specify values using a semicolon-separated list in `kubectl taint` syntax (Example: key1=value1:effect; key2=value2:effect). + taintToleration: ~ + # -- Node selector for system-managed Longhorn components. + systemManagedComponentsNodeSelector: ~ + # -- PriorityClass for system-managed Longhorn components. + # This setting can help prevent Longhorn components from being evicted under Node Pressure. 
+ # Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`. + priorityClass: &defaultPriorityClassNameRef "longhorn-critical" + # -- Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default. + autoSalvage: ~ + # -- Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting. + autoDeletePodWhenVolumeDetachedUnexpectedly: ~ + # -- Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default. + disableSchedulingOnCordonedNode: ~ + # -- Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=" in the Kubernetes node object. + replicaZoneSoftAntiAffinity: ~ + # -- Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default. + replicaDiskSoftAntiAffinity: ~ + # -- Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed. + nodeDownPodDeletionPolicy: ~ + # -- Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained. 
+ nodeDrainPolicy: ~ + # -- Setting that allows automatic detaching of manually-attached volumes when a node is cordoned. + detachManuallyAttachedVolumesWhenCordoned: ~ + # -- Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume. + replicaReplenishmentWaitInterval: ~ + # -- Maximum number of replicas that can be concurrently rebuilt on each node. + concurrentReplicaRebuildPerNodeLimit: ~ + # -- Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled. + concurrentVolumeBackupRestorePerNodeLimit: ~ + # -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI. + disableRevisionCounter: "true" + # -- Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart. + systemManagedPodsImagePullPolicy: ~ + # -- Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation. + allowVolumeCreationWithDegradedAvailability: ~ + # -- Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed. + autoCleanupSystemGeneratedSnapshot: ~ + # -- Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job. + autoCleanupRecurringJobBackupSnapshot: ~ + # -- Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. 
When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version. + concurrentAutomaticEngineUpgradePerNodeLimit: ~ + # -- Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it. + backingImageCleanupWaitInterval: ~ + # -- Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown". + backingImageRecoveryWaitInterval: ~ + # -- Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12". + guaranteedInstanceManagerCPU: ~ + # -- Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler. + kubernetesClusterAutoscalerEnabled: ~ + # -- Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up. + orphanAutoDeletion: ~ + # -- Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network. + storageNetwork: ~ + # -- Flag that prevents accidental uninstallation of Longhorn. + deletingConfirmationFlag: ~ + # -- Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8". + engineReplicaTimeout: ~ + # -- Setting that allows you to enable and disable snapshot hashing and data integrity checks. + snapshotDataIntegrity: ~ + # -- Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance. + snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~ + # -- Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format. 
+ snapshotDataIntegrityCronjob: ~ + # -- Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files. + removeSnapshotsDuringFilesystemTrim: ~ + # -- Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check". + fastReplicaRebuildEnabled: ~ + # -- Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed. + replicaFileSyncHttpClientTimeout: ~ + # -- Number of seconds that Longhorn allows for the completion of replica rebuilding and snapshot cloning operations. + longGRPCTimeOut: ~ + # -- Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace") + logLevel: ~ + # -- Setting that allows you to specify a backup compression method. + backupCompressionMethod: ~ + # -- Maximum number of worker threads that can concurrently run for each backup. + backupConcurrentLimit: ~ + # -- Maximum number of worker threads that can concurrently run for each restore operation. + restoreConcurrentLimit: ~ + # -- Setting that allows you to enable the V1 Data Engine. + v1DataEngine: ~ + # -- Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments. + v2DataEngine: ~ + # -- Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine. + v2DataEngineHugepageLimit: ~ + # -- Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250". 
+ v2DataEngineGuaranteedInstanceManagerCPU: ~ + # -- Setting that allows scheduling of empty node selector volumes to any node. + allowEmptyNodeSelectorVolume: ~ + # -- Setting that allows scheduling of empty disk selector volumes to any disk. + allowEmptyDiskSelectorVolume: ~ + # -- Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses. + allowCollectingLonghornUsageMetrics: ~ + # -- Setting that temporarily prevents all attempts to purge volume snapshots. + disableSnapshotPurge: ~ + # -- Maximum snapshot count for a volume. The value should be between 2 to 250 + snapshotMaxCount: ~ + # -- Setting that allows you to configure the log level of the SPDK target daemon (spdk_tgt) of the V2 Data Engine. + v2DataEngineLogLevel: ~ + # -- Setting that allows you to configure the log flags of the SPDK target daemon (spdk_tgt) of the V2 Data Engine. + v2DataEngineLogFlags: ~ + # -- Setting that freezes the filesystem on the root partition before a snapshot is created. + freezeFilesystemForSnapshot: ~ + # -- Setting that automatically cleans up the snapshot when the backup is deleted. + autoCleanupSnapshotWhenDeleteBackup: ~ + # -- Turn on logic to detect and move RWX volumes quickly on node failure. + rwxVolumeFastFailover: ~ + +privateRegistry: + # -- Setting that allows you to create a private registry secret. + createSecret: ~ + # -- URL of a private registry. When unspecified, Longhorn uses the default system registry. + registryUrl: ~ + # -- User account used for authenticating with a private registry. + registryUser: ~ + # -- Password for authenticating with a private registry. 
+ registryPasswd: ~ + # -- Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name. + registrySecret: ~ + +longhornManager: + log: + # -- Format of Longhorn Manager logs. (Options: "plain", "json") + format: plain + # -- PriorityClass for Longhorn Manager. + priorityClass: *defaultPriorityClassNameRef + # -- Toleration for Longhorn Manager on nodes allowed to run Longhorn components. + tolerations: [] + ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + # -- Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager. + nodeSelector: {} + ## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + # -- Annotation for the Longhorn Manager service. + serviceAnnotations: {} + ## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above + ## and uncomment this example block + # annotation-key1: "annotation-value1" + # annotation-key2: "annotation-value2" + +longhornDriver: + # -- PriorityClass for Longhorn Driver. + priorityClass: *defaultPriorityClassNameRef + # -- Toleration for Longhorn Driver on nodes allowed to run Longhorn components. + tolerations: [] + ## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + # -- Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver. 
+ nodeSelector: {} + ## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + +longhornUI: + # -- Replica count for Longhorn UI. + replicas: 2 + # -- PriorityClass for Longhorn UI. + priorityClass: *defaultPriorityClassNameRef + # -- Toleration for Longhorn UI on nodes allowed to run Longhorn components. + tolerations: [] + ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + # -- Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI. + nodeSelector: {} + ## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + +ingress: + # -- Setting that allows Longhorn to generate ingress records for the Longhorn UI service. + enabled: false + + # -- IngressClass resource that contains ingress configuration, including the name of the Ingress controller. + # ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases. + ingressClassName: ~ + + # -- Hostname of the Layer 7 load balancer. + host: sslip.io + + # -- Setting that allows you to enable TLS on ingress records. + tls: false + + # -- Setting that allows you to enable secure connections to the Longhorn UI service via port 443. + secureBackends: false + + # -- TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records. + tlsSecret: longhorn.local-tls + + # -- Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}. + path: / + + # -- Ingress path type. 
To maintain backward compatibility, the default value is "ImplementationSpecific". + pathType: ImplementationSpecific + + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + # -- Ingress annotations in the form of key-value pairs. + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + # -- Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses. + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: longhorn.local-tls + # key: + # certificate: + +# -- Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled. +enablePSP: false + +# -- Specify override namespace, specifically this is useful for using longhorn as sub-chart and its release namespace is not the `longhorn-system`. +namespaceOverride: "" + +# -- Annotation for the Longhorn Manager DaemonSet pods. This setting is optional. 
+annotations: {} + +serviceAccount: + # -- Annotations to add to the service account + annotations: {} + +metrics: + serviceMonitor: + # -- Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components. + enabled: false + # -- Additional labels for the Prometheus ServiceMonitor resource. + additionalLabels: {} + # -- Annotations for the Prometheus ServiceMonitor resource. + annotations: {} + # -- Interval at which Prometheus scrapes the metrics from the target. + interval: "" + # -- Timeout after which Prometheus considers the scrape to be failed. + scrapeTimeout: "" + # -- Configures the relabeling rules to apply the target’s metadata labels. See the [Prometheus Operator + # documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for + # formatting details. + relabelings: [] + # -- Configures the relabeling rules to apply to the samples before ingestion. See the [Prometheus Operator + # documentation](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint) for + # formatting details. + metricRelabelings: [] + +## openshift settings +openshift: + # -- Setting that allows Longhorn to integrate with OpenShift. + enabled: false + ui: + # -- Route for connections between Longhorn and the OpenShift web console. + route: "longhorn-ui" + # -- Port for accessing the OpenShift web console. + port: 443 + # -- Port for proxy that provides access to the OpenShift web console. + proxy: 8443 + +# -- Setting that allows Longhorn to generate code coverage profiles. 
+enableGoCoverDir: false diff --git a/index.yaml b/index.yaml index 84584b05d8..8415e64d04 100755 --- a/index.yaml +++ b/index.yaml @@ -2672,6 +2672,50 @@ entries: - assets/harvester-csi-driver/harvester-csi-driver-101.0.0+up0.1.14.tgz version: 101.0.0+up0.1.14 longhorn: + - annotations: + catalog.cattle.io/auto-install: longhorn-crd=match + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Longhorn + catalog.cattle.io/kube-version: '>= 1.21.0-0' + catalog.cattle.io/namespace: longhorn-system + catalog.cattle.io/permits-os: linux,windows + catalog.cattle.io/provides-gvr: longhorn.io/v1beta1 + catalog.cattle.io/rancher-version: '>= 2.9.0-0 < 2.10.0-0' + catalog.cattle.io/release-name: longhorn + catalog.cattle.io/type: cluster-tool + catalog.cattle.io/upstream-version: 1.7.1 + apiVersion: v1 + appVersion: v1.7.1 + created: "2024-09-24T16:10:43.203714502-03:00" + description: Longhorn is a distributed block storage system for Kubernetes. + digest: f5d6c453a241e42e9483ccfe0a812d4ba75f917b92b193ab1eb0b22ee2ac5fa6 + home: https://github.com/longhorn/longhorn + icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png + keywords: + - longhorn + - storage + - distributed + - block + - device + - iscsi + - nfs + kubeVersion: '>=1.21.0-0' + maintainers: + - email: maintainers@longhorn.io + name: Longhorn maintainers + name: longhorn + sources: + - https://github.com/longhorn/longhorn + - https://github.com/longhorn/longhorn-engine + - https://github.com/longhorn/longhorn-instance-manager + - https://github.com/longhorn/longhorn-share-manager + - https://github.com/longhorn/longhorn-manager + - https://github.com/longhorn/longhorn-ui + - https://github.com/longhorn/longhorn-tests + - https://github.com/longhorn/backing-image-manager + urls: + - assets/longhorn/longhorn-104.2.0+up1.7.1.tgz + version: 104.2.0+up1.7.1 - annotations: catalog.cattle.io/auto-install: longhorn-crd=match 
catalog.cattle.io/certified: rancher diff --git a/release.yaml b/release.yaml index d3d02e898c..b0e9392187 100644 --- a/release.yaml +++ b/release.yaml @@ -1,2 +1,4 @@ +longhorn: + - 104.2.0+up1.7.1 longhorn-crd: - 104.2.0+up1.7.1