From 1c4d8bf98145e2a4c0955e154d74aa579c6d19ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Thu, 10 Aug 2023 16:52:08 +0300 Subject: [PATCH] gguf : start implementing libllama in GGUF (WIP) --- Makefile | 8 +- examples/gguf/gguf-llama-simple.cpp | 182 ++ gguf-llama-simple | Bin 0 -> 607488 bytes gguf-llama.cpp | 4060 +++++++++++++++++++++++++++ gguf-llama.h | 468 +++ 5 files changed, 4717 insertions(+), 1 deletion(-) create mode 100644 examples/gguf/gguf-llama-simple.cpp create mode 100644 gguf-llama-simple create mode 100644 gguf-llama.cpp create mode 100644 gguf-llama.h diff --git a/Makefile b/Makefile index a3600e4f2..f5922c95d 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ # Define the default target now so that it is always the first target -BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch simple server embd-input-test gguf gptneox-main +BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch simple server embd-input-test gguf gguf-llama-simple gptneox-main # Binaries only useful for tests TEST_TARGETS = tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0 @@ -337,6 +337,9 @@ OBJS += ggml-alloc.o llama.o: llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h llama.h llama-util.h $(CXX) $(CXXFLAGS) -c $< -o $@ +gguf-llama.o: gguf-llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h gguf-llama.h gguf-util.h + $(CXX) $(CXXFLAGS) -c $< -o $@ + common.o: examples/common.cpp examples/common.h $(CXX) $(CXXFLAGS) -c $< -o $@ @@ -393,6 +396,9 @@ embd-input-test: $(LIB_PRE)embdinput$(DSO_EXT) examples/embd-input/embd-input-te gguf: examples/gguf/gguf.cpp build-info.h ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) +gguf-llama-simple: examples/gguf/gguf-llama-simple.cpp build-info.h ggml.o gguf-llama.o common.o $(OBJS) + 
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + gptneox-main: gptneox-main.cpp ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) diff --git a/examples/gguf/gguf-llama-simple.cpp b/examples/gguf/gguf-llama-simple.cpp new file mode 100644 index 000000000..35c3c8183 --- /dev/null +++ b/examples/gguf/gguf-llama-simple.cpp @@ -0,0 +1,182 @@ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include "common.h" +#include "gguf-llama.h" +#include "build-info.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) +#include +#include +#elif defined (_WIN32) +#define WIN32_LEAN_AND_MEAN +#define NOMINMAX +#include +#include +#endif + + + +int main(int argc, char ** argv) +{ + gpt_params params; + + //--------------------------------- + // Print help : + //--------------------------------- + + if ( argc == 1 || argv[1][0] == '-' ) + { + printf( "usage: %s MODEL_PATH [PROMPT]\n" , argv[0] ); + return 1 ; + } + + //--------------------------------- + // Load parameters : + //--------------------------------- + + if ( argc >= 2 ) + { + params.model = argv[1]; + } + + if ( argc >= 3 ) + { + params.prompt = argv[2]; + } + + if ( params.prompt.empty() ) + { + params.prompt = "Hello my name is"; + } + + //--------------------------------- + // Init LLM : + //--------------------------------- + + llama_backend_init(params.numa); + + llama_context_params ctx_params = llama_context_default_params(); + + llama_model * model = llama_load_model_from_file(params.model.c_str(), ctx_params); + + if ( model == NULL ) + { + fprintf( stderr , "%s: error: unable to load model\n" , __func__ ); + return 1; + } + + llama_context * ctx = llama_new_context_with_model(model, ctx_params); + + //--------------------------------- + // Tokenize the prompt : + //--------------------------------- + + std::vector tokens_list; + tokens_list = 
::llama_tokenize( ctx , params.prompt , true ); + + const int max_context_size = llama_n_ctx( ctx ); + const int max_tokens_list_size = max_context_size - 4 ; + + if ( (int)tokens_list.size() > max_tokens_list_size ) + { + fprintf( stderr , "%s: error: prompt too long (%d tokens, max %d)\n" , + __func__ , (int)tokens_list.size() , max_tokens_list_size ); + return 1; + } + + fprintf( stderr, "\n\n" ); + + // Print the tokens from the prompt : + + for( auto id : tokens_list ) + { + printf( "%s" , llama_token_to_str( ctx , id ) ); + } + + fflush(stdout); + + + //--------------------------------- + // Main prediction loop : + //--------------------------------- + + // The LLM keeps a contextual cache memory of previous token evaluation. + // Usually, once this cache is full, it is required to recompute a compressed context based on previous + // tokens (see "infinite text generation via context swapping" in the main example), but in this minimalist + // example, we will just stop the loop once this cache is full or once an end of stream is detected. 
+ + while ( llama_get_kv_cache_token_count( ctx ) < max_context_size ) + { + //--------------------------------- + // Evaluate the tokens : + //--------------------------------- + + if ( llama_eval( ctx , tokens_list.data() , int(tokens_list.size()) , llama_get_kv_cache_token_count( ctx ) , params.n_threads ) ) + { + fprintf( stderr, "%s : failed to eval\n" , __func__ ); + return 1; + } + + tokens_list.clear(); + + //--------------------------------- + // Select the best prediction : + //--------------------------------- + + llama_token new_token_id = 0; + + auto logits = llama_get_logits( ctx ); + auto n_vocab = llama_n_vocab( ctx ); // the size of the LLM vocabulary (in tokens) + + std::vector candidates; + candidates.reserve( n_vocab ); + + for( llama_token token_id = 0 ; token_id < n_vocab ; token_id++ ) + { + candidates.emplace_back( llama_token_data{ token_id , logits[ token_id ] , 0.0f } ); + } + + llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; + + // Select it using the "Greedy sampling" method : + new_token_id = llama_sample_token_greedy( ctx , &candidates_p ); + + + // is it an end of stream ? + if ( new_token_id == llama_token_eos() ) + { + fprintf(stderr, " [end of text]\n"); + break; + } + + // Print the new token : + printf( "%s" , llama_token_to_str( ctx , new_token_id ) ); + fflush( stdout ); + + // Push this new token for next evaluation : + tokens_list.push_back( new_token_id ); + + } // wend of main loop + + llama_free( ctx ); + llama_free_model( model ); + + llama_backend_free(); + + return 0; +} + +// EOF diff --git a/gguf-llama-simple b/gguf-llama-simple new file mode 100644 index 0000000000000000000000000000000000000000..d7600282e3315a4bc805a4f6c01550d58bdb80a5 GIT binary patch literal 607488 zcmeFa3tUxI7C(N#i=t`w`YL+giW;0=;1e?IA`j2ii;|LQTA&C*ArM>?O%uCNo^E%d zvKectvB%8xm>Hd>2lOyd5wsUtQ&v-wf8wJPxGWlMnow3PcP+ag($~Nj=01H*SNkSrzkEZQ8@*_&r{A- zdIPoLze`*-rv;Jxf}TbTzFtV_zNf`?b9%Pm(^I{Ml+t}xKb;f!6+KlPoTmGtD4WS? 
zb9&L2)DlI}Q~5sjJXarnqbcn_h}TR{<@=~a$925Lj=b(^j-3}5UD%&zL_J9ybYJ|9 zoIid+_tYumrKkKVmkHH#lelh9ow!CnPWC%2!^|(e^XR_lfAIF1)1pEA20hhfe=Y9g z5)%cUAQ8<|ri$lT$)%$oFGx?hbQR@_qQbeOM_y4h@3Nx8;)=@4DicRvHhSdc{dn)H!RJSMGZc(3oP)tw!Pg}_6EsN(S!4P^X$A)(GB)3Ht?Hvtg z(Nj6Ez_5D4>n+NeZBhSgEy_=YetRnaJPZ3-Y$4|lE%*;u;4TaKKWBkwqo4Lv&r*x_ z_O;OadJFzHEbO__LhnCW$lnh+_oVj^E$WkHVL#Vd)bmUW{7sAcJZ@n>KUm-!2J|fF z&n@s_7W$2~z}Hy#f#<+~K@a}tVT<-UEXsFT)U&TeIk^_N(?Tz!E$m^ZMLFMC;8QH@ z;c*N8wHE%cw?#cQi++8X1^@jPa%-}v&ms%_Hj94RYJqRH(Bn1>{1S`jeq@0^41dy7 zf4s$lzn6vFR$1`xwD22mSm^g`i+;DoLO!+V*FDw0z``DE7IN#bsOL=<{&t^5eb!mX zA>IOCW`Vy9yr*`(WFh}YEc#uY#kdw>@!a7S_J67cKE|S)D=g&ZwBY~F0&lbMhXXC} z6pM0JL+?HL&-X0&&$B51MGL$7(Za3zUdmifJ#DDY8wHEcc z+Ct7lE%@KIsLyzd`UEWG-_N4_Wfp#HwuK#zx6tF67Uh2qKhu+6`pSYI#rG)Bu@?AS z7V_+{cjO=-NWqI=p%e{GJSs5usCB=DJbLJN1 z@oPO^DfJeV<;|Iwor~XIFlOt(EqRM|P={_VDJ)j9Z z5!vpN^6V+4dBqD#=Aj-pOrCU{JCAu7FKA3|$%4{3WqE0tlX7`QM;Ddk&MC@EO-Ne^ zV48P$e0DZkT(T&;D6e?Fw;(&OtgNJrvrqGuq24_-WGdB9G+4AZD?2MbJ8jzN?5Wd6 zK=e0cO`qmX%B#%HEAoy>jKU+hy>NgeA74>icxy!-6kq0^JS}^4_B1anfv$|4mzO`MqR0!YD$4WbxuLMM zw9NE{3)7e^lVD{F3X8!^vPD%#^FoKm!w%*xf*!JSONw(V%F6PJa~CrOOrBO)R-QJs zkn<-NLgkQaUg~gmHOp-hVpbN_;v`Isl~*{bJTLE-`KbBG{L+$gcS1sT_5$=6R5061 zRVKq7QC3mxEnJYt6@Nk#wGc8&gH6xLo?n5MX3xpJwW6?$8SS(Z(qdt8d0v^a>!q|4@-B!%2a#1ZH5eB_zuI$hrgaK7l3*(@)BtIv3qDFHh7jdrtAZ?6SPMbBg8^ z=jP4Lgme-OmSXfmrkiQ;uyN6ZlhOqqhiC?sKcHW@fZvHZd!G&*feVR?2594GY`*1Oq2!9S*~Ty04gvfw?$4caAr=fRz~@ipm#bz+S*T z61hh5gC>jZ?r-LnlzG7?3*sboKe{Zhyl@HBPUGXeqD<}@{-4l1YjeVAS&q4L%DuX* zxhE~dK!fpsw@nNuVn7o8?muXAVrfY!^p<;zp;)Fs8WyAxJ9~a*Wp-&^S$Rn@9Fuo( z_QH5nBMU>KKCbB_Bd?uFLE-Kwsy&isOU(8D2UVUkEj)!WJmAl=buqGKA2aD1o{a&; zV#+nGkoktZN=)K%#rRb?l?`C9Na(JFk2d-McY?{1;C366rA!*k$g$6x!z_>Hc%yQu zr=a^`yvWF2R5&lM7{wQQx#+-Owb4|5?`B!~9%sfEcX*gHd#(c@Whn^5pX^#+tOSs|io(c@3GsF}0T5 zxHM;)mj~sr$qaf+4^t_~9~Y8z7g$s>AA?VL0m2H;r}(0GMv_-a+_#vZ zied(${9N9a{QRPd@&YBFMq5;`ys$ERLEZwm-TB4J0>O>?7ZoY+H^sT7iyU$78m9f%~SF*-6>bFI>gPy zgax7KQ5YF23%v@egqAA#w5m`RutzK`FD}L6m4xq|2Mbm5sZPX6mCQ#yFnF;CR^W+x 
z(O8t1QM;%HWOv0SO8%m)jXyiSfk}D6H-#L zM?mKjJnr!+*~2d%o)pfG31=@KW#mU(K9c8W%)p%^FCQ*$7G#3BOD7Y%Gmq{@CM0wt zla!PhGm^)ksUH)9~SP89a}M==DH0+>V5i6mf}Edf{9`B>qJq*IVg> z-}J9H`$tzJk>hkU%SS7H#T|t9WA_qYFD07&r?Mh(?xvs8pV586-y3%fK-x<=m0gWe z>`19>`F|wtIfY%P68MvO$-MO5IJ81_8EEkIMvSPpLGjY59+9kugGCt*&{T&=We9## z8jXK8ly(ZMGeTU-P@t!=tJFSfStPDg9ds!XML1z3Ed&(t6M=szh_&hQyv&OiBa%5( znN0w@-b*=vm>ZEU{MYXLEgpB8G7kV@cQM+blnA=w%tAzR`zlN5B7P$9tQ=8(JZSG3 zbbW|&XE%PE#D5Cz;bk`}9mhV(K|$Zj?uXP6L_*pjRYWg68szG~}V5XTn3+Plq@! z79-9%>G--C{JPVGw+TGXgs&6js3ts~4v64qh6!)Y6OvpD{=33r{t@dgu~`2u-0MQJhN@h@|{)r7Bk zgyTC*xMMIj2JjO!;cZQvzr%!QZsB;R2`?4TRpxisd(Kc^zQcq&TX{J#CcNq+jyp|w zt$1#n32%tuUmfftzYr2;QC;VT4Q zX~ORmc$EqF3w)&suN8Q$34c)Ft4;XB0$*do{~++EP54s+UuVLf5qO;m|BJvIO!zv1 zx0>+9)4BY2nD7?_f13%f6L`>szb5bw6W%EBP7~fFaHX)jooo?!vxN5==2|Uw;9~Jlv6W;Geu9qAW9wYDq z6Fx-Xr6&9wfmfRFO9WnJ!bb>vr3t@E;I$@vyueqR@N|K%G2s&h{HZ!qEW1m0r8iv`|l!WRjAhY7C{c$*2oTi`(xzFOcNCj8F=?=<1-1g_lN-Tv1L zJlceB6u85Lzb)_>6aJpSohJMvfybHfodSwhfe9Za@KO_=An-~PK33pWCOlo>D^2)Rf!CVwn*_evgx@UiH70zS zz#B~Xg92|c;V%fh)r3DR@EsO-n+bnR@CQx!O9JmO;jak1(}b@VxN=K(yWJr0XcN9! z;0_c1rodxN_;!IiP51`_k2B%_5_r4`-zD%w6aKBhT_*gXz*Q3-l`ZVngr6qx876#? 
zz;jIaxdJaR;a3Q})B>+G;Uffpl?fjs@RcS!Mc}n2+%53cCj44~uQB0c1^%=NpDplp zCValY>rA*;;0-3cO5iOf{62xVn(&7NzQcq+EATcG{+hspCcH)99VYxEfp?nlodQ>i zy4(L=fk&J0Ljrf0a3zP^Rg4KgMc_^oewx7JO!)Z%k2m3$3Ovz-Uny{x2~QWeYQkp< zJkx~d34DeLFBf=@2~QSyfeBwK@KO`*6L_Tw4+y-qI#Y6W%ItrwLyt%Fpz6w?mz7 z5YMI6JiVJ`{D^YQxZZBUr{h1x#OOz_uNeK2_MWM z_B%(y=^Zour$EBb&_Sf768@NkS4ud&r>6f@NqC$NB3&urDH2{Q;gcnNwS;F&_!vmGB&)chZZD7g*p8QaSfa<+Mon0}|dU;SWmq4hjFAgttlf zY6%ZY_(KxjA>j{8_zbE3%OpIel-m#WDr&R-3wtlr<7QRKlN@@Y5uGorIq*;dK%oBjF7a zF8i?-2|rWfZ+eL<nlPT>t5maHkGZdTZEj&zJCM z3BN$X9TI+_gvUsDtb{uy{2~dDlkkfrJYK?oCEumS0(%s3D1=9OC@}U zgkL7%ITC)kgcnHo6%t-5;qek)Dd7ncUM1nfC48lXkC5H!s{e_wuCoG_>B_YBH=elc&mixNcav3 zpCjRI5(&6D53sgu5iXSi)5aFOl#}2``oK84`Z0gy%?jnS>Wec)5g^O1M|TDC*k)>c)Wz)C*g?_zDmMf68>8W|6lfh68N74{wIO|N#K7H_@4y+|0{v- z?dKj*ecwi_{;0p77pQVWRsIoyq9?iS9?Vi_v!zeG1X>jJ|{D{zN+&y@coiL^~K=Ml^*cLkgpB zCfZJP$58+m%_W*bk)bw5-$3+0qFWg~m1qh*h8h?>k?6rhuVeIhqK6Q@hS66NO`*q7 zEu)7MO(Dln6{CMmG=&;NrHsCSXbLfgau|Ik(Pt2y$>>2uQ%Eu7Vst;EDU=wBXLKad z6haI+8GYnj&=fihIT*d4XbKsI6h?nTG=&O79Y0a~cMt&HAI^aVsW zFnTM|7ZSaW(Hn@S5MXExqhBHVBBE;<{XEeZ6J5pVXNmq5(WQ)jg6KG+a~SNLUExgM*o^< z3c-a+8GQlK6nYEgF#1fQDdZN)Wb`1SDbyBnF}feo6k-d-GdhxJ3ay2lj6QM>XbP!? z9E{#iG=y`N|bNreCYLYa&nL^OqRLM}%4Bbq`u zp?F3|5>26-kdx6z&H}xRXa}SB6MZ|;3ZuUvnnF6EjzdiUL{|~r#^{fUUO{v#qqh@% z2hk0T-b%EO=yiJJ;|^8(JlF>R ztop|dI~`t!=wW z)pn`A-5r@(ovjt_9_G4J|`42Wr*8xM5UKAnF22k~+OZF_oH3kHA!_ zZ&#cByeg{4Mv;R5@R`IvYCk2zj;dOR+PL=`wXw688nId3cGNor1zaWyi0*1bQ{XN2 z`>B3a<6gwQO2sADslIU!4@FkhzV!BoC~gNHtjDk&oCj5HjtYQ|xDoeuyFauD7e#%p zOTIpNdh&J2S<}=N-^TQ!qQ2LDQUkBW22tx^1}0*@pCY_{ReyPO)zz0(j8Xj~(1we? 
z>a8R+1kXL#)%AJ=7>ygw?u$5CP+hT!%9@otJNd@so079{-01r$a_NP{RR{Lq#qg}Y zpL&%IM6nJ+I|(KKs|Frngz9@O7O!Zbm3GMLpZGd6X?!nKWq;|l*uf|~IOld7d+r*l z=x%(7O7-*NReyDC8$eY{jdeBs=%|h*7G&;;ZD5&!o>*toj{|M7O`VH^H@{_5+}dWS zt0ESX_YHws{E2wzt@O~qP(KnhwHGSKW{4+Wx<@-i519EaJ)p^>y%1Z4$E$&*u>~p~ zsC`^dq6>a>NpHpD2k7z7i!D|Csj-z*^GuKST`*%>l#&{7{pitldWIcV8$*$*e`wp_ zXxz{*IjLpY1*qqDyIDQIK;qHfMO9}&N=z%EDyGot*p=+L+vIJucj|&p#+b%G*%71enk{G?_m{4YOx32;}?T}1fDVMM|b15k*Uzu zx2O}l=V+?7OKS`s92ce3G}voC5bWu%kO((X#m+)jYr@5qAzZXqS3rhp?x$+N_9zO0 zbgyy+PlqE>wMJmG)WF#*w7qEwU2g4dkM>mTYT|(=YLT|b=GUfbNlixvCfk~m(T%7Q zdsJUze|J~2C+VoY`fL=wX~hzU?KL&&ql))D!`jt4a*Fs_t|o1?SI{d#^%e zQe)>$4A}li)nTo1qm!vxQ_~R#uC$-6YTLTqSk~;;-om{Vn-ca+35;Brt}T60+4T65 zfvGRr#y6+F$l9gWd~*A#Drsde>@GMRBU?tz9&a>Bbj!cHx+VrZFDjD)ws~q`b`4sz z$Kw~W@^e{9QmOd{4yN6fuqR=Q3gs85!$NA~VX6J9eQk@Xd16a9!<3=^rLi;AfS2r> zOycfFo06<;4&MJaN}H0CHruQ3rM41Zs&+7_;R0ODQy2AtrpWy2_CWytsK*`L>grKn z>Lr=(q-O8HG;K^qV7^VQ*;8>%Xdyg2eK{B{Xbuh}K5b*F77x>L`!?C=37Kk5mp3Z- zPgt}^n};&A&vC20`YIY5AdJ^=A#?>BD55Ia1Ec8^#k|L$R$K9YQ*aE4Py3Oc`DQ2* z%p}ICd$%XYlr*2j`$rc$)K( zpt%o0-=V9}G4Z%Z=y8erS-y($mm<%)R{NRlG3}?d4{0C7$y0b@YhT#zC3oh7tQGG? 
z?JC28wD)hH)jqQwqS;JVj=G=2nJO+K56s;CP4E}!^}#=}4)k5{Z{Op|yrfS^dTQ_y zLUi^k1p6Ye(=&Dabb)6B&%azBirF~EhMTNunX2|fb06wQ!JEI2QnVCXQ)gV%^3QuK zO`QW%qSZPGkuASfEhVO@}5HC8-hsap5;TlZY!0<3Djd)U>%{#BIXDHnmO+4l%2 z`oZ&jYmj;ZPg2D^+LKQsQJa11kkHR&AFFV)uZ59Rt5;i*RRg{@N;Ds%nr*Q$jQlEL zd$5dkYktAsKUUTJ#M;#9h+4?2sHm^tJ6T1S&n+xgwM+*XcF@g!cFSfPIzzxmXUR}{ z2QD--qN4bg;$~kjz{Mqtic!T*T%zd7W?vr?5AJdDdmNNB`(g;i&t`_xbr-)L&#xyM z*9oWVIsAGizdpmbPB>k!;@3<0^-AM9;dFfszh29)uQsj|j`m;Q0JZtn@v9BuDsJ0B zh~{hI7h1)I%?zhvJNfk>zusY7CmgMdhNq_XI*`(%^U-vIaEV?GjcE2=z(~dK{ zCt%N=&2iR$0<72Y`lJF!Pt8>Qw=oxex|pAO{MQ8I=$eKBtE-{|*IlZAnyNPL>XmRX zn6am;i%e!yW97i!?3!<5shaeISDi&V3GLYn4}>4X3Aufn9gv=Pu-kt%!(nBLM9yng%K?aPMUKV$_XAzVhG7ol zhosZ*)O05$^U__Ec+zv+z6O^kX+uSgn$+lx!>~6DH8JFyYJb@_1+y22?G8+ZY`;;v z8r7thiXbM^SjM=Nge}yvX!||P7Y5W$x3$qr7rIOXJ24yWRYs=l`h<#E8pfi(t@$$%{W&ouSeWT0mYwWbwRASi6!0Ipt@&}tyA8snj4}$NmwvsK(u5khCQKgbdw+UI$~%t8vO=bl@k2xH*k-2pNxFz z!#h4ZD@8q26zfn6J+aY2ws2}y8-rMx9~q=Zw5ky;aP~1LDDOsA-n)mUeeuWNZ*qpq zbB(uS@Y2icV_jb8ln{9=!(Y`=O*V$?68Xke z&W*-Gt0oac9)c+??EV*G8z4Rpma_5Gy~6cdPeT9(B&;$}^50N$SIamuO1p0c3Sd(A zXa_yok>Con6pQY`{sE`s30;la06S9O;pnng`~#wqU2(Vzg)Pd%Q&*dxs@rq-=Upv7 zkA`>oc?0o0|A4h~qOr8BaM1IfM1I9#3Nw_w?2o#SU9nexfZEnK4?M&8!km&4oNupF!xrJ9ihiKcY z$^IIAIz|bo5y@JBa>-gzjne*PbJR0ApzzZ^3-hIg)3L(1V;7mls2hLPM+uhTehOP% zjb)RneZ8J`QB-Y1@MW@U;OExG_fwP+wpja1p4iCzzKsYv1;)J?iCgCp3nDc4e2sL) zCbAqU`U69}q!$@tH27y@9kYHIie^!WW0*r9p^3QRNThDLoo&o=`{F)W?tCQmTd^jC zG>W0p;LI->|5k&)h4X)e1pH%&pZ-h&_^&4ZVVwVAgMT&We}?n-A^tkfe=6}G-Yvxw zed7wV@f{@l0}U9C5SQ~Wic$TitNtm`9{&V{vc_G9+Y=f>wlAnVxfIOyu+}-^A-=%4 zN?e4n1B@rwwu{DrgWk(vUgrX0{QiY8ZvSa+|K#Wle>x`ppQG9@6Qh5j1I!hlp%}hi z9F@+B;Zx9?Lid9yzgG26h{5_BRlzSO{fZ-(W%v_ZXDDNe+erLzsJ%*ScC;ff>XuJY zMZEyVMf|o8aH)eX<&OLFtxST|IOV!GP_D4q`l4?GrHGG}u*NTL+hq zn!A~m=0_NBsGXN>+lu^?tg!xfv%{Ma%pGDyHWA+38#OUUK zp(1`;LVbx`V}Vf_RMYM%>d#aJ~xl~;)B<50(@J1U&W#UK|`aT;{33sEcL~fmgo$+}e(=0cRi|+=NbQTewN0&BF9$Bj#KvsWUE>o-{M+ z>vs{=`HJ1{81MlI6w}zW|0b-0<0wpx(rHS*>5D>R&cDfrGA>r|1hgyoEPwQXhv{BX 
z>`mMB$8Tlpge?l))BkcMxRki}eMf7ENPP{#>U23quV$=>A}P@8UI8nQz@YyrRzQ-V zZ{OLCEMerhM0nCy;%2~-4+dZRycVgo*>rl>29(4bx658DLGZ`7&9ZPzC4H_KNXv7vEfu zWv#XY!ywlD`hJs`{l0XB9DO35_QDQ&=+Gu4@V)Mj;d|o;(iQ+bbgjYl8_x9y&NY^} zhJz~sopDhx0=69d_FpWZVE9LhGyTIk<+m>OQP~$<+N9)U*z;AonFi7AvDb+{%1HnPp6;c^b0!uD5q@{$0YvWaXL$< z@8k4SI_>B50iCYlbSkZH=>FxLUZvBEIlWt_%Q-!=x8PsE>1v&x&*}GddJd;!`w0G7 zoc8MUb)4R$(^EJ-BwFyhIXzFOQ#k#CPLJcXjaC;_|1q4-(&+?FKc&-`aQc8wU&QIu ze&YV~IK4`z&*1cKou(~Q*w@HY1pfd|SL<{iPQS0y5uA?gFZh3eW5NAio&GncH|g|u zoE|bj@PEbWc{=?mr(e+N_c?7lRq(&b=`5Yz!0D%Sx}MVqbb1}9Q|;pZKXZDOPCv%! z-8%gsr$^GJ3(1@Qa4FhTte5j71{GoQr(HbwblXBRNS3)0B4huIW^fu*Aq1;Bw^`{(dLe~wT+*-<=O1VE$ zj<&r*zo#5I|ImGugELe@cTsK-<(5%yFy+cAH-vJ9lp9L9Y|5QRx$7v$&M;1*90pJ& zluWrZC^v?3XHxDm%AG~I3n_Ot<<6koIh3Z&2=H%DqUrUs3Kk%EeLcamvv$HS{~m(ONhZpxh;t^HJ_n z{AyoXcNrxWb(d38Tz3T}^XuX%$*D`AWM)cMo}`ZZZsvM>&8%W zMO`8#adk+Z+m= z*SIY@xOPWZ7Y3k~%puAw3W=rS2LBYNTC>HA1HZ}9YOe@3+n2^GHINmHMf#LD)weuR z@kXisi1x^Y2Gw7Nwdk!5Y)?gEdnUqIpV?oUFc51jV?1=NT-1%m3Lsj(uKN3!)}xDJ zu#b$8d-WY?560$i-k~9SfDhBz_KRv0w!05~?@rp_wollguPoU@GrdK{iWoO~Gkmwl zC>8s#I>8$J+=>3wGvg-vv(8kp4c|J^zBctt2m2kvemlqehn(rgg0?jy_w4(y`1JJs zm=>QEzfIVleCYf3RjSq^*B8O^_EV(xvG{+owk=uvoZeeDw|{!H zyVq$f=w7vapn|`o0)`2R#JdPJjPv-fk4N;+lhkUzle@@qA0o1Z8Ok|0oPlXAYNT!B z6OM-M*n@Zx-5q}BeaxLE`)xa*4@_h7A*0E;Lmp=J-%jzjS7$~GVG}vv=PlI?o{B7K>22vvnCi;goPSmm* zS|%b28%?=Z$~h>vgK{yHYonZ#azV<)QLck>@s#VNT;fD+NCS^!Z?1Tzy(C%dz-~r| zCvZ5Sl%U*^WM@Xi zM}0^=bW}uq>}wk_U8m?nN(|eCN5+Ya+BIPa=HzTIi>Bi4h*njb5GNVpdpO?^D#OtU z>UR4LlfCC%;HlcyWHu5)eFN=xJj0ql68Fxy`2Ud}KbpJ$r#M%2BaMi2_y0T2bx$KX_qqG`#JN^Asx13uoa>Rs z0%YM|9GDwayvaBl=I+OptvrusB{fE%uxM9f)8W#Vy*!J0+}_|$`q*B5I_gdF88{hR zBW#6sxV{fX0L231ioN)x5c6-V|}-CJxw@_R#u=)EEp#b zN?tj6AU1y;*j~gIaC#g9r#RCy8KpF9pXs4dni(B?-VrrNRMK~Nko~T8sH+@G3+-AL;G=7DFY|l5;JhRu!ZUp7}^Y@Q*#l!jM&z-6UTdgGYj|QNZN0vA#FPH z>&;qcs}`g zHj0hcY}544H9TqHNgE~o?}W2}-`-I1FE=)pFr>dg_J`AI18sM~05g&{SL|Z97xl8tJo$A*us7zUW-=$0I+Ho6 z)S1jl*=D+Z8?jORS;fE9n(f}-;asX)`yN}rWtVolA48HY8!@YBchZiEeHqxepz~IF 
z80VP$@>4Whw!3R%1`7H;LlE{-t=@^&;VPVY{t9G@Z#V-3xdsSa2cF~r` zM%#TWVSt#Pr_e39wN%_n^M9dc2n}k6zgQM`+vXmnc9!9$?EnB=vE z+Y%Wd~!a~$@(mF#(&Vb3Yr$a}H3pON%V#ew!G)L~b};%wv| z7#4g&YHSRR;6KXtK&aEBeXZ@KXw~I!(n*Z=jEFCBLXSgd0dhxtYS5>c=pVLrzus=5 zKQYm78nj#cls}>l5Cy}W7*L`;z2?CkCSlL4CI{B1`e(xr*L*@}eI4G5;b)rM5zXQL zfO8|fka844ZlUYVigv{1;L75T32%Lfzb?zeTmNigE6WCColOcQX65^tX#|@8)k&rEdP# z6l-U`cHhbQ+M(Dlk^BYxYwrIW{3|5zs&JCD0Ac>M>ByxGIQ6XIoMYD(-&cL?F9qqi ziK^iwhTZodtP3tSlkURZ%f#IkNcoMjyqKHqd2kw%5C0>NMQ-ME-XEF5TbeH86j>5DEP4<$K_7Kj!}S zlNR{f&6{+8>+g+-fEt+W&^BX$484w{jNSAn&Vn-U=4alMZ^aKrM4*j)9adDdmyh)o zTU&{(_`LPx22XY?KH(r1aKZCYDO!|ui@#)IgWK1MDe#lb!4IfLKvB)Lt-x`oE(|)$ z<8AhuztbiEU966|@4!B7E3vbUYmxSv_1&)F@SaNhuu3S z3B2ig>Gn5lD!+sWQ_wE_U_X1TKWb;2u7%G@3W1>=NT7wM+o6S{&;kqWVcAt@FuueY zn>pjTSSi*YCdS{g2SnX#Fa|i|W1O)S>a9Nj#`aPaS7>m}=UkPXOC_!^iEARbQVgzS z&NY>DjVG>eh>NzI&M~;o;#`+OFu;kw|6|}e0^WyBJI@}Tt3dVigQ&H*Qvxs+RHY$Zsc6W zoNGC84F(sq_a1s%aD4;s?&0~Jp})V9NCN}@^5U`jd$pUiF^e+kE-Y`+-@d+5oDVh> z*4(WKJNg?G-e72r9c?puH1)agWoYi7-R{OERsnz1{7-e2 zUqW1@yDbJ+Gw0gDxmt)Tp14SNPa0g0bFSw(*GA$R1up3B^o`J6`Dt@>__vIP2-#6uRE-G#INn>TWF;|ml%cK zZZsHYamFIf_!u$XM6xEmjWM`JajtaEbsBL^AuiI}K!eN9xtyHKL0nnHMS5%hSZ_m! 
zhN!^MBS_GOQ;2IOxS+S}M(Azg>(HCwFGXK5_AAVErf+%Xh>9rxge;6Y=>Nu3eEcb^ z_*44PQx@^397d-O?tYEO`_#Z)q!y!mHlD9&ChQd#eg$s<<7OXiUC}88Hu%cxReJ4~ zm=S-bw?ee6*m!z%E|EfME()co)eT;}$TSS^yWzJdHV416VoUM+k=QESh|E*$ZAN7N z61#>YFT}3nnb%?)kU^vxi&2z_m~M4!8zCU@mX!n9ct#@l!D~&SHWwg|wmrBUgAX08 zSh0z!I17yw=NZ5EkEWLz0+SpbKMTeOcjE*bdj;$PV#Va;0O?X|hE~pC%ia{cg_llo zNL8eGNv%T!>u7c&#fNCW1urLOVqYQ#FMxQ*y0wF< z3zL1zu?y6Z;s4ddz-t#$C2-6*_|tlJBK9Ls#247c!tM&46N%Q}*rE0quRpV>q{m-e zX|xR|d?`}emo=@q*+nnQ;EkI--m{baW0GmRNjr>lH#oF%i1>DXNg~*kq0O&!Yk#O> zHRv?f>|+0op9eiweowKR@d}+TzZb~7@&FPhRFwE&OUI9a_ zBL11MrMZ0C|hQ~f3FFE#>xDR^!jHemLIJgkRC ztu$`Cli&6`e%pf>A?w@tZ9l$>+rEC8oL?2{qmZd6p_gEyyg&AAU--I!Z;1mF$qcZ= zvOvXvx`rsb!fv_+kSh^D7xaGjswzAmVNE?L>1ep zHRlm(Uz_f9C#9?Q8hS+yX3KSk#SL`kErWK*PK4Rq+TC8_cA4xYktSuV@7;)f20ZQL zjHMSu0mW7%AQGCMuqXV$ihIL$@G4G`!48;pi5tA{8kXQjhzxzx#Kl?KULP_Kye#*7w4h3^sl~XK+2kxnAL1LE^H33zm1@D^PB~m*nx2 z>vuHlit9J?W~Iv;8xzD0P~9d6wwk*6>##qeog|Oeg!k^bD}j5VH7tA#eo>fjJR*G4 z9pqlza4q(=8?bldMB11EWyaC#JonN|n)(1Slg8TG>^05MF?@o%6JJUp>0^7s(cLpW z3OGaiHbZ-RT?AZMf2tC{Ud#UB~xor{gW{QPjuL8=))6wx0#G%^%G<4H57xVE~1W|8e z5voc0J9A#tZ}su&PQ#xaM<4&6nGdNrV%=>%Gy_I#=za;l=y1}yr_+Z2o%zr|KGv;+ z$~>NRXpHk@t)r*;(Au|jQ~B$&%v9DRfvLPf@fy4pO{Q|;KVT}Su9fFQmBwug`E7Uc z+iEa!)<3{+`xgB^_~GAv=6vY?et*yU$fWam=pBa6@5C2XPFm-5dg5gI`%auT%C~S z!+O29??cw!|7^YY0=rpS?`;d<{R+B=L@h1%Qqxb2PjCGFkx z0SQZL@B7c_?LEBr3!g>a-s4V1?u2d?j;Z(`7UHTBQpz3dTGO?>1` zJCv#&!R&!9)Z#oGrm3hXO&6^zTEO^@t*BF~sJ{PC=O3IZThC8Ib@c;gIKPk{gYyd% z6+;Yyoh_lsv$m5@oulvw)MEhZp*Fi%h(*;_R}tyk|D-;}{L@S%@J|D` z!9P_FWrNK6zra5|^R(oj^!hj>DsrhlzT4v>DrT}(Yeglz0KzT^&FQ*iFJ;k6fHEa% zEcRL4$%r7axYH3HK{}Q0VsWSaEHHGGTJ68hnc=?)rii!`iy%?7ip8BMdW2}z4sh|f z)A#JI7SJASy=M6HdEBXu$DRH_IFCEwOJ4|U5#UCA(4DlU;tV%V&kk`X{ZL`gNZM4< zk3Odrm7yJIzk!0Hn0`Yq;hAvrHa1egfaskH@LI-x9%Y;-J2Hy7&H<`k^&GyK+PG0y zZ{y=kz1xvMy;ZQr`X{;K#-PUqFZdG_SKG6`CPQBnf2O|NEKWG>XX=Z`31|IGeepP9 z?#b!PL;CV0eNZvPlk_?1tL;SkIvrobVgpJ@Jsag_<@gyZXV zr~qrY(PDkaWB=RKWT^&`Mh$*-5;aKHinAzcPbxaL7C1RipKW!W%>G0nt4OoT-Y%js 
z!HpBt)>^-kv76){);bI4oml<{-ZHh`M%K&Z&%dv6{QU~Kvgw(}wx8R>O&d?f-V0e( zMEfbYeuDNh_Fvq7IyNe6-}VQi{n&muk^S-Z^UD6>_Oq&p_ESjt1np<+zqtK4QQE!z zj~VU9{>90)pI7!5x1Uu-w4cJGCul!o|HbX!Vs1ZXyTbmlop5sP=av1%?PpaH?Wa)N z3EI!te{uV9%&oirA2HgGeT$Q8Kdv$PSAeF{)^k+Y;OPWjrO;lZ2Ng- ze{uU+RYdzK^m2msGxlHH{>|q0KWwz0wi!;aU(Nj=uk0^wKdXvpKMRT+fBa+YzqtK4 zF52DxA2QmHO_7spKd>*9KZdH{TH_%$J@HMf3?wmY&D!*`*~%L+kR>O z8D3Aa<-6G0Ee&ry(C$Zg(KVITkT%@$g57b$k#UUkWGC?mN8THrA7TAYX9F(9)YzCI z(n9=w93jJ_u|s5Dh_hh9IWOt6{&(|fA)On- z8_(CGM*JlZu)An!fI|dWknHxJC3dJhIC{=b(6AlvKj1!k+Z-o?aD=A&2^Rj1UBp>- zy`YP(?S3Y@eMlg>H~tLKi7mP5zlZ3Oe@EZj#ruXvT@4NCbNI{gz;05z`axZ4x!83$ zk<@tG@K*G|bT*f#)*VlRsfzS{MI%0f*AD5>5kD@#ESL=Ffkvtt1-}s5W-)_fP91$s ze77(@7;j0ByRY)9iq;e1{>0wgq)*I^9ghviUt!@8?qL3jPVCD&5wKDFcIbz95U|0U zzZ66n66!>qC}4xHzENl-L{;>cChn-WO4Bz%T3SgHaNSM+28^su)zrrzy|+??%RF|j@69j z%n1hb6`a|{nG=ZlQ{LDAh;AONUDeffjDE2BJ06?ApQ1(jhHyHzfX8DCcngWo*aD_g z!#Erh?;blHqJ57j{S=ITa_K&ROu9IBLGJ|nry-^9J>akaa(HhyEM1RTx{+__(nXbz zE8QLIiSuNo3qjq&1r_~wU8=qBWl}vI38Z=+I(Yp-k}7SsGwf%(`uX25}7n z7kt~j4?>Mg?!yRZoag;z=aCTGr!z=!dhEQI_5r0hhBTF4_@3^dU-Sa_^f)?LcF!fu zCw^~#DK&Nm8|Il8syKzYEPHhq#QXOM^cq`+){HYZ6#4a!fN!d9V6P5)r|QE@GY-wc zF@+Czen^(f6#63q_I#MZX+&iBX+&gP>@*@W{4^r_doJy#CF~)ezetovXLg#PXDgBN!-w*D9_&KU8deD%!GQ|&&Gf(Os!#PZ)nA7Ms^5X&Nc}in^-hE9 zT+Vd`=lY4b#0b6$oj17rUN-rW=Rf-Snpk?E@7VKanKWkkC*zG#obd8bR&Q1^gPlJa zytT(%kH`$}kgwCxQ|aEKGGPRQP0dRBk(u<{404Zt=Xe}0~Oy* zTMQ@RLezktZcnaSPl@8i-?SJt20w$#C>VsU#k zza3vEL_H!+w^uaM#Lv>~$-y!H0 ze}(tNiCj-$=!BZyikppqi03C&-=0? 
znH_F?^?xw?_!n*W5oxa(fl{yq7ej6jhd*09_)}3e=Q3~-`D6SEfD|0f>`xE#C&&8- z;^CM~Ucfl6#bXyIf@=f5==1=49{ZvbKLUzxM>XSlhtNiLOd6yAyCbtz-Dx z5D;VXgMa>6?*{Kd;8;iQMuKi& z!^7%#lEbC5j%y9BzjChioa+GkO#Lq1Eo%nXU7Tw*=h{tNU+^yV6M9oHsL^q!YE&KW z!FYdyoj=&doSZng?ew8a%uMKeZHA4o?**Yd4PArlp4|S=GoA5IKrkL z4n;sy>|8`J_63u=hN9Jmx~5~#;Kb@``$rG`oCYalEkWl5F8GtKGG{eYSppKM?Ap5^ z=A*hYe*B}(b%dlC74QEDLxYacN9_t-AGC9B=!5rJ zJi>7Y>0=1j2m7j+ag3V!>_M z(oG-QR&=9Qc5>vpHR8i2619siK@6tOZ#k`JS{)rVcft6wEN5?2e zdrBAiT@VE1v6|!&h^j+3s6QQZCpK@*HMr(*t}@QmAJwXl0T+7rhj&9U8}5W+B>#zX zC-h+xNgqm;u8-4YeN1CM6wj~1bx-cuTp!(i+6L%j7o;bA+6LWykv?XTK3X&IMYoza z!uo(un+1Ka9&@Zdwi!Bk-p~pCO)BG~PNL6n#n8S@aQ`}!V!nWd8a*Z!y9_5*%->iU z$0|lTxq}|l@r16Jy(F=~&@K`T)`fUreS)r-tp-;M=lY0q)e_fmu9&Ogxq`!L*x-Mx z|AKzD%lgT=UDppkFfYcqS-O7OaNU!871z&^@ECWxzq`jsM6z?y$k$!e$ zBz>-TBGzAyc#>{|e(H(sSp97G;JpI-+QYbX1b<}IUh^{ec-NuNe;H~*4i>=u8`l-h z&x7e}O}ZKbr()=;5<3DX)>R}c<3zejeO%Yo_~lGj(~v+{<>)2#8M>~{HMq{^T$gdK zC{(L{GS}7pcR^RneB|Av`J2hVy#;-dBcmt=oqJQoi(mm8KC;t*&k{D!D?|_v_wrZ& z%_N7(JI+?K)3i0-U59)gI=N0o9R!-5XQn`St4LV z2f^qYi|j;L9LsPKOg4+a!Tsr2RJ@x%okYEbiJ%la10;f*FyZ;qjki%Uw30-K!-fBN z9gd5H{v2SmVj-2!{$e4OL}LhKuPmMW2VE>@FJof43<<>Y1I2KJSnfwR4K7<@(tkJk z#9`H)fps;v0YCo4q{;?)xl0%)zHJx02o>)pl~H7uOe!~E zcYvglN2}}{blo;uX&1P)DNH`tZ0jx`OOt7Q&O$hHtP$=m;xSz~UAHmeIF>Tue1<5H zE}V*L2q&is{Z^WPIk_O{@30n7Ounb@@)^bvrwd^;34wVDCo0%Y2=QdIObFLudm$_Y z5&&LP%8Jc|g=D$4>k+k>;({a$D-kjN9y-pe&ss>KyU+je5nTvJmM|fl0$IR9K0-)P z7eXnVLvYq|vJhi^*Udgu-w~VL_ab^lvi6SeF!I}xPsJY{^>u>SOBWbCAd*-0NvE%?#OVufB^R@8DCf$fPE{oeFo`-(LM1D7SV1^DNdoT@=fQ zKVt9lW#F&dlG#y2C^5MH8Io|58-L_jEY07BZXsg+=aRmoSU#)_uN$V}H8FZERZGKr zVySp7RZDYG2GO82d}3wPH{?gO@A!Q$ME9ZYJ0WR z7g#n9-yiF*eXgax#>jZE0eR-N79E^xhZcfWQ zhZ+Wd;8MKjd$bU(fRg zN{oCO78PeOUd$QCaK`tDv5Odg%S-HfNN@E|i&(2q<&5>js36?J-gEoN;QEkr1(Bf1 zPY_oGanaiE9|qUoIoAfxz~J(8u17eRm$-U?3$xk?xSQa&w=sv@Lw`eL zjWz@MW)>Ywmc9DzYkyhm_Uc#z|X-$6Rv0IMf9cwH95`(BP zy}bM(oEBbL;PPYb>c?)>wrE zc2)z&T%XBp7->|b&Dus0X+H{rN^*qe(8ee!p}*_(@;Z70cs^L&78vhQjL z=q=<_?M>CUm?XWMDBo=)!jFjCKLPufDcHZH<*3`YoL-o 
z13)iX<8y=flp#Df?64tt(NoMZLyAry{H0gK$hj{Qt_^`tVmHH8GnSaqJKjN zdr1R(<0;X;&R$F;_6+R)?Z(7Fc<|n6GiC-5&-p0JI$-tL6U-R9zY+z<-hFI$8ZA4Q zx9kceX#bP&a`kuemaT%H2`*p6XMe}^zbt+xx2LlQvrq*(dyoNt!;T}*Akm}d7qFUJ z9X*J}ZO0xxpjj*aj4Ye8-n>fJM?o3W$6_R?^#qE@h&k(LOHk`A3nhJo=OdZo{4|bB zNZ%OI!@EyCPLh*O6Tbj$8!NOA5Ya!8dlUzXXJGWg+uP})z3gKp>})~zqZT9C^N#QK z!si8!Bk%S#7V^IExbl7hw;f;JNAB0<-C4@*|5hgN4=EfYyGWpov4+=*`rRrm7$4UI8Tw;Y$HYDLt~Dpk#p#;)j0+@k3Rc3CK-Y25(f8fZ^ z%6@Q2$@;q^$=X|K^x+9?nzK_`_$=K(tfBO!lfyWng&{ao@p^=Nqz#oja2yWfZg?B+ z{t~zawmN+o!Fw`670V{1iAWQXx{$h%sz}v#{B;WUnWig6Ikb7c3YAXP7G$MrdH4** zve{S&*|fAAcCL%6mNt)N`%|_63!aO?PYWLWg_YAWIiPh;AjA0n)9<-U!_%-1e6)&q z*|J8SR;m7|^M9i|w)2XZV~ab#JVx8a^evH8t#SswC8Ev9K_~kaHDA0c>Cx6zfs{wn zvyJAR$(wfx5(t6py}p`+K#P}y_vk`6a5ED^6z6(?xN3A!wHjRSaIVjhP+5rK=%Px+ z+$(rRNze0%X_;iGPf(4u@0nfVtJOHx$-m~Ly`8Lmzz%lEOPcQ=qMj=43in!|n`0cd z8^|0H5D`Z@CNOiPP!X9U1xnJiMeJiQ%)V@NgmA)a3bDnZ67J>+l}Iq5l;VnTIr>bb z&96)4vqC17?~tIqx1vMTKdwvV6@#meb8X>VD{wHh{*T~-L%F;RQaPho8b8H(N!~xP zE5pTb4;PKmJM~%1-qj=oX=FG`%4B`=!oTZ6u-=hI<8870_+(fP=`(2dGK|T=>SJaw zjZf~@h44`U6T&x0AcQAyg0p@DZ{Cz!(Yz}c$o)gNZ~hwa5?WJy!x~5HHCk5uiwPdz zn8RXnYzj`^vAr2ChOdZ|eFUuL6J%4hx8cm-(%_RFDS`gisJUCzUXvW|+->+v0&EBJ z5dGb7cP`?$_ya0@i?a`%hCJLJMO!xqRQ&CA%@&ISoIJ3LwE}&>iiLC83%;0yOY4+n zzhnK)Uj1kGe522y!_gn8GIcn-qD%`DeFCoA&l*h^-DC5a=++{E=zOr?`ZltkQV4j7 z!Bxq*G|rX03S3_j7xmCAgKH}1%H>>cDr+ZkQF!5UgX>bxHI8$gLg585mwN)8GZ?^8 zz;5yW@OUcDtBSLaQy?V1K}ZlVT&f^Jd~g32M)1hOu}H#_&My>RX(Hw!^x zNjV*AE#!)~(X!Cvay5Jjy$N1Yctr1-dtM3MPWAGYxM1y-3`^ zNnu-k5DM7NQdd)N8`h%sLTR}z$96FKo7Df7bQtbmC=b>mcdLtW@|K3N>yH`6F6}|a zVm+&Or)%%jMLT646KxI>h&F~cGm>?oU1D$z<6KvAu4r1$3!(j?2tvEN;8=e}=f`M1 zCEW2oR0?Af1}p4&AZP*! 
z0bh92`raDxZPx__MF@!G_x+wbGrOAvYWqC?pU*#^Pj+VRJ#$~qIrrRi&pG!_JI(2{ zppEHIt` zC%AwjS<`{xKY}74{zZ@{(Vh@xYW8$EKiE&c2=4+o&%Y;q?Y+V&HS{ zT8MTgOtRkaW^ec{@qXd(Mj0j>k8sGx*?1%QkdNM^Dg+o#-k1o=?6%H{)q=aE@6Jgd z*6DWmygW{5M9tmto-cn4R5O-nG?cN?||Ihg0x<+2unCp#5GFI7+9-9uwmy(>P-E0#2StK9xFZ>^;%fx z2B$%6yR+qR zV{~g`P`>T}Zc~6eZ~+UPPQdL1T&w{9>;NuTfU6W>Hv+y+K+Ba@zJ)SAf?jz^w%QI{__M+W8LPc?$3X1^6rh-y~p^XfrdqIe;z&*h>LENWixU zXt~n!M-#^t>=(Laab7<~l8P1#F0xA()`&h^J- zq!vDOb^#RGn|QC*8*;|--;L7k%9(*NrjEophVkw)wRe;KSl<|k`GW$GnE8*9Y=i>F zyHg`7M80_oW;1)d`B8)#Yw>Qf)9J`XY_p5N7c)095Z-D;PwSkVgOG3WW8_pCRq(ZL$%im#$NysT5z|Ix=e`B;=Q0TQp}BT{e>X|`y8;*V zcP^K$7F}}K#zQ4CB|7#s#@k%VjT@bW=(iwZEyX0Xn=}_|NH4e>K)$1zgnTp!!34z4 zbutpDz_GZp=M25W1^|6>SxPeg3wqvk-9*qf4oh6I4=Q{$k3pGxVy}JDgvY$n}NbeB4VxOvhJbLGN0fsz`zKdy^kt;k@oq;PnQ=M@uV*kRWd zq+`Za4t$A%zd^wd=Q`P9pY0gY(Y1h2(WlghZDU2Sw#t4YCw}WF{2%JT|35>Yj>7+V z;a~Cp9J|B6yG-#vF64h$PVEllumguC{~P>|ptMx}pXl&EoDJWX|Jk`C@_!P)OIReC zoKYvqE}&Zy%tU^+j#D3n)=`X1#?r|WBR@w?WTy7ZChLuCyrJ;OCDXrTq{U#2qFZge zjxH9w7UKfEo&npdjn`EWDbYtKDS0pS$9{SYKjqI#*w-z$P&AA%H}G^~;5|T|6S49l zTn=i~F=*__-!kwa`V2!4$BDDcUfhU*xs!wC?eT#rQ_i2l*E^3SZzS^a6>E_k{tn;W z_D{27Wx2ZbSzoyce;IAAW)C1b?%Rs$5xw>nxXu0^!$G(Z5XS-0NXda?(M59LSagvb z7g~~Inn;decs+iDwQla~E;}woh%DfOSwaq3zqR%qi@HhnYZGK=DOfDG7a`UQC-a4H z69~UcH~V6}eAGCP6k+n79Wrx)-)4gmmk1m9Z~+_qh%04FK0F1{5Pj(K_VU5@$0lQ! zJTVdRgbq@pWmWAk8X~(1PyCsViGKs1-sU2U8P-+;F&%8n9r;o}H8@L{POQ!Mf z0tedsnW0@;?I9Fegl~2D+q+N@z8L*WCHs-IGj{EzN&e0c;9gKt1MH0(@CG6kaRcV? 
zxC?Lt?m$eWVsCDE^mwc$h-YeVj109eG=g+p;;k*gL4j14;c7WYulJ__nnF3>EdnFp~Eol#8L!Is>$yAS@yw+=wQvI>|iDP zXZxEw_baqe3`*N+frweyzP4UgXsD5tx;sFG{3kQ zatU7vqO<+#-*6o9(;r9EXdssm!m7gtn+-s)UeSdALbKxwL^2tWyP1qggP~6g(u-F3 zJYo}M9D0odHBCX?s-QfCvN-g;%Rs;l zuurM|wMp}}FT7dNOD4Mg31Za}FA2eIO&rm?DlF>Qv&JLj8llYw*z3QSj00_a;WCFT z?PMJy_7a#$mgK<5?Kj#Oxy=xa+<^-)at&93#0cp2B?e;j-3jQcvp)FuzK;R}8sxl} zHnjcT@OKDCxA^xW#lMrlHC+4wJMtM_D2eT60t(R?h?N8F6Vq$?xFjL^+j|tQ^e~nm zm|t;)W)4qu7-H1CP{&mlPQy)c_W-OYN^-CP;hM=!1!c#>S(+4T z$uNq-=aMI1G>eSAd9KCS%K(8UQtz@DdqPqbMZtakzDiPs-%;lQ$?JFGbqoiQvIZ=L z*y7`cnSdJxU#LWhM%Xf-KCLWkG?%6__`Voy{ABPIGi`GI`640b3%G!s_rqYiYK_g{ z3mvGSg1T2hl@iKg@Pn6t=%0+U^!FtGR>VZCs#1v(#QZIq+6jOE4Z&+ET2TWTP5Ap$ z80^0L;7Zl@f0s(Cvgi%SJBp*6ebk zkAcr4P}tGpxpaa2{*g}c8jk<#Y@FPttMQKuaIy$OXw_b8{A1`x-!+oQzt{&7n5bkw zZ8wq}Su8f zyFO=*94Qn7@1iYB5f$q;6dYvP&NZ}=C9YoW1$;mbmHghWiVKK_f<;1gdox=O8 z!v9MZK9z-^t_pvch2Md~lle=ILx9uAJU_bSr1UgzX}A37{BGX-z8lIi&_OX@ZJuHG z@6|EVzklF@{(bUG6dt-jT0V+}pU1+J{TqyTp`9fB?ds*#UD5{ciylOr8|GzxJ@8kk z&+HnipWmYmXA=0=V=8yPAb1tcNS8bH7OQ%vsd{fns`q7xis-W_wB32i`0yi1MRB3r zT5Zw-Bm8OfF|@MKbh+W|tBvOZro`>Sg5`?A@5N7M>y!L5dfR#(-tYWZ<_+r2~hjY0ede47eQTRL2IIw8e!(T+>uc7U5hj0ze{Yn3XDLVR_ zJ6So;_GsJe4ID-Qx++Io*mgXc5*c$Cdr zLijq;^Mqf3!1f2K={~|6`}~ig$}Mt=P3%k@e$Sm!NU!(bDF#xHd3~{A&MQxZ>^TSC z4Dees)L~!&GZdWDFf&a;m5dob6BlfI(}FK%*{ZZPK~cxD%mfVU_h8SREA}X$Y{Y26 zY|K2V$iZ%{=zFjP#@>VZ$7y?Sr|o+9^XT=pHmXbrgU~)LxDmiOs;k@qi!IG$JyzFK zUmsw&#=gd}o=0e1Eu3bG4@|^J(w80o4vxvvON))t zUrsVgZ!a-QOQ*m;QLdX4vUO9(X66DkLr2?6sC#@+SPTh!Y2hW&uFb#-oM^O+urRbc zTag`iILFLdw#gKOpzNwwk$9wcyKm;*M5~HWUdW3%K+jMKGlg$|=ez zTvT*?;fA6dxDiX-1>owgY>19;dK4jv9;TvfXhyXCfPFpIrfKX1%0t13T7kkXQ7b<} zI4#J_GLJ`@Il5VdcSmQV#3FobH>WEjYcCkmEiLv(ypd?<8SUE%ByxA=rf3f=B>(r@ zdCN`Tqn&eqifJ(17F`HMn)rjv=!3(5gbrxgmn#(!tgP(Vgo8xAlI&V=Jf2bom=WHA z*M^a8`Ut3!L0Zs_Cm|;#2E~{Qd>HGIQe0Yx;wTSqhh;eS%7Ns1-@;Ss0Ja%`TE*NO zrQ?u9m>H9!EM&=0BDoi0?fY0I=@WjE^M>%SFwj3^r)MPb`#DKNDq8v1PPqL&IFl@q zl{}z+iZ&`arQ}ld>kXtb)COVZp(skR_XpsESPNnVjx@%ud$N;Sr*&TI+E!JoEtnNO 
zbgULIb?Ih-5`*SX+cS8Z!`>rqV0Rh^*NZjySm4HrXU8!I?JZrhKk`_4A?a8yj7*pI z9NfQ<_t>`#NwFi=l~EI=zbw{EM^BPLiW}&(7@LsoQ>;K5m_a1EsV6>cMD1gES1ojp zmX?-ZglXw%+;}nL4ezFfw&GrW8gv}w{}{;saghJQSf~x*mxY($8;g1TH>@M-3NMX* zRSzs+LZEneq(>b)h<0F3p&^h5`B{_2gcR9sBa{qw(oBU;`JMtgh5|-UK=2gi1Z+t- zF?F3PIQr~*zQGEtxE`>EA613lm7@X-GzW)N;b;!|cxt(v4}+%CGa*A!jU0sXQ4U@w zE-WJ4o+F$0v3QhWx4ln7k+IA@D2YwK3Jn2u^~k-Jp?w)GiL#`;2VMlBdD8rLq%RPk zDE(RQh29B$afQ1QNZ=%gTEO4Y=+pRMQ!%;VW#NLi*jot6b;! z9e^p#?5#(79+QtI4R>u5isHbN9@OsSQ?6sOakiL=Y+q^C-6+mb5z|`mSxiyXBx} zE`WyR!~B7Hcb66%gSX7llgzOtftId;54)O|P6_baRTHtjy+5XsW=~Mg;AqC)dooNj z_T8hXG$1JtWXA`vJnnuT++hY+;sWC)dhttWUM}XHL_hwqm(2sG1Ftf)KG(zVM3*-L zkjD}v`1%j)j#iETC683zZbohhH<$`ZglAE3Y2 zrA61gf{tNmSi%#^PV9xR(HSQe4=xABN;6vMlZn$H80&=yl<&0QdZ0XTpsN-< zC^ucSP(Q$WK~-p)7|o3Ex2c7IW>gy;xf-PU4&Z2y9%${M)f_;B!1cwLiJfIj0!2H& zYCqN4>Jx|aoiJX}q3V3uCH6!LyZjYz+YP|LME~?~EPC6Q)`S8k67dqd6>x*=WsM@> zNwNxfrs{5u;CG`t#jynGgT-5H9LtZKGHED|`!oB#?lb$sg16QBu9h0V!Ao6SJ#OXj zMD)fwIVG8Cu@Z>+#PMz0G3!DE@aUs4edM#t^-SS0th#)6`ELSHG|0S6qyx6mOM;Jw zEfpb{*`*{x@==1i93}V&;8@RYJYqso#T4KzAA$ilYN0BopbXSgq-&wscvQ^wd2H&Q zWZ5tUPf8NYRP%Xs3MS!L5mFQM8bKI)qLj5oGQB=P9IHbw3IOc%nUG}pvpkG-0_Nx_ z)cBM1V{p+NMDD0ILOK2-wt6?4X=`Px@RZtw)L=EZ-KV0d{y=~eq3Z@2-kT@w9Oof^E5 z-Ha5IY>~Z*1!B}(Kpb&-PB~+#1bfg{P7pqhEi`H?T7+Ou`OTBxeubvb8J+&~F)aPp z2M%CJGR_Zk6GkNH&9fP;!&r z!b=J#a?s=!UOFNAujrO~UNPeFiWQ>(Fv8LbtE6IQD-VVb{F zLX7M3;EpdyWJnbp^Tp!{_{*~|Ni4z4Wkkni+590&F6-w_f0?K<#}Y4s62qM8&h$?g zywKSM&VtT>!w)E^!k<|vdEi-~~vmg|#v7MkqO2ca@C|Rs}m=}lUtO{oVNhpze z6gDOR_mt&ihc1H=xp<2p0ua%VxT{8&$Kt#A#a&y~3H92KA@>&2JQOL|V^F9>O6WMq zc!WCI-}LK+dl5N0_LCqE|HN2t0I2BVuO&$fxsW13it00fy>M?ysd~P0QY;Gq_UD2N z@ufc=P~QsiATf*1OR64FSiO`cFCkGH$3(;@^Cj)c*MWvta3x>80+K^J{(J(4MU>8F z;30NkCpx=N@q8!?mX8>ZCN6?JDGKf5Xepn66HIOK6B71M%V2ivL}euNG-g(o`_ueU z-JDcjXr3>6!at+Wxo8;e4#Vgc7)G1@n_!z&ZF0hGigNagXqOns1 zL9KdKBiBfdpIDRGcI)iFIIq8E5LCu@2_*^0^FzoD+%}@lI;QKV7uOS}G72wVZ zcCSAlD82p`7xemR0@f37yaHVB05&MV?F#S?0;Ul?XDh(pI)L{pz{eHfuL+pWUQ>?) 
zt#5V!Z&HAj3UC+!GYI&G0@NMAF$!>^0@MiDgMd#cz|$PSQxxDp1^6i7Apl2xodl7Ju0({f~{DT5~S^=Ky0bpMO zc2|J6JAk(-z&jLR&S(IhN<97(x+vOsi32!60sdM6b|v6x1hjT4^mhP%tN@28!0$!@ zFpq%NPK7iF@G#d0kzSetY(^Zhd`Z{dsqoe~8$EwlfLu?Y*T=K!SuD}osqj|^aD@U~ zs{jwO>YE8@?NkUkfB^-#Q~`cKz}WtgsKEe_$2fqQ3h*QaxQ&1xf-(}E{;|*Q@{V%_H(%p|E zG?{wW-nyyjk?O5rSZLdGZqm*1*z`0iPY=6tV1>%bT*(~VWbs-CSFe>_ zp{H-tBNO`=5d<$|64X4zW;|!u*m~w1!B!J4z}AC+TD2A$GC#aXGd|OZ%!Mgx#L;?O7#X4!?AxGeA<@iKul2|* z^txWxJYVVIgt!ISXI z3g^-;mga;po_i3pya|U>8MPO?W{bGJS}AY52%BE;1s-G$V3e zw!2$e;hI<5Pz=W+_%io}DV|Ir>4;R0XtG6}l`3JP%2$Two%Hd3N!i2V$0|!TH zHH#qoe5J;{X}k(I_5$U=17v0l}Ors&slYfjBEG1>Qqb|~G?^)f_#%fQx zUWN`iiV8c15PteW`rUSRx3qDkS$8B`#BN1H#Ud6vUCLO(+RWpnU4bk~s$|Wu33DmY zY<-|Zz^sD~;v(l2RG^#(A+#|iP|jUQCj&Qzd3CnOJO**ST99@J1hgP$8h{E{;V~`t zAbzcm8s;+9(H!W*!Li5)90agNUl1W-p>MJ)Z@;KeGPpZM5#JQ-XG;P%1V-GL^fHm=3FLgb2`zj;7X7scy(KE7r%=BwC= zd}9@`ofa4dyg!Q@4?b{H7+7~&VPHo~k@n;pzS8YjqFj>a3GZn5{4`Jcj=+w?I<}T| zh0m45Fw(a-?)cmOb^AxHjRkeEC7kFf#20rmjcO#!}0z`qgD znmac;fUhdRf8c@=?c17#^9rgPCfZdGp*Wd4s~xCH1$CE#I)KS> z)$a+#xwFK9xR~`ZxNe1TjlR&25hqT3;AMLF6PaVIE&Y?k#s4seTj`i= zALB9B;X%g^Fh90~&(DNu;6o&s-68W!5RBiuef$SLjg@?18uHyVkRupTbjs)y%o#DAq8E?cB5+&M){ zJmI2c_@HQh_074@CD+uzN=n)yb4<02Wd?0!N69DiLIs?3nFc)v283w&8 zx0bz%^I?R!eBm4M!v1J00AO|tjNFW4#k*+LkDx$vH(_4(|6B%P{P;wB^|l5a{^37W4{z2R zzUXS?e{svs#vOb1WB%TvH+&Ii~w6z81 z)pw~F36BmoN49j;BYg`B*YrWf?eH_!9C0=#!;G_431d)#H_!+R>PKZCNQl}j+e zdT{i3WZK(FOZdQ!LxVSQt8-fXq?CA}K!Z!iry7E%fR3uo=){+^Pvp-){&?|o%iHnk z*7tG38w)F2Ml@c%YX1grc$2pw)@^(b*QtThmj8Srze^|iT3{BM5Z+Qy z`Pqnq$}L?RcQouj?8Wzv8lv6C_88F^yO=FBdww+vbTs>io1?SbNQewQbr_hK!2s*P zzBBv7in3gUe8xsV>gF(jY{(OKe+w6|`_uiv?n?@V-A4dE`s1@$a>o4*{BIQeV+uY% z_&+N6EW+<+#n}Jrz)x53a}<0I;jdNj?}PBsW(7aYf&ZC;zfi${_G7?br{Gr*{yqht z>A-hW@FyzxR|$WEg0CR_HGsGDs}y}|7{b_~wPNzg)Xf>02%Vw2G{_wK-0!=m1rB$S z4+Ut|yzJDLK4{$&3KJK|~&>KEH)`+;Ppi5ECnIq@oeIv2}W9dtb zrO#TULxDja{G`Ux!D!Q&ZEd5?;8|&T7^@gK!^TDypOprF4zxlIsK)>uynT7P|8&!J zmy{m^(;c|wc+n#ocO2Ni3Ezvv*vZ-u{g$}k$T@6;zlzR7cPRrtM+qG2H(<=tr*2R-K5 
zNQLhXkv~i@5jO(t<>3bF5jgM)1BY=ozQmG1*(==)`{>J=Jn4G^IPRH;PDra9@e}%o zJ&|01BhG@6Ez#j$tEvjyg6@?l7sp-Q6m-|&uIa$H1vpH5?gI6wx`jZy;e)jZKbYey z-4`Fi>OAI@ent6v{a<^ApRO(D=|-_}_>p)*mqgh7q|%TsnPot@w9bN z83>s-##`D5dwN5r=?Uf9?FD-ybW!M_)1!o_0T2?$_2H%;{6vvApe3Qb(Ffsrh&AH@ z`)Yf#g57Ne{p#3dtf_`23Doe0z^8`?Z-0qdn1t>9H+o=Q7ieBu@KluH0ekzx4G@xD zePHjNlzLkz&&GqZjfjhfIzfDIbd51x=}CaAQvl;nKcEt3wQO7}fK) z>Dq;6~(v9By&GW`xWG?+Wvl0bjc7(lo^+pzMVo73Nq(e>I zi2qU47p>(AS07Kh-tv|XS<3Qze8sb{%I*~5vMzpS@)9*^ydwSm06LRBN1 zhfrfg*knu{Vb3vF1Ful2Zovg}%tf$K+ZKhl&wz|xe=1&0|4NUH+$#3mp?v^>2`k<= zMITJ)g3^7Dd5xOl+We(8%cpSfsv`oZ1VisOfL2uxIV85Ani5 zG{R|7go6`74H$0SPAASzIY`(@FE&n(ocFOjo%fDh&e|-AL$8-huRqA8=Q7F5cu+1q zf5%IBKj#P9qTq_XtPo2Dlo&xy;U&1Kv~O;ao9MN~N}zE<;C2^k9(l!2I9H$dH7;81 zbL5%Gd1pgq0&+pWi_G9h%JV;0~zy4)?MJ^_Bm*V38}WfvgaN@t{_K1?@f>AG1b zV?IMTFR&9vF^{`IzGT55i9yiz;iHt(!_j2KZ>Y~bydyYioM2ZKD4&p~gf1qU+KgA{(;7hZ~+>2f2;o5)*Y@gn22Tiux}2X%Gaa&q)m-e z0>gphd@kN1XRZAi#`muHIjQS2ryHfGpc`#XdTCcZze$U@xX3d-@k|mz2O+YyzzhWk z``?8S*`S7wA+V$)0d&q`1;Ba|?lEzLgGZs2iUgOd_Dzr)Y_pwrZ)ZYF&cf#coswqIXu^Tfl{??lblc9B5ZZvU1k=U#)J2Q(bwx>X!Tv z)g6F$RQ0Y$yXr(;{NL`~9Zq$>#RQRz%w<1B?|$S|cW!ERmCL0%C}5+$gJAsMW6sC> z+W1`cBh|rJ3ltbo0{yik_Wwr79`648Imb(wc37A zEM(gH#9&?Vi`uQxk5yBAyy*nC$1CNQ(QA~ zbthWNZsxI=-=mn+WPbD{TGJsJyD0ZWXR%TVZ|4cLMT0ofvlAVan4~??i}-R<@1Z~J zi4Ik-_R+)gDi)rdy{dKg>OqhnwLwvnS23S-_Uape>QL)T>eYVMt0I~@Hu!6*7?weX!4mhAIE;d6U};Qt$wcEaY)|wJ3qnssxu{ba>6b zEW(}7T+12AXa0#ZgU<{tlgnS0;{xerfWP}`!H@6+^%Ls!se`vC>=pX(;Oo@K{3G-> zF3QTjFn@~h)qPs+S};0L-P|N2mP zB6`_$YR-ixZ1{@xQr}pf-kRTl`c4oH8Gf7Ery$G**HeNy_G*3j%=J<~fY+h^I{}iM zpCQ#_E8+RdUf6-3*_7^9Q$l56^$rji3YMt(;6(M6x$6|7dJV2$Vhmr?vAtZ(vAtb7 zw$~V5k<#7|AV@oHZ_gvN*J?_!H05LeO{ys;11+*1GloCfu|1%Pv}ZOdYllB$_|%m4 z@Z9SEq&iC@zRv*U zKWU#E?UPvfbH9~k7Wlk3ssjBiejA>|ZxCi^&-}4M-xU6e{~VQfR)6f>gEZLS8&b(u znb;{g4q7rc4X~HyH{^1i(5o1iqHrs>Zlt_4>;#MqGvfgVK3BoJae?EZ1AL}}zt(}j z<`^lz92cxSjz0lQ|GuLv_t9tvs!%~)qM&jJbqb;AK03vL`jLVfprEn{bt<5+(7fw- zm=P+DlMTxbeUsMfRXe##q`3lTl>8khNLbMl74Cj8=lm2eVp_Tz1wuGSmu6EMK$R-y 
z?o7)IBHwz7<|B(B0@$h8aXh@85y zT;u|wHOJ+So(g9Y_J2_9X~E-Ala8Il1^nJt0c^T2LQ*Lmioxwq4I?ZLHLTl+WSM>u zFKL87j7C9JJwK|2c1eKZXVIT)jKTg3;SdI#R&{_{pceWL*@c05F4!u2qpl*e3^SuI zYgEKFnC^D?Rl0e*V)q_QwEaBc*@*04?50o-f__i+?gEjyLc?d6dp%cWX^N(KwwN`Sw6EYS8BmSUuY3H(--HqGet+r0IzxF5I$h( zT{HswC_Q|b#5Q0VHVg~A8G5iY1EDbp3azbN6MRt+FU*Alu$SSGL;JRoxu1uBC z5+h=#7ZiJ|npPms2rnn@aJmsn_plj-(AXvy@S&d({xuY-#Zk@*x>?u{p5YcKHH9?o z71D2Bm}mCDXu2c=OfL}%kzSwjUWFuBpEW4!21REN&llJC@8MY;Q@Gz{2A1<)?ZU={ zr3m!ynb)5KHD3?A+~Cj!IVdaAYiD-1v~WzVxb6eJu~zoxf1!o`c$gFryrgX?!1m6- z8X;nu7Mu;aHh5Q&ISpJ<<|-1*UzX)H=K=FrA_MDvedd*dc^#481AOMZA)Jk|@bv-d zWgZ?5G|5{UB95?I18A;r&+@?dRH22sg0@iXk-O5$)q?!8N~A7rcU#*?PJ7(Bf&h)6 z2wdpk-)8EX(W;x>^>EO=3~%9O%2O7t1-twkFH@1g%QC*@zjoQ;YCi-Sic0v+RQDpjcdVYih$A;X%(mYj34U)4g8Gl2zQoRFTc<2;Yol1` z&Y$slwxmc%j_j_uFDRyfgG`{k2}2e)u>KkDDYz+s$k2`OvP!gpy;#O;co7M~N0Jeb zBpDy&8eYUyO9(MlfWwQHSwNM%h8HcDM{aHidty*(bjqDQ2#L|2)a8egN6*tE8Sj!5 z+LKvZaerev$ueXK4?XU&8$QE4%%$7hGVhn=Di3BnH(7s>eFAWKP#TSZIBZ0{aFjtP zZLo!suH;bvo+5{)W+mj1cC5%DJ%5FW94)j2$_+8x4^9CNPREWeG4r}=2-s)+u~Jqa zlqCv9wCNa-2eL22?ZEUzCgk2;fPzmRCBPW~=;{Dy3gC4Bs6r|M!-=|ICU}slG{E0D z(^h)+t2!T5Tif6g0w{Mo&H!7f>nCm@RHKEaf-ME4s+^e+`CTSdY>m#=6Mr zOV@lG&G{4b^sGab^Zhu4@pCP7Gbzk-O}*IHhkdgQd(b@FVv~_>-U3I)$Q;$%`%ww4 zQS(O`k)g*T4fX|_elWs+t7dk@b|K8oEWqsNYJ?{32_FXEmEc{mGtl(jT!GJi?$aZu zd&_ZfftIs;ts!OHZghQi{n zp<&0v7lI)|en||GSONGpNq-IO#OaN1h}(~v>T6Vl!*4}D&p@}SKYl{506#7@vI1E= zfki2gP==zUmdFC##2OQw;3b-8~t zLxu7;mhCFUL4#WGZCdO!FsiE1hYI!Z%V^{pP`5)PgTDkM_a){@OA(aY z1aQuUZ0!KNjMGT!qm_6Ud2+Ik7Q6)71h^Rpx4U(!!oy^M#eB$>_DqxZ%$aemKXxb5 z^tzeXUNggg^~^GA+=B=*Y+72(kE}v-Jj^p%?dgg$KFXwgKIQHqK<=)%$C~e7>QBtT z7kLjv4ESd)pi+2dI4tYrnG?EOJTo5gRUfKNmP8zyGvU((mamq@Rvg;(a(?70UjBWQA`7ISF>KsDU;MLeG~>unCNsSjovR zLvlI61zKI&oo9e9$=1V>XX91&ZYX$-r0DET;UC}={^)oX{LTiJa0j@-CVz5naCpV&l3!9w8uaIl}_3zo4zH>EIRhsu#=f0%Xqh1V8De zGcB?lN>5!cAE}WMz0JsvX|*NnAqZ}ZmJiswi58=E(z_TIgi4NRwOeVFjhw})6P!>~ zKt_p$I1m!!WpF(NYG|LX6}^eiNP9KUtB`GYdfOL{`S=vc(0)Wl8+sfsng{Z3Xm?2& 
zDHqLyF#z*E*CyF?yJjXv#$AX#@P^;U(3l3vnxTAj!n-|yLEXHEa3UaKm)YdRw?K+)$@Jz=D#wtwiHL! z`%l&pI+DM;Y*+lar2Z2ap_H4@~1ay;>k#ad20+t}e z1wFV1L`P4lX`-h)@SD=p+p$`;qX7C6QRY1aRh=D2@tvU8{}=r|6|Dx9PvJ%Nx3{E> zl*`fjyR47&cTSr0m-=>R{WV?aFL@fM89>yS_Ym~|tiJ=%YV`L!UQ~b2mXwilIa+^L zA^-)QsA)S)`f_2>S$}`m75$|R7N8@D8uK24{-5>t0<;?a9m$L8@6RMPly(fYf!m-P3s z@1(z66QaLEu*gaozb|0bw+a%C{VgGC%zFr0XyT9vt@gJt3@6JY59JZ$)iENE_LS{{ z1ma=DYCVJF2-=W;dASyxh8NXdpt4x(ay-}yKwpd<1kKzFv4Im8PvMH)vA;49z4Ss% z>Fc$~UNxnEz$x7evs5J-mF6EUZD(X^9s&%p!aIZotO(j==%&|PDsV=4fTiC27&ySO z9lmgjm-esdZw~anyxSKHB4PIt#U!s=HMv!4rknwktFpg^C{+D}(d#c0z5S&TuAZ z0mUS0wt_*iGy?#}#86(K*cLz=Lu@<98{bP_Z`2d(B8CAin#cV5}h z1k3-sED0gjhPxF8qape#7|7FDEuK)WCl|MD)OuAyfv&IGS^k$P7&?TQ#43~flZb~5 z?8J%c>SF?k3cWxoE5I)-Q}|V*2z8Ii#Lu{X__%*U zdaxWdLNg7mgQoynHQf@l@gXRdIQx&5guV=>3kNDu0-^BDw-k`*q0=N^jkr$o-&gr+ z0CkeTM&+yV&`JJ-Du3CL>aS4wic>pzzeMG)#QTgB!3q_TF<3#i@5d0p!3~5HwCey0 ziCQU_4ne-=PheA+xC=a)8tsc!f{Q`M?;{H#zt1OyJp>zpYCG;FwiXXZ!S{+&1)p0{ zd#Ctbtn%MJQvOVp-*TjUL*+-0ls`b_??(Pn@#O-X!dELmeWlYz7-4ruwElzfLg4@{ zTb<^wRr&n<61z)V1oU}}$0&qpGrnH|G4TG#K({EMJ^%vsh`&xkw1s*P#E(e5Q*m-h zD)s!cY|6!Fs?e2=k>YRfkvW)Spp!b=F^UQce-pLbm*A1_Q_E~s3n~8t){>=aS@Ofy z^544!V=X^yEd{EU`+wM4-u+Vg!V&cY(#5Z8S@y%$a-6D#qyGo&%knP-W0cwdX)W`d zQF+^uM&&3~3&j%;B?|I0ssCYhTJSF@MXD5E*O98+vP(L)@_*2=W1NoBfs9DGj^gH8 zRZE@KvCuW_7;Nf}21Ol$FT)UtZ3zvwUk8R497%budYcmYKg5Rrd$h4>r(l9|{Rgb& zSE?3H3_oBk2jbEfHM1nP`u-g&owfz%JGo?eGN|N&b8gVIfj4eRTTx})k zx6Xd);!D+G5&YO&sI6=uwglusabCk(Vaa6}O2)%@P=P4-(qbRr>EO+3fd1G%svdx!4&F|s@EV-|iPEM>lp~H={3<#W%!qUG zVKFab>|^wsp0Gl8E0iSxzFvyWxGiK)`S56Je-a9A)lPnKKo7reeZmfbaerP0%7#Bo zxHPB^gnNnV!7pK0`KU1!WI8~6=A-V7XqI70um_^*tau=tZXG@AbI_D~X8#$z0|N;@ z1$Bgoo{wYq%YKRNPS$}VpGD6h`K^#V?7`e2i_NI#;WqpG1SsW;s0&bn4h*t)5wznA zN8#{chn!(~W$YImvS!Ah?)KtlAgLAU^$T3ExVi8k7B|+pA&+91M(>Puq<=EL%5j#a zfyjb-4uJ40diWbXyvAL}r^uL0a~uxBOV@ECuna@^SU|@vB?gDiabn`e@oR^V}5TlRq4sQG4_HA{`Gc#UOw>&dr9d4EPWD4pV zya`@7c++J2b<%oGjHp_0G@E7?zmi!w7st893ai>Ep|r?E6yhz#W}2mYXJHL(&hDc< 
zIU8Fkn{mm*f4{=?eeu=byX|JsUBNB@X)94aKdO*EXAq8H4Z3HskF5hUu6`>ct#B>Z z>ssI}18E1d7rSSw%#B*$cqE~e5~~zWVycrTpOVzcA&!0JU$k8rkzu6m+gblB>cKAQ z88*AL?GSd!{z%wm7|oj9#$dl@1=QA8kVI+z3~%Y1@e}c*=U+Cf>^Kh&eDn-o z>SlPpAHh~LbaNglt|*Edo5om3T2xOR{@`(r{2Q&W}Z*?H+I>^F86Ea{ByyoI7$a1|xilcXQzKGe_GE zWKtawD(I}TwLju6r0&U9qYr{ZD+Ok9(Cv{dIOF?)zS2HJy%ls1LI>4~po}tsOw%mbXzFoYZ!1J>RfD=%^h3Ub~#2yA`?KL{ZD}$P)=r3?~r} z#^@kii$ZPlD`1R!FMeY3ctA+9g7g2%Fjq8^>k+8M!!GCXpqP0)>~dZ|eE(qn>xfR& zOJOcSxC*zYg9Vnr#0GCWN%aReqFt)y=D41Lv)z}!zwBdA`qw(dlDR?zL42uhD$zjx z^_Xjf7}&&(!T`cPXtfXGmxK0aHa{!%q!Y!0Rc`tCaLSh1U;o=%W?>>a_zy6-wTYni zpSW+giC}J<+C&h$6*q?Yj7T%=b}a|7Gye2CXS_MlKb;I`=u)>^=xd2YfKpI`p=RL{ zy}MCdZI}$o&!itMC)9|}@>-88BX!7kaMk!vFpFk9upc7}ui;zM0C81S;@oAJaS+!g zywndID0Mw!ampIEUCLIxEiH{Z=JnYM zsk_u&0Me5!XXvF{eWh=qf4!Qvqko^5)MFHY>re!t>j52fx8SKL{4En2Bq0y41GYh6 zTTo8|b|qkug&~0kN&fDXe4hCuW2cya2T^*q++gesuv0Q|b5W%4Lnt9_Y{c^rf;8Ur z_J7j=;>hshBd!NHH&x;ulc6vCNm2MvVc_G_3=Dfp2Yx%}ZD#hz*5ajN-u0%#Dfo<= zk{?^{^R&Cy>E}M9Mq<@r7F3|!y`doU&4NYGxJLlU6aHu5(-s}Cv>;QS5Nr611PU0? 
zM*1JtJG?gx?-@9(r2ppjd4Bf-Px#%y4h+<#vdiDVK&(W;+T9Ia`1J2ym-q6bV)uOe zjQz0@;~~Lm&`miaI4?lyUhHSQyWWelmhW!JYghznB~>n!Lk_$+a)>f*2GaDvN0pqO zDGwWW+}0 z*)Wb0!bLpk>EWUoc4-`er6@2u4prbNR~@a|2r$OM&d_ShaSO!*g73H`^oI{$A#%}Q zI?i0Plk(1M-b7dPP4vLuLk!~XnU?0yLh$Y_Ha}wHHd;>!q4wM0C09567~Q1(-K729 zrZgRzT;H^J@}{Ppleag0Jo!BcBUg9)7~KJuQ`{Y3IVIge=V&GAyo!akping2n-Aj% z?|Bf-{%t&e5}8|>fK9#ZOo#q79Yk%I1#8FwZ7-MMoWcX$DGSKHHrHy^+-N2Jci{2l zS?IDJJvF$)GroWz#d&J@Ed-||G)B@O(~G#deuHbteiM+3(vhU5x&i%6-z z2^X}JQd)IYQnDkzr9JpDYQU)3#Wi5m?2;O(2Q5SkhzneZ3tYY*n*N-ErVir~{lEC} zc1Xi`)M{&iFbqSg2v|X37*1P)sTF*k7=Iv7wkwUlMp_;(#c9M)c1fB-N(S?Cj11=G z6lX9mrzArL9(NZ?X)QzqBDV9lX+h4!&cNHZuDkzCd-SRC4$5Y`DoI~Y2&cZuaEq(q zrd)msuMR#_;Q8RGbg8#_!BeJs;ssAx>WLRT@$@Cs!wa5r)e|px>Z6``!Bb!LWDl^i zMTqHKkmk<;&MH#mg~H!*_7an^;4vkixO1iR*GMPMvWwG+v+R=eDNT@J>G(kgevyt} zBvXcQEFgM(vB)qRl_Iii83t5h%YIKKjwA=wvl*;C$)3jUU=ToqP&YH8|=^A99%4k_qm0pInkc) zDHJ`_WTEJ*tLllFjiQA$_<6fk7Zub4D*15Jl=@UwSvaX>f?zq}{E@t5~gL48E(-#fWPDxE6T&6Mh-Sxi2Z!{kFbNVeyJO(_Bn z5jhUMG52K!z(iRA3pZa-u$_@1%lN=|nOfjuG>(D>#tvxrc)%iVCD-`aQ-k(^&-xIF zc5m$I+@^iwBbaKQhDpK5R|_rwkCBw^dYZPgQH>dCv)42oytuvtSlK&+>jU5AX@Tn? zWWmwc9)lYS1-3y}20Bz_N{HC=$g$TiXwRZ|xi&chwpo$q>AsB1XN$5)fFRCUXiKze z#rljit(yDI5-nq{=V=!k9btB$8?#Z_k5HK^)1_cC33IK$ND+Y9|BellsbI1QQzS5l z(E*n8x&xD?U|^9Sm~pzm9JFAbbYQX-OfF%*TgUe7vtUdICRf4W&K-VvCuESPH? 
zm_7;ykt_oLlpXuTG*q{pzG*=+m!GD(S}IPBlA6)a)E4WA5pO@yEHRxoi{HkS#ze zzMP%);QCk%Ns$<@9re+QY>y}r*-DAX zR!T%Rl!&H7SD{gRuVSNiUd2Xzd=(l+`KewxlHSo=wwz;&4c1|Y%zq0X;|05?=pBO{ zy`#hY2+gDE;8j@0G+?MXTF58%JejKWKcCt^nIuJCXB#2Rm5E|7X{mV=Wvb~jgy|D$ zDH=)$vnbM1^pp@LQ>1Y+b+nZb=2WDm=qw>jtw>AJTtb*(k=9Os(L%*wEX)xQdyCy^ zU7yV}9jb^SNTAlTS za4j3=KQ+ORjSaJJKPnGB@5iAAi#&&0D<&6DC}ePzLC8$om<=`~;qWiTzrBU#y=AXyiQii70| ziR4`*Di)R}B$9cNsCZbOkVx)DqGDosLL%80iHeKm35n!iBq}xz+WMjv@(CNa%`fK( zg>A5}@q6gQAGGXiu$I5C>}xmDzIG$+Yd6xqcB8Vdc}tsp5qFRWC))ZEY--uA6^Qx6 z?i9`)hv3+iJuJJ*J}yfCQmiuZ-zNAc*{5K88W0wD7F1TPmLCWZO(Ow+RDZ&PS5y57 zSO44o1oX}I7Mu!2s6W+_P!?i{L_F9|zi;nQD1HHoB>G(}`y^652~cupf#;(73fq$~ z5F2Eh4wgi?s%zR^3>^pg5Of=x4#KgPEt=23ZCcHC_lnMy=*uq?qEJXsZv)6;@wDYm2ubE8gFpLT!0p$$|o3<6DTaE5B}QTL~Al z@}_#U=T?YY6~~LGSI+B;a}7Tq2z4&~d?9DI%$59 zrz|5r0Z{N&LKc31KP9kQhsIm}%C&D;{z_E=+>~l97@=`fH}7qc%d=6rtR*>$;0r^4 zXak@_GSrGRpK|TdAz4qK1`R9pUQ#X_mfJ7k?pq>-sj~wCtfoiDb6AOYlRSru0NlIq;F;N zcutJuYV1CRewX8VLe4_{lH++N^_oi5AHR>Xa}g0~RfQ-TKmDKfj~c7%i9|0A1il=hf2qz-Hb$#`0Kbq> z*uHiP^hs=b$bwoLAND=v)t2T|dG#M18H^Jz^-}-6;9%#_;5$TAg*iAX1(W6F7{eKL z_>vrDQyoI>L9HV=J2{l2e|n7SO`BG`P&$Y`AQnF*`(JJtXFh8AUuMcqMjR#%0g8h?W05HOsM+eka=F#irn-(Y+oN!( zba#J$@$&URJ&F0XVJ?OE?unQpa4S?y?Wxa1G zJCp%Kx+>66wkV?}%SjM>lu?uIBv1uG8M#h^*rkk`K2Czzri_}tPC`Q8xB>bG2nB>f z(iH&tg<;t-F@GXXuGu|tQ&V)}_NJDJ?>YU2EO*!7XQtSD{WU;hc16vU#vNDIz2CUw zvbw)F?(o%ZZro8+$K#aYRC{~&t4$wIf@5P692}GC8$X(NQbT%E>!gOJZ!56fzM>w3 zf)#u6cWaT~lcM+IEP4)}{o8eXBL}YDz^C2)C)xG{&Y6unZeE4YR=}9Lx$Fx)aIpLA z8MeNIaRqW5geBSqsK+-p?znc<%Xo%!0z`Pc0k^=WWJzjvA~n}eEw1T<*B;*{0DTFt z4t;?mqM>Y=Afiuj#k~9r9B({2jy?JNwaDW{ZfSF8*zN1km&}xkVAxZE;F~P` zTls)W!7!6;6f1sII9BNj$tqo8nfErD6-+9g@3-+hyBp0F@iUvCrMqVlt^c(V)-BY= zFc*bf3;PtWoZ0CcqcmxPsr52^h8c?fuP{2VBJCHuX8-~dDn_faV2KhHICK^~2M7sa zr)q;0}o(Q$XJ4&qiU3dj$=7jPbO3%NnJrPkE=vOWDt5)=@R`jbr>KOUorC)XO z0O^E&XdlpDoisv(2Gwea@ZP9SqF;3q{i>7bSFMJLdiUt`JB^g9+>jU$7R{1r_4t#R z4q@*ljydUJeVZ{))c2eaH4X;e=%sJ@N;eBpB&3g%j?lx20AvdNuma-;u~8gWymCWQ 
z+9_`X1`IRR&T!&l`XB|y%0al`e6?2rW*=&TGfmnyA#E$tio&~n5KS;{NSO63D0 zVAM%Oh(b-wefCR;UWw`bw-JVGgp?u?P6x|}C)^PDP@;5pqeKMv79o;`cc%f}0 z61G|iv+$N4(WkKb-BMaTc5kVe6uv;J+WFMeA!Q|h2GV_zQBzc0%rdEau?p$I+fhJ5 zsnBY#K{d*6h4Zpxx{jAAf0f{zxW}uh_6?5ABirYW;;>p*hOcrcD$!x3bOks7Utqf( zEdh1j&jfB**o|ZUsoAhGDe_h9aS4ajPDfdoeKYhIh_(=!R_t}Scj4Y+IjCr1!-H~A z(X@sKQ3zm(kOxr)V1bYaQ3POlkOxr$U~!NKTLCCr0{qQS^E>#PVd0N>v;5ug@5zB% z2t^?B5R1SIf)Q{zx{arS@hF|3?8#)5(Uoy3#|iO9e-re7%zsISoh1B}r{oK+FwXkCQ2qzC*ra@aA_Vy3!V_i zIo;Nhn%o!7sSNm3(T&|_D|{j!ST6x4*WLU;E?V^KRh%4<22B*NK44&Gz*8~s z35a6M4U(6w@&up7z^CL1)0F_9c&`LAgh2L1nJ7ir4xxC!XY0V8e}5b{rJCNS0gqoz zUTTSPgiX?Xi^agw#w!8lfr2=vkvr{VILGe1J0v;5rQ!3y=aXO^} z9+K_o-u(KqB}f%U7pE)uqyT73n?H@l^h(^Nk)O#TWy_Pj(hv(|VJ1>YDCaa2!O%>k z$WT6NCQea_B0%7=wwe7=5Bz_uPm4T-nsjn7S#>v(upF#*(m>m6h<-;tp*?672N5n) zQ70}l?OM`U8Qs&k&^?XI_i|k~lszgRdsLx(jH9#agMN(u-}ZQl7n@A|G1uo6XeF#Y zI4VfldvFLoj#zf$BcJ)0_S+-bd-T#KZ=@td{nX}<_V$nw*p6@8Lp0+Hlco>D{Ik_b2p^9MyjN(sQZykW2nE`z`j$E={tAEEa2s zsqop}Ci3W?luhJO*&5ctUMt3s`+tLP0PULZ2bySDOZgsPvOVO|i_8^b4>1sbkK-8U zW%DPGKaPEkf`TRqe6UdB@3F5!%pdM=l#5tg<&3`nv?zUV{E;O|R*!UmU|k1YKv_5J zp@XaPL2>1{Sj@mp7-EysR*5ksaxmD797dUDb8ys5#4m=$ zN?fo`;dy2<4{e`+Rq8zCjE^61{`pFaB<7#qXv+Uz<{ui_IjvwK;yS-)!5=fyZoL_I zORXT^fm^kj3B3fs-aVoWj#`Zu$DS7Rs=3%vX|=#P*lZZP zmpcwqkTCm3N1iH4nq3TMoMW!IUW~17Wv>=7sEs>tIAcd2Sb{q9>VnIsbd>;s% zQThmk%UmcF8$q+#-DtUiWdSSGPoL+?)LLSgmQ_KGKU!{Vv{=;oReNf+qt_2%zX6$O zzY#L!2_}=~qa=gaCY)E6(GJtMq_j`$JHYM_X1^i2?){PG{l%*h|dF0KB1mZgd4% zJ~t9_C7>4!Kw%O=TCxt@a@D%4_f+&L&iDmNN5*0|Z2(eY%||ETZf;I)6(lZg0crCu z0EF0p8XL+7uDdlCx%cKFr#Zy$ee*B=BUA3_k7F8y1Ygx6zd>OT99%D>kv);VjOn$f zu-T=dL3n_^2xI*w>WQ=X7|Fc@Dy}^@Xtf(4vy^#!J!;a!ZyNo-ur|VI)k@)fi%y^= z$Tj*m^QdeX3EC?8h<#?TN8inq$Pl{z)AX*-$if^S(F10dph5~7&q%o#BlGxFJhFh7 zl95wzDRB)$q(q4^3|a8^4?`CG{llg-9m?@sunfP(1@-tXzMuuaeJ{`hUm{-pK-uG* zHd~~7hou=t{+6<=*vUwNvxe=`aWv-sw~X{H!r?>N1uh7sS-67PgKTh`jO}N6DRKP` zGS>AJj>Yn}Z7lmVo@oQ?fwu0l3ec)$YDeA61(zf4T+;r1&+{LvB(8T#Jb!bcOwaRC zl}7a%X|m{key2*KmW?!7dq2NhrBT^Nnk>Pe->cH7b0bYw;?K9LG^*Z6lLh(nhg2Fh 
zaHMfv?s{HF6r2}^@X%$L8z@WrNxO&CDw4kh1OQP+e|*GINUBw@K#~`>sQxX?O!HQ? zR^noj`Zu`MBO@$QuaP~0bb;Emu9CGqez~^Cg=>3UxVEnXsa=g#AhiowAhiowAT`21 z9>G$m`!ZgP?uZkFV&A=mm%iP-2(R*C^#8YnSE1qVSs=558X@nCio7o=@~#o`zNpCik|OUKA@7Tdye}#8t`YLSsL1=0 zBJUa@?~97OFDde_5%RvM2bw|NRxNZn$cqk8zoQ{bMdPZWh@n}_(bB4Dcc+|Rm82bj zekuKZ1WTS8QUeYHQUeYHQX_jrP2(S~tSec#tnm+*)fF$SZ~TL=&RE#e_=lo8*FrDs z3qH7J8j`gHt3h#u6HXK!$wu=!QyF9j7OikvWG8-wgd-B%I-=Gdy)_`U|_Tgz;4Oet8(^APOHjk#X~U$Cf+H2>JT1WXzH~Ops6x0_mtg* z1_;sG=EKCW)FM*u9mxuT7=fJgM4Jv}&wL7kG28zX1Y>r^Q!PyxbWhP$_6c2O|D>zz zEt-8`T-^YF*+1bgOEUU^8X*1=Og}WF3HHxpWBRG#AXwsHqK$GEI+6PuV&r9ha;a@g z*%wp~kl~?jk-*?gBokOzrsfs@L&s+(leIPG4H_UBIOBj~RCnk5($M zs8UPr0uF!u_`873x{LX*;Jduw{B32+I0Lt!aU9!K=ChooN!8^@%HJQ}qZwP;>K}kM zxgjih;d?nLinaGv+`xcC>$GLCtZ>hW&!WK+IwvxfJ_%27uWRn3mN0W-Ul=ZDPMiwi zhRaL{H(ZJ#+!`RPy6|^u7i1w`y$iA+$GX54p*APfu~0Q(!R3vL^!&qG@M~19!qJQ*O=GduH~T*sJ9?YzCBvYO%a+Dchk34r8Acto~Xx`;oBE zVfW}Me*-vHaIdRZ@JkiVPOuDe#c&NN(n8Pj8A&dc7%L zv7P^7Xr)A!(RLw;J_{%@BCZ;H>LQoSQugbn5)%S|T47af?4Lcj)`unv@k_0sL^^xJ3|MsDtX zGQs1avEjO_a+gSw;`yETWY9Q4At`;JAfG>r*2H&{1ZFi4^shu#8f4!O;45(*mO2KH?b;6$FN_ThKtYPP|@nzfEt-M9k{UbT#unXBq~@vmy( zWyGqcLpjL7FEa3pbo`E3#kOJYFZ-*3&<0?L?zl9ooj6GooFRiM_`)GJca0^@T{RYn zEnk`rX7^nJ!ad&iZcsR<@7)mV6s(`cs_`Pd++w+fSHeS^EeM2NL6w48x$sGFDi=Np zHs#`wU7PAg9m9oCH>cf5^R^*>Nl{NS$50Ij* zrbrgVuc~~2)W`4fA-;L5gB-uwA31k2?5*gk_%FD&+{sgNt0`ebElwN<>NgRzd`rvOy@%UMymXwNIru6$-?l3g7rU^KQmB8XY$8T@~4>c=e7s3Fa)7+ zDf#1}{DH=${_!l? 
zz(NBjsR5$}40fYQ3zXE*CIXs;jhw}^Tr^6ss4+o{N(CYD0ZlNFL|B(qYSC(IEmYfw zKDAX_FL>=P>_P&95H5m-%sG2WLh#$y^L_pP`SBuq=JJ_2pP4iB zxy{UHn1&$>L0}bTF5p0Kn1=DoG|c0q6g0MMCFmokN$2$D@lNQ?^F$#E6-jDzIzs_D zZ10o!;diQ_H=ub450-Bgzf(U*BMQ?#$m3(i2Soyv5BSS0JeY+CvJ9Go^X(mIj#9lv zr!=$hnN@HyeLNDKD10Eof!@sfYoeexi4l79oG30$7}$yh)8}$jv`(xZK~lRJc9CVm z_)Y2U*@6aG0pHyRr_>)W5W1~2riS|1K3<48e2slvg{P48W`Z;tn*bNG=N5b}l{C+|U5W{Aj~cU=f8s*7N$H3$;FJ?Aktg?Aks>feXpzUii^^Avw9A z7m|~w_CkjRu@~^OGE^jFz835S^oh4Js;}~&RbA(}9Z;$MF%YGoC=rtG97Oj;y&p;h z_M=5s`>|h*XYHW_&wdE>8T4GwN65|-^<3;1VWF4ubUo6AKFX8z2n#)wr|JpizDZ2vJsTqsY{5;>ZZ3KBV* zlBOmWaZs5R{%Q)dk_!}OB~MlKep?UvJpvU?0<~i#&{hm>@6m6b52KyphK* z5Yxq6tVeUVdnxvwd`)Zc&ga2`J$>)SVhvKTzxB!&+uABC2E>)7^LT+50}LCK3Hc8R zIR=nOp*On2zvYHX`i`S>w}|gJK*P~f>`)1h1f}W>h(>TS4(HiEuEsP7p2p;|eO$?4 zPz3Q#Y*6Et{K-sw1V3;s_Lo)NF#vOLK;kth55V~yJm86L&wKDh7l8R*T+Ue;hMF&- zo|RC4Ce#Oj!Y(#6=EN)T`10lNV*PQ)+;5a&b8WJDZn*{w*a23ZV&x^Q+yvxsU2qS! z!bo+ExY0v-<}C1ax&pqI1iR}5uZm1_fkfD?sc>wU%s^7sZo%(7W3H*$tvJ>I68n-( zUTjJBYNl3)XXMLv_$3^I%C&gj6oQeOiuqzw-kos5wZ3Yf;%MVeYhTcez($dN@)GH% zE|Grv66t3yk=}HP^yW*Xw_GCq>?P9AT_XMbCDJckBK_he(pxW)-qv}#(n+$TYTftM zwpyiz7YwQKQ>|(ANR}I-QfpC`3`J__Hb==<_>A25%$4j|eq!_FUz1;R#HKhnIPl3m zW-n*H1$Hl+xEJSSV-5R_8Cy z%Gv1mWj%VrTs-GvOZuk6fq6T|cSCu5kFPHXWOUI?`QqOJHODsJ=dv*lb2 z_#SOG7oS2Mt%n0=cg*hv6VdmjiDEUjmq47wDhe>e5?v2-Ag5q+z&jZ?B+eb$qCAiN z)z8%xPrAvKec*{`B>J26&_e}DMViv#c+IugNm)J#KQWtqCUF4;9 zk=MVAytFRztXi@af7)QuA{)etB>#n#zJdblUzT$gYf^Q*2F(5|1{(rzVXzM@ z{r~xEM4tHnhQFr#Z}ZnmgTF5LuKX3l1ZKxh7;Ijg;IGrcd362?rkapi3BC#Dnu4cb zvwiz81;>VvnqL2;WV1s)Mu6<~eYbdy^5Gf$0&F%~-}H~JNL|k#FV;y?df;bj575TJ zQ#bo<*;DHMHtW=#4DAjKF5FJ0TQ8Vyy}vj!XGg$VxQA)GMcQtE@t~Zq{Jueto&qEO zyCwZ=GU9v8sIvf!m^FhcMc3?Q&0f^Ju^l_#1cn`8S`^z~>-Xh8dK@*sV@Y4fnr}%2 zdrk?n=P{UB(d_xgyEg}Jyw~Tq{q$}$H^o+g+FL12Zh4;itY@A%In%1bMJ>rsj*imsW z%27LRDzFg=st8!Y3FEJNSR@fed~U6GH#l35J%8$7UDwyxLv+otvW+~Er**tRs>{NZNxqpE2w6k#?UPQ76eI4J6Ln7-Cb z&DsbkvE?`q0aYhTlRL;k3gd1`_B7)$&YRa z63M$8iNq2A=)PkQ7vG+ck$$w*Kc*u0s0n$Yqkh}GS!eKT1n&?*%;iVAfeG{rGXeM- 
zBojdIJ)TtF1DGUQ3cP+>(&QtY?nlFFS|3+ZQ}K%V3!tp+HCqa5i{F|bx4I6BTA!Xt z&7!QOc*SFJ_g*qPvFq%luCr6R&Q9$*yMNc&XpFX4*VzSKXBT#zJ++Bg_XV2<7yR_@K-4gMV=!O@NN+*p2Ya0aqc!N|*W zMvf@clB&+a!m5#vE69-+Ru>M*gNsQ{4zR8``O;Ya?Q4h!|ITa2yj|-X)1!EIjJ^uq zo#)mzng!z~fB1jQuT#OV%dn*>hF6n|#_;Ov$g4HAWLGq=7WTk){F?U0Jk?VgpvfBT z93>gGs4$vQ<4{K|qef4$ByEn-S6wa@{5e^-H&(4pqRo*yk$H~6qFZN!%fj`4ccf4! zD&9NalGOVnq>@<=`EO=JD|&Ulc#irTpJ38!_ym*QP%55%_si#i^YWQ~MH!g%M0_TG zo``(qUtN)UP%`auvYm3UojkuSaq5{s;uLUh^VExA;b)>)c)&d60yy}^C=Px}=6wrH z8Q+z23tlbWpH{rLJR`~8BVN89?<<~Eo;LMJySE0GO*w$QIo||Ye{nzt`=YLXJ(`R36;fSVkj}FNI11 zN^en~`4Iqw#s;skTu58v@Z=?s%%{>t+;y?@pSC<~?QN#&0iA{wcz%WoI?zEZX8ZOg zsGh+#hbKGZR?oPjjnyX8T44}d-#C_BfPE!;O|CmRbI+8oZny3P@^2J3+d8a1RiYMYA3F##Li`g?@Pea9tQ#!XI?Bm z<;+OG&;tLe&3tteU)?0=^t)FB5Onm~Fdh9C!zgebB(yqETW}n+#@u=)u>Rg=G`YOT zl7P3MB^+BC6I&J&dod>Va!hP_OzgFo*vgpL>oKu6V`6W|#8$<`K8lHXV`Bc8SZz$~ z)0i06lA zLF^#P?w{B9rnrFb801z-=N`mfg}^>gxgrMI-FGeNUBT^i7TDHDS(FXah34AeaP5HS zQcqteq#d=yyihf5Zfmf?&g?8{@tEZSl@}eRwX}nHvY*0c3e9z{apn`aq5LF-E48E! 
zC!wCfu7L5lQ_Svyhp-k)c^bk_*B1YZ`7_PW`CqLVV4jS7>#@HDUxhW_RWJO(u6SGk&(nd;5t#D*`M7Gra}k3XiTSQI`I%dtxaPdM-eFqpsM_D7 zbO83RtiX!v)8qr*IRC5j`kE)iIof=Vwhd+9IN&EqS;!Mt4o|`OKB`B123s`&Z+9Jt z6tIsw87C7PqZ3c-iHD;ToAgA^ApL#zaV>h{>FC6BdSXj-;srhNLUdxQNHn`5Y36b0 zH6OfhV_9GLA2oULRf3Ph$lv6>KAfYo9zMa{#4)$$VBYqi|7O3kf^3%#Qn_fGy4 z4fEitN1hJAmLOk?1uXJmZ-FtBFY#%9yW1Sc;yei1p*Xj8_!`WYsp7SZvWhlvnhsSw z=zWv3P$wssK;ox~2?B}zo!3x*`5KmI$QsslR>Q;IM=pcFTZhdsvWp?H7AIcd{6Uv4 zAirQMOp7s>NMfx1XI>wCRc%>ohi^sU1Kd%Zrdz%H&ERQPEv0UiMSbr|qj3AkJW z`Usdyz#Apt^f2H967aty;NJ*%GXaw&;LTycQ4(;x1bm)=`2=i4Z{gMb!hk6fFkJ#Z zNx)kPD2_0?6OR1aXiR9PXv_sX(3l4acpCx5*+qAPKMc590&bOnc?7%%fFbNcPk1E^ z^|FNeyM(%$P}2xC4p4K$P>)HdMH1>X?A`A_L?|nuZVN+Alu-9cs7-__1Qf2YUHf0y zD)-i$*0yN*e!<~Y95b$MklnMfCGX(egt*4TWw`qQgra&)J5?ae$7J2py!hkHWnwuy=iK^ z?$Oq^tljJ4`C@h57;eA<@Dt|1*~#9(+2k=v5lpdzwDC${s#dj_m6{T4as;3ylFt>R zJOZ73%mBIxK$_>_#0U^>xg^MEVUQoN9#?>O3Rd#nuBlj-@r+5Lnp0?V=VPN^Lp*3} zWhcFXJAE4iXh4U)_y()c>UJwGlAXUBqF-&(Tqm7B-SD5&U zAg>TakA&CzL}@yok18z&yePd1+X#Wbfnaa}07VJfJ)=qhE&Y8jhf8!5@0(zh;045@ z#AtMKDm$6IoyI^^r(k_AjU9IfsxkU|+Nsp24!;#2o*kaX4p(Xp8|A~D9{dnVK(ci9 z_(+GaNxVK9x38l9y^Uu;8}x>ijl-r%a}*8$1H{${1RFBmGvib$zXXg?Etc7t_))6= zh+3<>&G0`s=b_-_o_K9mTP)z5&-2~)oAJq(JqY@#ykUi7(u0k^1Z8lz@-4)2sCcu) zUVgGE^351De6cwG7WKVw$umEAcZ^el=iq{44ZSu|Ou}4*IsO;+<$mP`pl5U64O{Tn zD%NTnk7KOzwYWvR^aDDuZ_-?FjRUCA4-9)ZS^$5-s0`x8nj}Z=+Br*!Wuw!5zXjrb zf7?29e-wywth4uTKUIv6_o8SXCyUK(77hb>lE*VwpE9mThz5#_DHCbGTW-*YV!;t` zJ1P-Vr$zKaFMYig_|e${kq+KY(FgN6&mE^d+>mn#(lhQ?5z_M`lHsACFb{!Qga!-J z!(Pl#UAEM!d6u}cTOF>$!FVyv`rb_-(xV?D;P;*SkswFc-Yv*c0n$K@9>W9VsODjk zqnN5n-&qB>5g?Uz>%^=&VPSYB`Trmn1C=)v%LD;!*qsik)~S)MBa6LHBwKkO_HWk0Yq zdR`rr<9lAaO0iQs05z`4Z+7HvpVMchscb#mXCIRNQ;n1Ef}+=A3KPSO=o-&94rXP7 z)%`aCTyW4;K5&?XVNa2bStN`_si_h$euwFZ({$LFKKc++tMH+6G~7fF-*5}3asM<1 zbcVVvKP?Rvx$_f0YXm172)c;~{eYd~ zg^^dNnOhyEFGo_ZX#c{B;r6RjU~I~T$LO=CeSx#HYL6D5ylBaTOQ5s!y<^?wS7(ebN&FC%YCUV& zo>FXc@v1L5n$NN=KMc+UpC$rfegxYAwz=T_*u_#a3Aqz+!$fPO0*GDcIRh$4KOA=U 
zj_4ev=4*Vm;1T>pTIj4fpj10Bqakx;SME2_JE*UrI~JNM+hEK{bivNrT{WGk-mNP6 zwJJ_eFGu54_%mFIC;u&$4u^!}8$>yyak^(F4pbo)IQOL43KgL7xka340f);l% zy}PVjb>|NwbP;R~)*=X1QaZ#J1*h;frY*zRrR@1K=?xseyI0jB3*OM=00t$LGiEI2 zu-F$fM*!}m_=RmSRfJYu`8l?=`N<|RjH`0u%B~5H#~VP6@aEVorJC+;D12t9?lEx2 zR53g+CD8gLFmz8$a?gTDC-DS!Y|@jz0fM*#9k8l<7LEXXycutl(rx_o*0!}ZT;-&X z*X+2mG$H`3_>lr5eE;TN`Nm3ga1>4=SlEH2_A9+W_lRLEMfW+26qcI ze*2K9H27aY4S+X?0c{fS6jUOB{TpE1EtS@IKi`f}MhHhP6CgeShn!p8+ST}|!;591 zPedWMYf-q|5R?ny>Z*hb!s90k{HqX- zIthaMmW1njT8Q_3e~$+g%;~81{aYPEi;F|ujiLLAdBW5%{9O|MVF{l@cvZr0#^-4K z5b#m-1h>E7Bqd&Nn{2z&c89ItcGY!Wbw8OTGFBs_>WRcSrTSk8(dlBU!~LYC@mq`& zh(Yc#iI9Mls^^fYxt~ay;IY1YtGoOlC~RA4LZcT@W8C(GDG&zBQkjv)jI|RyGY+OS z{)uA^no(6t&$54oGygilQ+_bjR&}$b>?NCftQjog*S3!=;_XL5SMa`W80xD05WO}k zRWtE@9qtL1aqf|WSoPjUP1HKp9NK|-@9HWvW|J(Q%lx^ zAD97av9!aF<(ZE~67yo@RpO(y!@oa1O1M4ByKK;<-*?&k_byZZGWnuEqwoRV;R>Vs z(`f(Y@Dc5A-yZi^(WTM#rSf||TnAQQw+w3!4}7BYJA98-WD9E|-}`?%p5+x7h4F14 zZ~wmb`TyJT%e_w=oSy%H%>{-$*_)qL7wyW3`+HWDCt05s?xmhi6Yiy+&f}x}=_2}< z>hBWnrCe#ky_74D?xpN5_*?N|5!x5)UrIWb>ge168*=3>@Gq(*;H_Rb26LEN9Imqt zIKwX;6g!_V$lwlw{-YLP0hpe_jyT{MZ1!V@KN02wNZf7!=L8Gr`$$`0=HTT02JkZ*^iryJ7bT|7A1p`YVRM{1tFGk0~D+leCc!>+v7e>5+VOZ**! zHvfXzQxbm{IC)oH;C!J^{{&y)6U0leDqyjhti{mxQ7BQCU(BE^znJ9_)kb$?)hW!< zYzRLzC1o+|4IH{_y~u|PI_5J?z##t6l*CB+?os6tjxI0otMZz0->1AkK5+gnZ-?)t zE;vjfOl;%oq0m2wuQ6f4Iri0@9@tV~!%ksdsL-N$7SS({?~u7_A2xlb)MHIN;U5#A za9^C+do7-jcCYGN<3z#*I6hboSj;AL>eV~D%e+U8yh`MWMgMrDFfgQ5)63r>VG688 zqPl@bG*3itHlk@Fnj@nrRuTKDk=Y=kKQ^LX5w#l8H0lgOq5-G%z<^VFVE9P{9#}e7dGBS*qYxut*?8o5^XUlHWc4h zk@=kY1AkRkTDhghf`P**(cw3rg`D=V2Fz0xJKRyeQ56yfQST-gE-kEa(iJ6Fe z-CaIU^x*#T`oNIgUk1`!acrsl7rXtokvQ0d*B1Y@BJ-kl? 
zahW=@W;5#(!sE!u$SL*MVmA%L#g={Nm*Ju*uLSzv`85mG%R;O#EC0q-jLtv4S8K}$R~P3HtP-9r#IyEWg9ZX+VG#XVw%2qgHE}ln(?&WjFeSyy2E{KyT^+mXp-heMUZ@?3=4e;m<_@mx{xAX?A(HpQ`Z@@vl0cXEM z1NMheiE6<9%QYaHKZquO*?9xXV;k^{-hkzL1OBNuz^^x8hu(ljy#eRGLj&3~2Z7Ev zmuo=u@E0Y%>bwCJu?=`uZ@>z@0sqn)5YQX&x!!=F-hlJpp#kk@fIz1!{#KmaehwsG zdJfEwZNO5!0e{jP@V4H7wR!_~>J2!gH{il$8bDM!%mJd)XBJ0+tbG$2d-P`Dceq zu|J|?-}W7_?=bp_OIM>mkR3C00`|@a&Q#07D9o4XnEyq`e3g#*dL8pGbK*>}ba7JFyIBqC}XlJ#mJy(vs+&CdkwYkt&{ZFJoUF81N$!aE+hH$~3N6W&J&G5MOLNhE(IDeDW1A8|i z5Nq#H-n$XXNY-uL0!Mc541CkND{ujN0yc$PKM8zEWg&UW^1u;l2}fGDAg>;&^+=sW zEimi0UeU#j0^ZIFBxB1Td<((<9h~XAU{j}BEKD0jC~VIP{{G#zX!|#-`S(0Oaib6^s3eE0rwdhm8b`x zh3Th8>e}sinPr#(t~8=~qQ)E}nkJ%IGOC*aer#lN09@7AYDB#vI>3l77twx3RG0yV zD2NJsKp#CYsy71C9`HRQ)Vrq<>f6Hz4M;FT>G4Krh{*^IZ$rC8Eu$_nBWiaBiI8%{soi#uUn`( z!Xz-sKf~gJNr2Bkz$C!uTQCXm8AuQXOZ0-L{0kiZw=L9h^n$x!D{%NrEpAl6=O6V7 zj`SeKkZJ!yWz1Wb02FVy7}#>`h)me67lxssxZ;MjD6zS?9#)0H9|Y=-H5ETs@uTOm z7Yb$lejS1HG&6i;p=7UH96a5mGo&iPY^G~4Ye7*~c&gXhibUwcVMdwo* z8|r}fHfbpkc+=pKM*~L~jg(h(!xl(8L&p}pCaeqpX(Reuzk3^|$F_mOz21hUdK+HT+ptP+!>4*1X!2njq=}(p z8>U$s{?i6bwR*pM3%EcYHI69V>n-@T-hw~tEr3N&V7yUp0j)i3fwV4k)`I2#c?&AP zdkY?lZ2`r5y#>$dE%=Mx0=Q8Y7}M-V%xUJ~QyLaJwm|N(`Og}W(qSSCJ7dR*%rgK{ zAK=Ld`oGc9he=hSDNUZ5h2}P*P4f<)(x%WJeONF$ox&ZZ+NddP?tt~@os*v#(O7Gi zO1;OI=_s$%QKq4jDAKUTCrvqgN<%_Bl!clXZOM>yr#Ne5XZDN;)}kEbGu^bC_<|_0 zrE|o86^r-+9dVei1!8~I5vOUBh|-kCC+#_i&&Jd5*+M*Eeiqwl9-`*baOAK)CgNQ;L z3kCuqTJ?z-jmdn{c*!R%W_;3Q!>2SDbcT4e!WM;iR|8xi9?fCJP`ShypB0OEwT?KA z`9zGyV?JrF{JR&Jgcpj1!G6$2bv>8RIN*i7_r$v2(Q~Jox#)lM#Bhs(`PN<)(dOEYFGo7L$MRqfX57Lz^h1svgpF!Z`)Y|PQI^O1C9)Z24k$B>-zsVGqmH; z-2!KJZ-y?BiuJ!g!36LWRNi{tI+4{og!e9yWB)P!Og3Z-nBX zLdE|tA&c0`qTi>1i_;qmagWx#ge~$4^>h8U1Ww{A^(Zp{?Ga)lz*QzH_QHQOwz+zG zPlf|^s`oelM7Pu2Hbh!8#-lMnp>RMh5*SDyDRYJt$mR~8A(|WMKeV?8zum|MP|FI( zcJmjH%C9A7IAMmC13hb0K73hLWZm1ncusmo`i7Rk@YYZLzVz8!;c0S6M*6N6*tUc+ zR2Rse@>!rfIk7vr^?E&~b~r#A8Nw5D8I3L;C;G- zS(BgBVhYTA#C~J9;@iqHJl?>l(CWab(ly1mf1Hu-Rf^{X;5Xp_EJwNi(W5N2{?+*w 
zTvs;S+!hF&xrn&|huh9o>vYReKMtUp?_xQU{(mg(#>@3=5XOt5kBZ*c{#Z76AQ~mc zOHY}QJ_HGX9Fi|uYmZ}lF(@!VvKO=)D(0x65*a3zK>F_e#dC&dr0;FvFxqP7_xsXc zoWJrb437R7Mi@19ExJUPPc1%KA0(f$nt&J}N!W+5TU4`4RHF|NS#Aw$TZ?jbIXZli zIEP2reNN5I#_))Y_u%eWd!e8g+%GG~h6OYOM;y#g#?Xv|Z%djnL5_t1zvOezWAyGb zWysa&ItGScCHrvHUk69gOF3RlIHWH&6@i{0oQMIF^}g*w;PIK-jkr1@^HJJ4hUb7N z%zLEZMsx9Pzsm4nD@JH*U`XjUbj5afb#kNEtrX)QcXhtR zNgna1|3iIzSFPh}v8&@KV}pd*6x(u3`1`ZGB4&^xofQiXn{oe~a?o)LvhT zU2VT(xF^Ec)%H8~c|AQ{Ozrg*AJ$&{ozWBbN8WEa#qh|Uw26a=I?cNSXU<@0fC~k` zTDenH3Bx+(gshU`DP~b6q=~yNewffLoLpu<3SuB(SfSt=waVKKyA17)TrW=gqJg;b z&{R4ZX9+QDIYeP29SMix2|q_Ocs)zvDkPY2Kl2n^K?Q{C0iHpfNJ#s1)PR7?T%&(H6L))gN%Se&)B0YV~0N;an2NL#r36 zBTgpyZMjHg5Z*vV>Rimi53opcF8Q7dfjtlJ!B{>R_@?Abbj8nhV=NEESOy8 zXdgxc ztg??s$!Fg)Khzo8X+4mBJ#vCc_&}1raTqk|{);p?a;49qPlXpFNF#4|@M6@)Yc3ik z3}cF{@5w=@)e%Ydl=5(3cU0#UO9kp-~gC_*vlNR=A+87{m+p1~nnp6bQ67 z0X(Rsp$9JikVe@L4P==a5Z=1ide zUO<9sEuoJjgvxs?6=`0?jJ#zc&xpMkhJ3jLA}KYr~MAc7WU*1Gy~>xw8Z0mobnHVaNj=ARA*K4~HRTKSuZG=@`hCFyw`H zb;5HZKMFg({+=K|DBeH02`-S1lI~xJ% z)WQFruewh9_nmKoTa(5_^+E5XH0Xf16)DsAK~~}Xi#RVB#V>J3)Eby7af^LZ=>#pe zsdOCgR$HqoZ=6(pM~6Jv#L}6^RWRvoIJWfX@y4;KS0M#KUU(Z1P~;g?aBOMRP2!l- zRl{*?$(+R_eoadCMoe99VZ>p_Z)HT%~I)4)^%EI7NwGii>OWy5d!w5&DJgo;B{<&3Zi#gzHH` zJ#AQNmh}{|p4-i$B6I-uc%%Fn97LBDpgZq$8r@j{K?dDG)k$4QivgN+&z9`TV2)JQOcqZ%}DU{ zec^h%wIuOr)wHE;uj+miO$6NP!eD>}^tgMe_&_dSVepf&ohJYCmz^djJ<)0MS)8Qn zr0!x!rJW>i_@1N7whTH#Yv` z+DJUAXHIX8BomE-&tv1;jz!{8#fISMrt!9@agEjExVT5*wcy6_09AS#d1b_8GD9GfUdV{}LPj z^Xrj#RJEVH(P{F*y^&-#;`-yU@&7(;igT+4RcE^?Rb@DWjI*{ibvEUBuhDY-6-K{c zW%kad+#)Nyl9HskQ8hBpFq8svJw=)8X+d zybh@0jzOT+HnD(jNO*6oL7v}%j1PLb77vi;vDF~A+j9lC`!VjIYwY(UL7o?d;cF!P z3le_(ZGb;>qkum@1n}Qx3i!!k_&X&00}_7Y2*AH6;WrTeT?v1E7(QLX|4hRFf$*%$DZop4ILaUFg2TMSS9NVw-Q$PZ-F@IY;gf3<;wHHJZ-Puf2SV`4w@aycU5twH zIPZ8%6663ol0AL1A#YD8F+UCNp!J03xkG8CiFB+0$-q~d7c*&LJ#D%03NdI1ypq?zQEe%X>cfjbs#1L%tPL)0O-jw}H*;P- z0UsVX?+@nzszZ5W9gYmAsqWk1Aw{#|B>yt280X3x>rr*Bh9+z7J5$jH0Nn{7yEP5s 
z5dAeEn1l+BYjl3D!EA!(EEHPJm(N9DE%)Em^ubGwDYO%)(1BAPj*h}RlY1`uRL!GH z1|i_V~c6CJ-61TIC0%dwRM&Fl&IPcmoh9%Y^H1QX@X`7zgco$h*EPDwZVBH zdhGy0&fIfK&29wL5vB0xTeF$LD%>6)r#ur+(E;H!gqxwbjaVjLqj(M>f-hAL3`!_gwoF`eU9~r!T{9h{3LUtEH=!>Fga(b@^|qC=8IoJs zVWO_)UHocEumsc~3q!fuT8ZblsJIt#_!4dcNf$jIx;u7%BVUn*Y2bjEi{Np9^P*&) zzaw7g&mvyxn=0a;@di~ITXEA0p{ZuVN0o+4XJ;wZ=Rnvspup;gipa%tC+h_Iq9C3v zBCgko{x4+f#S1Zl=4RE0)EIeunYES$aFYz)D8`A&j`0P|CaNZS1~XO5Dosn|t8kkP zhQkj*(OI!5jLsLGhYPCNadD*&@N*Z^Eydid-Sn@Ns^JDrj7xdyC}KZ#xIbnM8n#q& z%z{43=(iefN1bpT$X441EqrmsAbU07Y!Q%DWV_^Ai9d<}GEU}CMd@%xOjs;Ys0nrtDbzfP@CrFs_;!2cO z<6pLV@pM%AFDSzP9b6AyuDMIXR4xxh${$VTKte#V&>=dByAz-WiF+)=m_$z3W>U5^ zlwJ+*xg5X^d?BVK{x++*Ta=n&uo#^M_r;PO*-6;F!rfF$c`Yl(%q^hBb~K)x0N;%W zBMdyM#poab!}xm`#O?_|j{y)|6(Il!LF{GS*3YKkIpF6wwlc*KkXWYB^{@#OguBLPZ_v7YU0_zZwC_ zX>*)U5-FX2NqPqQwM-{kf|^LG2Kprlu~PkWD94;xl2lbovWIUlmy|@(vt@E_yFSuM zmUL>y;UV6M=W=3OFJ?S@KQ%Pz()7$L@RU?uCuuq}1XbHCGBvy%Jf#v5hwF$^-HRV5 zOxW-R_@#CtH?Fjp*JCe5h_iY*p709=!jly9e!*C!BO+6?KEpA+=*98LK{`FdOm*Ns zEsVH4(lfp!^9#`!zKowXUIQ)*hY}Z1l2u57-zwGfxS(SA-adTe&&ZuX#rV;cH*jsc zAYqS3NEkVto{@&K5&8vwA~QZRh#XjCwL!?Xka*8!TSX6HmRiE=^{PR?J_pHmxX!2% z61EsfY9{!^+t>I)8cCR->}tGxhoJB}xhu8K(gA-J4G10|3pHJb8XF6x#zKL3iI)xn zl;%3h?gagM+xjN)!P_~y;f&9qUtfTJk=I0I&DvDTny^Yx)`a_#LzH$2fr!8CN86jGlR1*3&jrMd?ui!P{wB*}R2Z@3!u))Cojm70&i#4)%yo>bh3KmpOy zcP#3GpRa|{2;8LCb0V0Fkm^ZTcqMd!5rg5x`X@NYui^e^(p~JgM`5*OB(Ya}o|D)w z#V&q&JZla zo0OWfXE;5Miz@Oa#7NUh^_)Sn@h5|ih>(!c*$p*+#l=AE^|jG=eQ~JFrg%CH z-mVStw2)y=M|g7W$y!r&mt>rpsTvJ@4QRiYbIl8sC~00SG0H?dAtPd zOfqPPtJdlI6bs#muS5;}T11$VPhltr@*tgYG6#&KOLw!Kd3$d5QU3*!8y+sir zCxi>%sjKCn`PB44e(RiUGm-))r)wAb zWTV4Xj|Y8On%1+f9K={-ypD>Haj?FcI7suPfBqY+uP&JPhW?Q2)Kw^Pd1Vs~#LIqWAYC@B3xq@02yrZeFGgyjXpHU;s1w|<9-f#B+!nxDIeWDW z-1Kf0xG6Q=VmkIDWVJAkXIp z3hz$p`}8vPah#LLKgs=#2&W-D2jMVfdqB@1WxI>v5{!0nwb4mXHpuMDr1n=LjTXX% zAQLOuw9L;z)1mT7fNU5BJ zj1~dBh$Kqbbwa~q<`Z?TLOP*@wSXjwCWN`TID^KrLLvYXred6FP%KGPow?@~&(C8> z7`PawyZ;i>Bb-gEbfpQ>_y<(;BC1Cz96EG?MBxO5=WEl1IyEOx*8Nf5+xHU7rCRcyC{ghal+ZoUX4C#%P6I 
z#~Clge5dK011@!Gjw?bd`v35OF|Phs>Jv4oOeJR{*6<#!V_cRg=4NNuhOWZkuEx8` zU3Q1QCtD%b7h^iwwE^9>2kuJ6dnfXOLl?ZzzVVg7%n|cVZp8(;D;x8QM{2CbN#%6Zo3KM3 zM*~Q}TSRw)C@qn86USd1hGUJLD=rNz62STv4vA^vqS@u)*DM=r8;Vt^+rE4|O@ z+C+O~IgN!gtkWM@>Ttiu*FlAp0;Ojpr0{)>+moS7n!bVY(JA%xiDlb~QYB#3%w6mv zu!BO_9Z{9C@D{v1lqUF>4%8k(+9O{VM9h)7QT0rcayrJPuY*e!KR10F z1(l~)1)HbrjdcKt%1>fGMhawfTywFnaV4_J*NadM{_{?B6<-FPyPUxxE>G&UaeIYet8|z zJ!YIS=sob-?iiTUd=_e?JzJ#5Z|Z)Gzoa%}9IgotXPhp*xd}lm)I7u*Fi0BEqhh3a z@l>99DTFbE*(mRbwU64GA8 zCc^UAL2*%q)&j|ut38E~1~wBCDvKE^%jSs6f*~SpP}pKldS0J(L{Of3-Tipfc#!j@ zkvKr(ft_kIf~pA%94#7HUISuE^&7Z5iX#7X5KcLlDVXm>9;eK5JlPOY5>HMYgCEi@ zwq$=y;nWG`MUO|maWTt7j)Mpw#w?z!0vNCvA{X*`C6bhCjob#8)X(J=3U-zgqI||G znW*-^DEppKYfRZBMkAoO*ZUvPAbH&A#2dtJ_I(7Ch1 z1O))Zieg$}XfSj@pda0Tpm@TN-U1C#8}YXIoJw^HLVQxf zqQacge`6~SCM;`!4lxV;i|;8eRqLVQ8VTkkIb4k*9So~Qb5RJqNOQai-SfG;woJek ziTl@j!1THr?N@4e%HQY!rDhCbMkgpWMW#sC{Tr=g*L{w*iQWrd4_IAa5c-ZcG$6uX z>mAqzs{vW8c&=5de+*csYX^jzGY->NQoRsXiR!vCFnBrQI-nHXFSt5U3GR#pX+MmN7Y)MRQuH^z5J#97LCS}< zcvk#0c$9oTW_%fM>oC5RB|gENBV!!QULWJ@ImRKUb)Z-Wzm1_-I}z0>)<%&dDVC4n z5?CBG2 zg`_@oqxfF%`c2`X5!Hna*QQhKk`}(fbX;?-g*w2A)qn)0+6v?}*D0X%vMl8?IGwSQ zDJ@qdH|#V)DYIY(Gvox!I;u@*7Yt*97)8%#TI%^R;Ky5E&6M;_bDh^fjF_c`iP6m% z82q5@7fw0PO+qn&g`aI9EYQT7uyLP(UKJ?`0Yo1`&zT;whv|_-4HRWMF#w&wA{2xr z2lzEgyvQm|8=#xXg>qoB7)eU?{UA5sf+lJ!1Y+Z6NqY;NuClqdCa+LPQcG6|tx-^v zVbn)fvQqae0$oU-B)vlK5UL24+KkDw7UnB_?|?IY5n*9!MC+i0xD&4ms~}@siNg%~ zlO-mR5aaU%3QEn22-$%t=Jz57;|u)czmFP8glJ?F94pw2DFk_o0^=!2eLvLYUs6r6 zh(a|)HLU^Ha^!AMs<{VKbH6JvlNyNX+71M9RgY%6=yZJ!)Gvx96D?jWsUkJ3fsA!$ zch7fa|3so*cB3vp=Mi4MEo^^L%d&J4KO@ROAaM>2f<3_~5lW$a7oktMJ{bJ!KSnX% zojovuqEd4T-O~x0KhX(>S~ld8Xa;>K8>a8|!G9XWsxJ87G8lXuL`!~&;LOX@FHEG0 z^dRZc(oS9q@q?3N$qIvB$ss}IL{L(-6!Z$piRd5kX@xOH>T_Mx_u^&hla}XIye5j4 z=Y#7b-`TM-W1XV&T&w9kCNFs4qbamPjpt_OjwhI^I8Vwx`Am&n~8 zoTrd*!Pgv_n=oe}%n1`TZJF)`Vs=%vT9n_2Wm*d^ljH?qYi()R+;m2*`lbaJxnz{D zcAcu)4@n>L0cD?+Vp6>E8Hw3)gf(hVSOip?aluT=^GJcPQh?j3>3zBPuqLxO=%L~X 
zE9qPF#R1wW17@8zV;ml_F9vN6UB@+7fem_J=Rhiix#O%$nyP&bXgFxNfKuLACi6h& zmW%f-)1@=Xy;RrwEOZle1u@52TPwvY zbg7SPk?|ktuj67RkP?MxVP*~NpU}`~`YrB5;=n=|=BTh{qaPVU%Zik0I%LSN1~Y(N z0z0w9yS4}_Hl48Qcu(R=Tvi032mQGZf5GCD+cWZx7oXgdk%zlv&-R0#5?r-&j>jHk z@I2!1pX)z~>*CYx1wK`A1&Wjc_k*xfhk1 zJ4tF?K{Z#f9^+fsaWyKJ6dN(ALp=?73AgmsaeDyTo5VSg7gst;bu~k(L}x=g9q;MC z5>sEN56XL25`XJ5{LcTOqiwwkVXkV8Sztl> z@t6UvPUxt3?r?bKQ9W%%coSucoQOO$20ipWo|VEIRy>|;U1>wYh)BY;m1crm5uIXT z3Sb0UE9T9gwlWdAqgjI#th&2`3B^$mRJ%_^;1eaV$zv*F+Xn{oaAxaQWdwoKsJ^wPmY!U znGSv<=W7}V=ES(t5t?f?Ub90rogD8OxfI(Gu@gtm@4(qdZsb(G+0j<3!p4fL7O|tK ztkDTsJrf(JS}EP&%o7ldjyp_EvOMS6O`M4%m)`10fnT=AlSX!DynLyd zc_WkX^<*3ehhyT2?cz!8;wkOoshq(hll+*$f{6AxOE_E`m%WTpHYk7 z7B^XVzZZ}r`-roHVaUXOsB^pijuqu`xFMF6E<~l~fqq-sWa+`4FX=xNl^&6=FJ4g^ zH@PkhnmE*NOPYKnTRC*SA{kG_IrMT|}?`13&uiWk0NK+o(-7fH+6C zIE+Oasb1_Bhb*MZZOhk-d?DsS^a$C-{c&Qu$myM|O>P&F);gu80cnxiq{R-4BuEuf znUC47FFNej3*PVq_QPYiQ(PNHAcWHpwjvxB*RIqvD6V~v;TU3#O#@+ZO_1v4n9gc@ zaV1Panj z$fcqLHrIxRlIdcfg)XE~Wi_(jmUcDM4(fLpeRl}r{IU=tVGd&58@1f+N)3&TTtCVUiSM9PH$yWH-VZ__CGj4vm(fb}IjH3x zM0JU`UgSvXx|ZQ6BC630NCE?icMC)g%tvrwN686CW)r^SNyGL>#tccVcMqCJ$+%|@{L-N}|YXI!^u-eT) z$Dk!)A@wFq2c4>kB{AgFQZSKi%RO95ZBpgs47buVIc5Up-b2 znhgFW>@-Wo_(Ql*Ejf;xzo~Q@Ow3Vo){)r?!hG5ow6;uGQ;uU(Bte$577%_|T3AWc zR(WyOhvQACuwX%Cl=?RLPB%tkkEz@kiP-|nD>g=6%fNK8D{?o#eQGG1y3}S?DwoU~ z8N~3$@fl(EovVt)ay4xC9{d@Hja~+|%{A85xL33gmQ0w?Zh8Q^z$u==kV6zIjlLf4 zMfEgo%Q*tmw!Z46Z)#Dh>0^@xR}v2>#%v?_tt`PQIIDD;&`~&?Ihl2NJF;P2^^Cll ztGnw@Uc|RS8#U8@M;lgLj%m3V-f}&h-3XP#;rfa%VMoK*g({VW9510X*iWTi)(ONQ zKybI?a+DHF;*W|Y@nQHZ$EmboN&HbRiHiu9#2=;2K}5uoIGev1McJ65@OU)#qnkGI zNQ>Tt9%ur@f*%^0TnT43#RtHovYGo&T;wu%6iA*9l4gK-0TPuD&8d}Z-5GE#z#3MF zY#<|gORj}$;L}(OKPA_~??sD)IJzl>Ukbw4Z=Z#kggKP3|Mk!XWq{6Z6#Jtbrcb%Z z3t_VeJx9^M)h}o>XNGS6$^-20V9kdG@nm}1g{~)kQo6|R|Ey&p*1g`M@38yFf z_gEbX+sFK?D{ipgv6&C;js~i4bp0f(bt#KP6b|=9)cmXjI?l(`JK*RCusbIo5Si9z zMfCXOIuT9$qu*DaT=2qjKI|{R#Z7tgv=9E{y5)@ zKu>YsF?AA@$-}1ZEbmgrCmK6|R8u4OofJ8EV|9RBHkIS0{rMXZG~@h@=E5w8ykcwH 
zMuP)ZE%#tQn4@iD*$%#bBKA{}v!`;~kr#tQ#Zje7Jj)g_8w1nJVU|aOdZV7*fTvPZ zz^OdF4&C6|EEHxNrYVN~tTC=8O z=LycTYT^VJ5I0T-Y!+(~PS+`C=2kE~EEc|OtpqHrJVFL@X70fLaa4zvQd8l+&CSfO z`lcasGdQ3Z5oGAQ=WUGXky9H*Gb9W@d?BgR+=`WwY$(tC8DkJ~ z+963m^1ss?BPrPv&MJl`wJ)l&VaQdJR7Bg38f+QTn*fP4|1c@&S!oha4s1;$svE>ChW4t;8wus^D~v>7o1`z`HwkXIePYX)n3GAOV^K0stop9AwS8K)SIz`uhv=VzFHz}nC%n?VJ4Ra$ zyxqKfzzofz@89u&V}$#Y;fSC(4a3FT`28an+Zr!6l0_#x5{924;VUG(@4OkeqYo7D z`wjzsqlCXH3_nuBJ0<+D34hl$0{)+b|AU0@6NXQe@B<|L_9WnQn}lCT_+Lr*uM!OW zzr@KI;C}=U;QwdB-zxFHjqsx+{6E6*uS@ukC43p-|3}t;E#Z4h`1xV@atZH|@cD$_ zBinm21o%A%MgNQs!)p@$ZVBI)@Y`hls|o*i2|q9l-(SKHk?`Lrfd7}Q|2e`}06yk> z$M}Qbu}{0ZANE=IQ5M}dfCFX6s!uEE74+53sw*45Fvu(@P%Bj{5=dcgNpg(1B?-&p zS98|hljsgozI^mrn(=?aEy2iyQ1qD8(~A4e-moN6u`eDLh}Io zH<9}&;8rk1ZWDFFmXX3KM8Pt~HQ1iVc(M0`3NT(km-t3GW*Av9j8T8*eZHyPWHSrRn6 zG7h^HZv?Bc^cTeYybb5pwG!z#WVAS*Y->L2T8-q*rp>v>73bRA)rv=o4(Lkv_#Ad; z5dwJ(6e0i&aIF7UPvUc!N_$&ypwOKtSP)Ef+9ogs!HgzgLc_eIf_GM}l|Cpu=C?@3 z($tbexLz5dj2h_M6dFOgp=vQ8O~kv!n(jor8#1BvEFp=m3-(sCG!dYg zf{}R4{gAf6U_@01uOa!Sph-UEGm2&fqL#zeZe%p&P2{E3~-4zlC*u2Ue!s^*qH3v5;A)4;F19O4)SUk48YOxF)p*HI^q zo_g#>oja`G##fvMV27ybDeRz0>PIMjp%FGC+so* z4Cxv+{&?&?ig3sTpN=1z{0IIb?TeS=S6l4GZ^7aQ{PtfA$+^(J1mbjQp|%9#^o+uS zB@m~l74~03p;~0Y4=}SJcQXsVf|qbTMa|t*_CGWeK$`X9B~WEnfyo&5NtNT9c$@Dor_UezECTy%cf~MAB$-v&De^yg8e}p~7{^kL2+(P+C#zUkv$}~QZ z)+o{r%QQZac37kxmuY+;jeZyHZ=RNEd?4+#NP`<>zJm{>wTQF}GK~+UT|k=oO(u!o zWb>OK2NoKu#Qucf0$3&UYwDHifo9rQ&S6um2WMq%?(RG-I|1Zd_;>j$(n!Yzt(C<5 zslFtY1~H#2Vg@mPNW{ixI7f^Jk)Dbe2dza?bSEP+&HRQa3uJt)VMg%b39q zUO{6g3nDoe0YE{wMgSmK>EI;7qQU4KA#fuAwHG!p1Yk>`)}kLY(zhYG|IHbumTZ!w z8S@x((C*rSv%n4*Y$-Jg;_dL{!TL)245E#LzGFp9Ad#nfMvE92-2nA2D2t#N%TcRg zG74dztWU)Xxu6mvM>jty)oWTvJZlZ&*$ZJwJbSomCAFmS?j-2r=gi6kt6#14{6acl zilvKA*BMYUm;j*P2_jdhmF#o6_9IbPQ@OYLBWooPlT=got<%*20I3xq0A>U*>_R{o zXUD`BGOMnUR`vqyDUycHv0x4dzeZJDIO2YRR2XF^ct&2Q#bf#3VOr-j?csJOC>Gd3 zq_4+gt4g`;9A-x1hYXHA2PgZ9AHo8f2uI4WgZ>}t&ILTG>gxL$2@-8|5*szPsH2TG zv}mK!N(yZg3_62n6fM=D6eCcjt#3bsq@p4Pk|^UiG+wG`twn1sty;x9)z)wc1P}#8 
zLGcn5H8H4w2m!f#zyI3jOfmu6+V_2)@BQ*TnX}J6>+G}7K6|hGTAM`5wO1bM4c?Ju zkXl1NlXF! zLaz@=O&W}7gw+fj4Xck%6Cqbze2;=6S=;hJ>4Q{==+!&-7=5A*3q@~px*Xu#9lS?lx>yTB!! z+pLHRxu~M;I6)J{wQZfF9-aMhfY4KPv&uJvuogtBReFg%)EGS_TLtvgv;w9a3y35$ z#Ay*@p%?VU9m(}Du_NEHhBg~rOuUaCffH4Zr3OPmlYFx>Sctz{VuaRU6&#jkA->!d zzo+?!Z1@MP_}=J_XksmWLWkSh$u~&n#wZ=d=hF)Mq)i^|-LUAmJPl!hMMv3LfJKMd zS%5`P=d24BJyjPCi=JR5VbQNyNm%pe)Y?KUAVrC;=heW ziSHQ)ys&7U9^zxsiT1$%zp!YoaO&PzbiW^cF)VtG!iWDq#iC38Z7kXae=eLlzS6sH zO9t-P8+SS=d^Njc*bKbspmy#J)fHz7DEGR(VY=YU9|!ny*uTV=Q@$v^G*meyb@EIBeFEv>_n)rXkIHoEOIfT|B|>jjjlA-B1^wtY!= zon9PIPDUf&7+@ssYBaj6yAEqx2~P<6qcBJ+zzz+vYY4G}>{1!5TM4Y&ZZyuJ z=8Dor@)I`3YIkSL3TzZG&D{hJ$WA^Iw=NvdDD7x27K4J`4 z5>lD`n=7KS@RHMla!xt13iy!DRR0QV-#(gzHDdl6^{(Q7oKOS{R;99p9hfsKw+bY6 zx|Lg0MpQw(j!@TIun$w`r-$s%Jyy5Y;5q`f&gxbe9NbI$08>!->>IuGhX7%3mSE6V z&(!^#H`uv91a%uMjXi0cJs{9wxf(tOsOHA;V2$FyDR{ z%(qx#O_#|_H8*JTCSGhm+s*dPL@$(jd)30vN^NmZkS-}Xu3#ZFd2 zx{SQ)0|Q>79b#_GDCUPo;||L_oR%bj!&5&`0h=b=fa4nF=c>raDH;&55S0MP5kL&a z*z($4tj2*w;o2XEPz{_%8x7wmR!fZafP|1oy! z+G5RLedJ~+4{U6LlCWE3-`LHi8Yzo|4284)1$G0YT_#d$1{{SY<0s&-Tr{)-72Xu* z+{%Z!OqMKGm;9O#qI(yG>kivbi^}68<1884Bz;+G;8&$a)G~_^_ZfQg>{M^GE-s6x z7g`nzts9c+{arb4JY_H+d!f&Crmr5C%>9BEW9cj9;hPqK_BdeQekFX^ZP`#slu^XA?; z=ATY=-XaWx3fu8*{C43BI4Ab59P{j~g{K`?C{M+L$`Q}9!B4+R?^#?i`&Je_yRaPRgr{fe zFyd)0fwP|G(xm)?r`ath?T{WV-5#!A4kC?Vt|ohFjxcDr?q}ev_8vrBN&4Ql#vE1S z_3NuI^AfMif*pQ{NOzH@qoScUlnisFWFQ)VSRd>XP{WnX4=V3;oDTKHMk-Ho@s?1h zfyssQ22~D#VvzK34`l5VeB*20LxNAw1Ub@$r@cwmMWMvdConoVD0&hjTb!J6!iBw#nm;G27%}E3mzVo-^Ct_%!@D2S3%pe^2ngh#35P zF95%CnZXZA!1C-`y)|417CVFy3Y!9OMV{to_l!RI^pX=(T=4*o_5f0y6~IQXs4 z1OFD8i?I!#n}#3i;4gOYR|_^A$l^ZUX3 zZfvsm4LSI43H}GJ{YwOYj)R|{hHrH6D>*PeM+&~o!GA~a$2j<#((nxq{%!|<(Hp>* zJNW$tziElR@2E6~mP+f&W;g%S@MRAE90&i2`V(^SPYC{Q z2Y*N!KF`4)_=_F<5rTiuwZBO4 zbAWHiu-}__n|QGzc~p7ov>mk_2S_r_F-a?2D_3PN`HMVnz)#U!E?4rNH3)QhqyFDG;EnCgBpCU?N&?Izp3+`}(>(`Mt&FP-$X^fuM(wz2S@CKYiPiyS# z7xeU}^z>3Y8%a-x)6?Oc`JJ5Xit6M9S5zm*x}rMyV~T-(#8P&`VWA&EArh{CE01yl=ZS8`oRYJ3-eXHbeJpLU-ZDXSu}+^H{yA~@H! 
z934%x>nn|m_E~_fs7fq-1EU^Im0Nkb%INKRDDG3`rh=Gab!a=J8cGeK4mW3WSd#-M zJ;8?Ec0`Y0Fr1ggZ~tLl7O#fuhnO14`d>~*Wmc2chuF*OF0xCP6O*<%eBCsi47fZR zzHT<9z@=hsxONGr6{San>o*F5samYAl++GDzjF}ZN6G@*=;HhwH17;FJaq-Wd2Dcz zrwuN4m~(~gaA6uGxmWxexr_v{GW#>`XW6);cXBQkWr2$Yf<11fsh*S9^d$XOE*hL0 ziZmLrq_>f0;h7svUQrlJT_f*`?PLr7hd26FH{9a}*MF!n8XBH)WiJg)tv*T9j=Zd< zjy`V82PZzNNVM`z;^Z3QJ80}?_Ly)7y(yES=1(AUId*J0r7ZYfJzO2+SsCss^=cn!W6^XUdAd-5X z0Wp7N6WODqTk&t;<(BH6KFu261GdW}LS7?d*^W)wx!QX+$kSBDj^gM{Z}bOvq1k7M zI_sV+Ex3A0DCzHfBBCzWM=a@lB^k*o9-?n2DJFzZC?4GYOF3KXNsZ-%U{5&Q(w^$JFK>a-`-?0%U3l%b5!F?NA=G9 z-nHx?itFy;8e;GX#C3`@zEjtSgzFOUaq015^U%udI#cY_;awWTM7E*rQXm5V3c?4- zrpMqPC+;<|(L12k)Nfc-Nof|_wyV9D{Bz5gf@W5gkVIUbjeZpdSQGI68HYrgpgYAaF6+bQ8_xWN1x%7TZR6OFJIuy{d=doqBdE_v<*3$5MUa zw=YvtQ!>khR>o7gb7gzYBOFNjDEuiZbN?)c*?Um2B=OSLnszVrF?pFaDX~5abMVtY zR{I%hu;)wwX-5wf54|rVUMlxlaro+z+)qL)qN(hsq^f7<6Y?#iZ1NhU>GE#mm$GzR z-i`cHmUhd#QJOBVL3%CkM(Mb`25Gds8>QXy8l=neZj@fjYmgSpyHOgg+y;udH{wp2 z6C;zt?QQ+&mlk9*67gY5dSS>C`8JiEt0X;IvvDPJQ})s>>vQ4V)s@50FL4-weu={v z^h+H2O23ppY^EuZM$9xN(wLd1MCv=!DUsII;BEb;Oliax)Jq-K_8{mg*+c#q#VEAR zALEn*g;h?^6E+ojh6I)zVB{GRD0Tm_c6J^lP%8fdI}3>Q{+#(F@SCnE5*Tqsk-$S; zQ6%s{iUA3%l#75!pt)m|sSD1K2o)rIS#XZtUBxslG(!Qjuzw~kbl4bL7*eymZy}1Y zPf5rfO(umc!%yau9B^vs{=%EtH~K{KDkFlCX!0tsiXwoMSAl1YEaI2zCC}*Zds&=O zra}odC{Lsq@f|FDA~{nd2T=vJmM2=vEiCTcogyt!sGNCG*aL>9JuKvmD@X5~_Ae_u zi1p3mI38|hoyz9H&bqUP-xW8aDjtvV* zRq}4Qz8dDovhv^0H20)w`iw*;qt$8L=Q>#T=s2si?xV`^m-gSofnq4@wP0>W=NDXg zXu%KhZJw_br&^~YT{SCMViQx;% z%g?CPatKwd$DcJ*p;r}&5B4}xC5kN4{sArq6Q$gS8nZ=6K#dD9=o84L?#+!^7gHns*i8sRa%1;U{-r5#1t}+8XT#hT3d^H?; z7;R?(diar@1?b@n&V2N6nJWrCTvK_q3z;HVxf$z11DCC))5FmudS-hYv)Pi8Kt1}V8!~h|r*UP_! z4@S`>nxn#(dqO#-i975uljOh70WQDSb4bpo9BZ1j8=gUOz5Eue<}_Cgo9t~;QzD?_}@B7zOUd9ckt8G@Yg!{+Z_C7>d&1He&^G` zzyGB5r!ozHo`b*C!Iuj@=KAxT;GcBx#cBBC9sH>dzC!SxgTGVo*E#r()xrC2eZ=0k zmxDiA@Nox!nc&ZN@XcxXMGk%q2gYZI-uEmAKTz;rcknaQ@V7enKRNi91V7Zl=L&w? 
z6ZXDi)9{x#_$wUz0>Pi-;9vV2@QWRM|1|t}9Q+v$zNg^Nb?^@e{x%1nlZM~l!5{A6 zw=Dra{!$0O_Qha)UY=v)^F9aO_e{0FpM&=Vf2xC@orb^9!9VHXZ&Uj(xbNFr z@Vy-T#5DXb9sG3;zI`e1D;)fWxxlY^+{P!GhCkiGpYPzq(j?yI`tyw7|K#8c)9{Bm z_^&(oH>FvtbMQ9^{t5@*x+-|z_a1ihp9Al^wHf$7yZ($2{231ZsWki(4t}wNUnls# zIQYJTKit7jPs3m9;BRyAJv2TAu0K0jDy{Fc8wKmJG7W#8gTK_lf28&gbMVgz{z(U4 zoQ6N%!Jq2jUlIJ_4*pKTU+3UERtE38^&vO^9sC5rzwP>enc&ZN@XcxXMGk%q2gYZl z;MY6&fr9_KgP)m(ztzG2$-&12|E`1275uhGcLnlfhW>6g97pZg5cxs{tyAVCtI?mWowL}{;ozVUOeI8KJ zM5}k`9`f}jc1GD~4??pDZRgJ)Y33N>xLi3R*}kjM(#1F;jpjDruVXVc;rer_ z&^CSWOxv;g5n5vp{CN~gMf4f_>6Pe7wUebxJ5ELI066cx{h~y{)E8LAdAb{$AwIA@ zsNl|(vC{LR;rdU-fs8&4w-xyjjXteXg)#~wROnBkDw@bHs=azlR`ven$?T%`T>VOf zIxv)#^=AaCJvG{mP=lGNbBvAlin0eI7qEx>`FZi=MR>QK9f>7>RUAv6QKIH^1}Bdn zlB|B0c(oW_FfbbE1V13Ieb5SeNZjTKHbks#4|(B1YmHDNfS}ilCz@lmt*ULLmhQ4M zPOP>~6^f|!M2zQoIDN#GCgU{RQhykwUTvPHXw%c!zN?3hks;jP#HTzPS;u9s#Te(M z4r;nTJB#ghiNHa;uNF*DGH)vX92YwYRGq^5wFWIn43DQbChVL{N1gWPVRNzg?zr z#cPat7hf#r3)1Mz11t|7a({~PN!4LiHB_ZQY1*ubfSZOit7u_$A0?cK zIV)6n&lDctet3iL3o%`tLEa``(mBjn`x9&1}s?Lv8A_U3p3c><{pM9J%YEx(l&*KrB|o$I)S|IT$>!hh$wg?Hn>!(AM77YE(Nq4%{hRDeqP zbYC1zCG%u#R?Wd}zht7g@sc5&SIryThaDhyN4JshNrJY+TV}r61Lx`})|{k9;krFdC#W zLp}DPef&4PKD%R~9VFSq;p}gE+C;*b@RM5WCQhi5!mOgfMI+tB302aYRW!I@q?^n|>^7;-DjHlY(oLKIB`sP-g9}Bv$(+P) zlQONM!9^n7WL{#oNvBrP-~y3uGB>f?sL5`ycfLt#wrJ_g;rfvv}N+>bc$+)Ht4n!B9l+J^u& zHx2cqgKBnAYXtSYpl~x3cv(+<2v#bGGuKYAM zwY%`>b*fe37Qp6E=^4tf=HqK+n8A621OsQ>}j6-`m694#*a+Tm;I>aaH+_pRXn5fwKJ|Bvu)7XBYG>}KKr5xsBbQhr4Pm%#rGTmt_$a0&e1fDaG& z|7GF-PlW%+3IA)qn*jf_MKJikqR$)Jn`+yON{J@?k5g`rS{wEhBwn3w$!zaz5$V#rYhAqqjjAByyGzTE-F zDjGF=$qhyj+PyyOw5vJ;^c;qnJC%@eibhp2yRGmlGtFWm*Q^{10>e@34i(BQTD2Q} zkpJ+AAY4^$#Lj`3Dn}pWM@$tYfhtEI8GjT`X@KTehWLyMpfpZN6QcC45}3}2Q(&BqMAC_BvHQrtNv*2fG& z32tGV0+r!NXc3R4FhhkZG443uSLEX|%j_bvqx@x~wYz+OS-FqHM)}UN8yQ&37}{h> z$b1J`V}g9+d5eY~)B^%3H@8HQ#p&SUmm|H`*tq0otc*yyw?KOsFRS#SvL^lfJTlTiH2C~o$Im!VOk#QZ%%jo@p-`6r0>URY-mJJoPK@*8?F`JKrnJQ2rzA)O<9P!_IVtm#3k5!l=ud8lhLkxOuhZ)TAam1}ktc}tnDu3hVqJajk 
zVC>H9yu#jL=DmvyGMb!NIcZ2 zWb053C`PM-Ii~P^6*!W|wgU48jp#3OFz^u!j$|!}m2Rq8PYgx3Za6>L4v?Wt7kTkM zTVkPYI8Ece@}#_Cg@=^g9n4SK!-7W?J0esROHGzOVHbvZMYL#w;~)@Eou};qUV?%t z7j9^2JT(~|#Y4N|p^uey3l{=*m109Z76nT~J3PriJLwLv{h$#8_s4`j=`NxCNq2zk zsp(}k!uXT!0M}EqpVbKA54@vkzJnNe7rVRTNBd?x^lDtcZ^TMptGTc37~%V5&r$yf znD44PjrqQu1I%|HX$j94i!SDSU>fSX4(dz?H3Sv^+@k~~=KHWTRDpv!&Owz5>S&-~ zzOTO*=DTK=G;qkv8Tz+khST`()kN%(Gns&XoY0is=4L9Q$zK%4lEVuXIDCM4`B$Sd zXmzntgzOcx{TEOQAok<@Mslj?zUuO>L^Mun&?{;o+gYb9GOR2A}{n7`ErDY$v}!g z^fFNiQ35FXYzzFTrY$jWIdZUlf&jLX=JW4VA^~lm!XZ3uHoxHL1KqwAVli@y;A8R? zZ822a^QCgLZ37&hh1N^ox6;WYCjXcw%k*E8e@v4l{ny=-hD-l-clB@&{ny=>4Ig$L zqS2V)zW!?}m!ODKxrF>Pl}pG!Q>Fj%r2i6Y-QOu4YrfQW3v!0^i)>SrSkwo>`eVqP z$c1E|pQ?WvhEy)$h(?AafP3IvZC=&moku75zE1r>8F=M?RSaH1$`4!W3pt99f>OiFqRBmvUI#8(tl^b29 z4pizurNTyNQwJ(7h@@hM;zp>{fl7-ashA=2Ih8t4sY4#g|ESBpURBxC$;+POmA(+J zKTGWva3&jc!xI_}&rf5{gMEH5jpnp3_MJYO zdnL~9hk=rw%|0nRNvtcTbLp|VVmd#enxv5^S!1L_5+b5uhw&_8umzc`(!T6iiCa}s zA_X`oyb&(QRu^8L&93K?V_|)6<6T5;tBDI!{8Y&eak*bxUP@Hs#E>>tnHJU;!4E>i`IUO+hqxd zW&4i7_v<2mk=hQW8&ioLpsRlwC)JL*NfJy>0^kBZ;wmrnYScsuBg zYA%jNDwX}P$yvHutRJ#xuMgy{smNQikWGfWU6$@)!xA!ad!=vHtoL=Dg=pT0MjRz6 z3!Q48%^o?HjLh^#pt{pAp_^lnq&$0IE)Tq+$YI3uwDlS(0!B)r(=F?;?v0-5T8D}50PiI6M1gC4UuQs&E{nB$-@EsrWppmpM(D!yKm3^ zn}fep@aH=C1!?$s4t^B}+Fz;mCp!331%IrAzc~$mql3T4!9OARD;<0x90}<0)6Awe`fz!rO1L#`-h{8&GtH-_7Bgt zo_!!NdhKoha58B*DW4S??+44Xnm^C}5mhG1&|0&9kkxxZI*G+|JXy#9Rut<!VWkLBkX`f@0sXb z$fx;w&0k`P55@kErAK_+nuFSo<71ls1Ge3Sor*r>4Q(eTBp&+AWPln&vvi97LFSFg z$EemDtMrRRfRnhyu(|Q)!nuW*KmIE7M-)=q#r^>j%YG8{H8T5A5E@BD*hL1)U-KS$ zHnp*glkYX<(4CSyML{FiArHU;#rw2Nqs9oy2=ahXuFYc^LF7J;AmYzrKPg@zIxr*y z)ca|DThk7#5tuwi?rr~|s=NJzs_ym=s=C`hsOoP2psKt5gQ{-!kC9#NAF`CVfnWCFvh-+IwDNe~? 
z`9c<#Ydc(!3|K;@4l}62?m0ULvlRDYo9wn5;6W$;%EH{27>E-zMt={NnIBu|p zGpM?J{b~CJsn3nqp3NA<5+4BObOoLzDD*S)XWB1Z>Rv-bPjMHw`}T`xil%VRP>Ll1 zxuUScYA2o=#~w*gygr)Qej!+CTxoP#BJ6|x2~X|peAX8@Ml61+!16$iLoFK%eIO(< z5n0>J7FaXTM0^5woudY1#fYJLpeVCNACsA%ie}6N^bFlzq4{U1mw{cv>>A-&Aq`ZE2EnG0Tup`v;+{kTBJM;TV zHJsjBmpmdh5BWNK-c)1wcQ8TwH@onQfxbSo@16LDP&#rv32o%W>(^EP7y9{LWIy2( z%(1uN{1B-$Koz~I!G9;7=t%49TYY_fGp3n9A9Gb+(aj6zT`)2t9<1nAZCMV_Nf6n3 zgr3Z9kogHlQvNQIdvZ|MlM4w2JWoem&qfNcvqDe8Q?QnV8%R;lIwwNrg{T~aIu-Tw zX$|TQ=7MHKYGeqoH%ZVU`*dg-SFrCUOp!rdN7Jh# zi`f47s{Apg#-IxP1lWGA&h!>V*9)qbz3L3q7O;-GFuFF$uB+@PuLFG@rG*g^fmK}{3X zD#tUvh-JL>AJbj>`p)m&MSowrq0pGeVkB5!!F@=yc41*R&QUyK+ApTQaq?>f1?LHk z&ZOu8N8W zvODjvZDuPT$my)faWJ`^HMy=Pud^o4)f9Br6u6qg&YD73)4Q{#ceJ*N0L<2K{TOM& z-h>M=^>^p@R5Dxs(*6^0KNi@VXMw$W!)lEms%_diys>G^@Fh(fhOcT`KYX3|9XSAU z@UC@C5NcB~LD#w%-~~00li{*m@zdx-a%|>Z)FVCf@R?m0?VNe|&N9KG$#TuTa`|MH7c=*; zo>+A(kGZ!zvBdHEh0tE8A|xfToUNv7-!irzZRZ|EBU#G(^!PBpJoa!Ytp2rTNhF#a zimTG>X6B8}nrJf0be?JFFo1cz{1j0}hCuui!XVT(tr(x)`ZJOo{yh)~E#oz(De-XUeHm{!?Wl}v880{OYZ=!v-m8%J3YyIB@`>w@ z)QTbf0h~Gl;Z*3HCRwp23B2dS1g7e|Bsgc^7)_hvuIst=Gf;T?qd%@aCB4#5!*%^= zOtdTeQH|~RwZZc%0+5=w+VbpyJx`hyEZA>Nwgp?pfd$*~7Z&VDw_yLsT5WBZwrkHm z=)mX$EdL6}bUT_FYpSPa2@+GN+xBEVt@Ce$Z6 z^C^thc^3-9o?MncZQA`kA0)zn_$by2?Q%9n|T9suq;&_vfXdhB>I8IH-YwssRdIytIx_ z`}mZ-?f2m0mH+weyCX4_BHZ_QCVJYwpzZj61H4xCzH9d zhvXidR1k)5rsB(Vh0lcg=Pd9mR_^5KAb&7))On`GQk7&@Alu@5y$CMt@KTd|>p|?2 z^_@(5rq4k_M6&BjVqhT^%ljRsn;d;ap2Hi4>pnFVyx^p17sf88p)Z@f&aFzChjz4d zY4sy9((xyDoz$qknCqAvAl%O?=V{Zl(DFE&pq6%2Ki{gcX+P_>F9ez%b;>kQ(P%4P2Amf$9L+L3v>v-94p zt}ScF;y9ZB`m33gab>_CMB(hzh~H%?puKZ`PrWLy;15BYC~xDX@6~+Jb|}DZ1%~Th z)mrQFWy|=o_OZevzb`i;XxLsiCO_Htg;I*P*`n9?rEda~oZEi5^leNHrCSL# zD`_kfqNMtM>@j2p;Ul&mf|va~;?7CK$S01Ke#ZBOf-+?KzT+=%Ab+=a>1Jgy@lx83 zz`GP#pz~yF*Ap}romX~rJwY(&yt2FVB==Ik9IIW(=WD5Huumt3_N>&HpWw!*{SQ6M z_Wc`B5z%1JlRxDQt{cuLAwi=hBxvM|f&KigZWg|C<)L7Jf~LU179T#Nu0a1{#qV5iJ$P804yoIfG(cpfj-l?czXlvL@fi zWSo3MMRFuKfGpX5_&V5x@O3A7kO@nz70Zc)c7+&b+;5Y2B=9bu(NGGYAu)5w86pnx 
z)M(Jq>*a8bC~`X3lNH8oN&62v*OTlf{bSmM)9x}OADbpCH-82)S8k>~woJ-@^L z(%Q87f+fOtV27o4)U^#(tLqyLYv}rCmGkYsE$KT1*^zO^_8+viBd_2uU7K|BN_)-p zc>Ra$nZrCL*8Z<>H37aA((@kA3YvnQiI;v_)7GYZ4N+&A$3Pz{3*WM+Eb=^FH?i7x zT1?f}m{?HzPA~S|jct45tkl3JKA*V|zxs2hQ`^kZbZW=^%(tsQ_0w!2M^&CsKxI?& z`CE1Jz1ujE=y}W~4PZ$7dS0?Go)f#g0b}$z?Mr?|dWpol$R0Qa;^x!0h|hMEk*Ci2 zK{9J)4}c}omzz^o2WDPt_-Z>hVkRalXgu*suy4HlRbOOd^mQE9bKJo3Q;xevz0D5t zXs}Oj9<}+JWB$-DH0tfgyH$*Tu()i6K|FFzpFi}=A2pstt(g_%Z_TXuP4|ja_lo+= z3i`8{{=9%Lqs94ODB&!Gr|uBGQ2Tx-Er}(T#I~)Bu_GD!qT|>*n~A;>orls9m1UJL zh8VMU_uzO!_K}xsK8p5Pi9Xobv9IS8-6MN@{+5|7x->fY3JE1{~v#ngdI6F^u z_-X!UMM6pEb>J`Q%d02c(YDOCH1_$^% z2Y90Zw*qLK;WueDUF_}Cq~FW!H~J!!=5cn=V}<2_0SC(ut_>u~#JMW~J5 z=@swuS~NAz5c_u;I`O$U9Qa6_Hc;>iKih`p!snoJu^3}oG>;mlL^9tef z`Jac+wTA<(h2{&N7kA?G9EZZd1SM2dalFi#4>`@w^u(XhV&$G z`mSE%uWS$|pDlboV)kC#r(HAV)2?Umo^~zGdfHi<^RzQ#JSp{_-IeWh_=gcgpvX81u*aVqpKdTIEbuj%mna^ZKQ)O^{YY5d+6 z;P=LT;P*!1cXoMV()#$_&;ZUf!tXh0{Jt6-k3I5))$cYPxB5elTRH9?_11&Ms~3-Y z>yO2&Ul{c^7<^?K#~0*}dK(PxD#nBKvq9-#@VC-9zTg;3!|f_g2I*&00S3>60E1^j zkgYot;?tnQwXv}Lgw-n-mLIp8?Er=4qc+9C@^6Oga)sq{JJ)+BzTYl<|F!P;9(g){ zzfNpVN-rPVFEDJsy%XCzeg_scBy0~Kke?mW-Pw6y`Y%#&`2C3hzn}Z-02>eeFT=(c za{wD3aR-=e2>2aLwx|Y7Hs|uRK62O(jkCQCG1cd1bH&S!EWhZ-#>t}95Q1ta?`R1s zj=VG$LIj>-xQC!oC*=%IG{#d=xXAfvx_x1GWN$1#JMwzIQ-vGf@fq?X{0sDQ)1XA( zlcjmUc#M^+bV2nLc%+j)B-LWGBbL&}i9Sk2H|$YUi;Z{`4(_6*=zEq9PHd|P58hP~ z9ZMH4Wkq3!czD zf*+jNLdV}N51+XK%o5x4Ld~0Pe*=>E6Y+}Ej?lNL-Hv<_R*A%AwePiHbL-i3vEhw_ zUgs#5@6cR6g$hUh;i7Ek-1(MHPuR(6(~^%JU-&D4qKR#Mur$4Es#!h1>6+C`Iljno z9mjV$emH9Ur^RJkf|YuAzmrFe-_^YWj&5Nm#D1g3!_j3{fDbY&zz3NX;DgKxIJ&MC zO`ooQp=sCZ#bmPg`86dCsXI}sKNpGCn^@(w$=hj;nx^A6mEO4 zoGrl`TJw?D=Vj+*{DBNteKvyTHbq05*)w#xAiMFuKD(D1c;Mn}BC4~p7H600K={lT zE^Ws2&{6YX+vUz;c4abH*jK>FlX_)32Nrf2@qcqCx`q8AYr1vqe@VGjr^jZ-r%dPF zY@r`SE*Pi6>_bVh2Slnw;W0$S{hb>RG4Grh`!w1nRB~LAYfiaUgzKN-9D4+WJUhIgU6n0z}sq)xuYm&$KI**Ez* z;=CqLBg-#3KvpyIy3*7Af}(}G5J=Cv!_X1`p8P50i;>uG7lY;{h#PPb?8wD=3>e&B z6%M}gu#npTNpw_{+co)tEryjM+qR&dMTFSmLI{bgk#R9XPF7h2RYv8A!qTDSqX#Dz 
z#;UzBd&0w?{cQV)=KMaIQdo z33rN?F+UvOimmB(oz8Xfg_TlqmDxikvUj9d%}Ou%K+zPg$3plED=d>dkMI>$ z;kqAy)YOfMyw^sVzQ;y!gqsDdvzT+KL|q%F-9D%I)kAehv2z)X5YVppEbTQso9l$h zY2Om{(hCuO5cT8n)rE(t#P$$V^g0H8FY$>Si)IS66!A62r*m5|qeJ&QL1!AdPHIK< zOAi2%aGCEWkG&Quh=5h+)VMsO60a*x1All?yu`ZpZ#eodT`S`r!Gl#@(L9J_1|e`q zUp!nlkcZPWOqcT*@fgxpuIMIu3g4}V=r)DYGKhGP;65W=-yn@Qx;|}-q^}eWO@h1U zAuH_uI)A^ERz0^v4Jd6i{#V`r7G$_?lwLdhP`h>&)f2VVbq*hwy3dsGw#Dk6DJr8* zeNYqX-A0Zm2EOaR9}kzGD|!KYd&M0qG$4lmH_vsR-k(MXJY&;3T<2+>Fs+))zZ;2W9VI}BS_bjVnJG0%=BjMM? zHg>VZi{WY7pUZu}uJ9^X{5o4VS4_`#XF2XH$DQT6vs`zU=g#DO%zXv!tiYWWx-ka=6AM{*JqD}md<}a_8!7mb&lvj~bq*9)jQlwJeu#_T|@gKmJf-bQexL?lsSBt<;V7d9eKZLZ4WKr|N$NL^x>-=tIm}K&-RGd5 zbWqC#bqi2%>b-vkLGN|RzVp4Ub!7ZQ`{=y^X%gy$juqZu9oJtwShiRBCrC^Me& zivOIaW$GxiYQptGRNO{Rv#TKJ+-B+A2~kyKKlo3#9nyBaf7`ZltEst08jN^yP{eMT zXHSBNlYh!ra;&P(7ZE2RSWrup)Yy2yA4^-UkcUkjIize}@_!&@fy=8YQ zZV|H8tmB@erv)G6VsWdfL&tMqsQ-F3gE)^tY`YOYEauFr11YK-C8nFV9AE6AOr>H{ zs)RYm(zpIm7Iuppjxv=ST{)^-hsOMEDRbh<@nx~(&!XkY1L3(3QKlNh%m7s?u5=c6 zi5Lhu3wh48W3^}&`qXONg~Ii}R$VIl?;O^UVSRHfHz0MKv)mv5_*-Fikbz6DNVT-R*=ZXT^E4Ra#_@)8v@DI4sq? zxA!|~YwOI>yDaT{Wm_T2O2pdmj4%ZYE!*D;*fV!&uHgX!FSD7V9*#=L()5dOh|X)- zPs3AIk?4T8Rxuh+>?BKWhxOA-^=;BiilZQ;T?C_n)Aj+~_-ARE`kN8UAFxoH0{&(J z>-(SgkCwg~uKR`h03M!V7qB^i`oE2SBcSU>SKZiqA%%6&$x5#jV(Y$=D1h#*#!GEa zjz02q&L%|s-5wQ`RMxhPs=fLLiR}v1{d(d}g|KXHJ#Ex31h{tz>mNYgXIJ+v)e*)N zoFCooJlySk&&8SWA9p){GkwlZks@FkQunBZT#~X1&%0RZ2Ins&E;R~)S!>MIC-KB? 
zX3WP*{LAogN%)pPPK3$CtF5&(;DtVwjM?6HpP@+BO+5qXBBRAvHS#zuqM_0vjuZQd zI50*LHkZM~%_x)HqSlL$^5An3IT3M4AWVpG&dPif^l^A@WiMDK)0^*&JCR zZ=%Q{DhXEWWk>{!^D_MOVu|NO&owh*eOAX)6RR{mClcaVrTUH3pG15Cq}PuVqf!il z_~oZKw)by{5_O$N(WbNy#S|>0ij)cZW;s@TJvzX3KU{Z!-kFx{Hj9w=d&yIZ)fDC} zE=MS88R>r4Eh?YV+xUUr0(zKpAtpVK4c4(Y^SN@`f0X721YDvPWH;tZYO(d#Ihq(c zXE-b5xY#I1wXaT#)xkWZPS2t$TqhSJhVg@FVvU-#QKfU!tto2BfUY7FW2o3Wd!!~G zuf7SG*DM3QjEj$ z(bb_88DC>D-EDktXyq-&pUJtFem3Yf!YuQx0d(TxrSKxt+1ne zH2m+mo&4`C-6{SzSTT1N%~loC4pq(E=Y<|~TscRztA0NWhy){4=9O-I>FhQ=aXME# zG!U$S5=kUebPfT7Y~!S^mVH3Iic}T!u31H3^)_NiS8&u@=_wjnb%>EYQ8TH^2GdPp z$z@}48`hE@CzlY0u@TsC-LW=|K+~Xshi$q20V2g~XnA*F+=+^ccLhDyXX z@g`~)au{^s*N2h*R39@yGtU%F(THfVFm>ah?Vj5wH%fwXkKVL$HC$IgZ&mDKn+aI|{#S**ZcGf#A`B7>%l~lG;Hjv1I@?;{mlpV? z26tTAh!o%G36Jqgy?HFfrf}zjvPT zh-YztN8DeEo~+|R)J)HwN2H++cTmL+YSlQP8sQ|3wg2Rbpsh_mvbOefP@!J{wL(y0 z?U$#amN}@`IPk=c=;G(D1Pa!^d?c)W-wVtmJw5;TVNX07tvm@dP|=kn4~eFZ9R5R| zJ0SZ-@+QNakx7@VGUt`W;agdWjh<2>%ORJ1&Jm{Pi!Lp)ZRP45Qu=WbRTQN9tTL4O zx@&%%Ot|%5eo%4SB^LLO!1;5R5{^0WIkW`_LYSPeRu|Ai+4)I$EWCX*p>@dmq31wB}dA? 
za&4@~U52=mmun0*dOha2)5)L{lu*=lcy2E%xubQiUj*H}>_6eKbYx>z)AlBQSX1@d z`wQM#!Z6NiJMxDWVQx5oUhBvo%PlkS(6i_k%Wi(t2O}Hth9?bm(+49O7ry<|+qtW0 zGrsH{#dK0qboPDbd+H8Fk9<2r{DQ{rvZO!bpW{KTm$Us0H+CXy#Rd!{_-^7G6(n?2U`4<0N(}BR@|S` zP=9n#PdKPs@kg2ao}eU5PfbJp%0V?as7kfuu@=Bk>T5aZac%)C6S$GmhiD0hJ8en}=&oV+`XPM>M44-2R7)U40~q z95EA&A6$xr>mP8JAQp9wEAsDRHs7dAkfAbvN-OKQx{xXC>hWd%PJ;LA4shITo{=?q zg^GGy*5oEjuSBo%Bq>HFKkn|rn;#GUG6_nBG#pKqMb(cyDn7VMXHg+IK;=pM8V4+)rbp7c^}rOzX3ei{y00to&Zy;Owk=ga*gnG<}xutLnJ9 zlF1C!4$t^6sn7dt#B45QvDS*7Dc&hOL#e1@sjFdnCl!tL-Opl)Z75gCq})-FSVv4@ zJn!eAhHF~{y1I5I|9J56!DS} zpuPZ}z2>|t9cGn5HAIl31HGSBslnhyHVKB5c$cKoqo z@bu(l;>*YD-eczuRHBLRMa zKcw1B`!imj>CZTi_^ap5pRpPv3NG!@6#A;rJh{8UFVmCU8-s?BBVwg5)wBo5!qB4U zCo9mq_LvYhZ=NM>zqNNPWBBEI#7n9`cw;{wAb5X!Q~Y*DtPq);L5Tpa!0TM2fESN{ zjZ(L3`1JVKC}mtLxZb~1Fr(|0C=ra>d^{mvkJI(Kq%$hG-j^@*>N9Y(Mn#>bC-kpi zde9b)c0NTx%%#g}-e~$HjX4)DoHy~r#_?ZLuuA)nZ2UAgpJFUeSZBZw0u%&3n!!oaXL&x}wRM{Gu`GSAl;k-}jXp9Grh!chg&>iVI*7vFGJM$d zSxr0h^{X;Imy{J8H*7kwa+F&r^G-YS3#Kz3Bm`(1zoeq{^>At>gxIi1zs;mO2fL z!2pnd$)`3J+V^e;&T{3o>)G1tot^ByD*K{QLyf;}i<^ClFpaHLsX0V##pk82=&jFE z2w%8D$aIR}Q|IT#5=w~K5lfws%cW+mYc1ti%6qa(N;a2=j~Vm5=f4+xOup>JoEY_R z_ELx1%zowQhm;1wa@Z?H4h(521|-m=r0GXMy;z+h7D9OzEb^F93i?-(1>iXoPiyJmkEWx)+zZOYF@=M>u@# zM|(}i_mk2Pd$9FcEQ(RW#~dA@gOCAx0@I;IPvRwQTHs2^8);_P8@aV149s4ON~cAm z(hbbyo^YLUxjr&yZ(r%dbf@-f;0>%jZU712;J*%-Cm?3pQ8mGIG@{^kRB8WVm-(k2 zO;^t{NrJMhXw>0-)Ip;HT}VO|gCEd>aH_+|fZt9hB`ARs(9VQhp9~~;OhfelJ^qiD zF8A?2Y5sm~sK0YP?i2ruX@J017xn)I{c!l6DTxSQAPY|qGmURs5!{~h#TSxq8d zzuwH*)#dbU2J}D(Acygo{zd44NXx&V2Qa~!&4is@(VB&arae`Q29>>=jv@vpmIn9( zWHObLOkzOldLfNQ%~R0>(E;p5W)FyrzraX>!`co13BF$D@U`JZ@U_q-tL|-(Fp9Ts z`k;N{>n8!NNW6;1J(^f7{o&YXpEc1`_NiKhT3smO!K7nwPf3yeAEYaPWFMWzQhy;SrPfz+q*lovSr;SU_|3ZC7-zO^ufprT# zvZgzHc=2ZE_FSd}9r#!{Rd`_Lvltnp-;RHA?YGdF^rtX@=G-x*v+*h#S4wt=pcMaP zF)@}=PmK#y^asldHaC1G1OCVKnVsAv9+UX3&`+!@*VKw6VTO5FUqY+}fy%{;CY;Le zV$Gk%INAheOV+8KvxRXHdS;07v_>^#QoX8$G`A2-WglfJBE;YQH~57zy(Z({sFzFg 
z3zf{g6aQQM!V7NxX`&bh&E$Itl#27iCCXngJoj4T53Wt~2TyX<*n(V1rEZ$6Aw?_R z@z{KJ$NRj&D}Tp$g!Ig_X_h(ja5u@AbhE$Bq)X4YvF#@cDM$koOg1UeqyF?7YM(72 zxBG2${!YxSF%nRKV@{o{{mXACnxYr$oKj|8vsUhPi=%TYRj0Hn1n(u>MKx6=vrC@# zBq@VVUFJj=<3w!oL<#eaTyv{Gd7jFgJm1#j!Cy)9XUdsw{xtUMI)BD?pFc66K`@

-=x@J%wca@0kZ5VgjSD0cg~vP7Oc?cEU`{R~Ocp4OF=9`45FPTyy%&#Q`=_ zf7OteSgvtmCJDK)&Ov?cKQn}!=AZTPO3XOr3+=o#_{#jLD%lQJs{?Mu__G$;RF7<- z38h`8jS{eaZnT&@HH06RLus-Hs#9atDZ7C!#f-~BlTAH{S&}iFBM36DE4`np2^g#G zfcrt*nsBhs{UoWCeLpncRRr?I{|ou|1@S*yk$+PJ$vOro_`ZAdKQDKafBh|mc|#Iq z;o3T(H@RDk_gU5U8(y;8_kmGN?7(~2K{U|*uv^peFS~7ZkudFATK;9*t?t*-@-O>u zWnL?|HIP|v>v{!CH}da`#_v>rCzh)I4X?W?ni{4f`L!=me+n9cdvDCRSHl154E|;T zKI|Z*V~~dZJb#T|2U-1hq3uP9V}WX*G7?jaU!?wQG}yuBI04E)HLN`3KBjcqYx} zio_=h#z7aUK-GLpnc{S?IO_oys9|#^lu1v3NppRB4qABf)K+6~7w*kf&?G}+Dps5yjY&Am`iGD|E>L(O+kFFL5d3zd=D8CoR&!W7|aUu!37_qb{56@1(Z?#r6IT*WC?oZrP7;rciE zA+ZDYpyb&8&L)9p0O^JxvrfkMl4b5fa$J<98yyxM{i_`EmJ27c1YfLu$7~R*qN5kY zM{jO28$`bI=RvBR?~D=o9&3MdWux(KO5-5zhgWN*4N>D+97PPr>AhR*1U00PNFyiV z8Q8A~5Ra`h_Y~4_i7LxJu_B(jpf_y9$MSW_0yb`G16yJ#y6wF8 z5QiLLj-@8uKphxjUHhm`dTIXM?)}7CjH3}k=R5W7c(?d3ivLBGUFBT@Bf>Ui+a9Y< zhPHSE?zAr%uFEog1VkN0%BukbB+ThfS7<;t+Nrx)%AZO#4WZ&i^prrwF8;*&lz3k5 z?^s~O8?EK$_OpF?$!Y1un=`~vfal3qry|Cw2b?w^uamUiXQ_HD?G4G$=s0Va?`mcrl@OC zHYf}~R4snpS7`Ct_9jY(@VwTSbZosQe8uO9p5~@*^0iUdXj(}m`s{Vu2S|7p==eVv zzBFAH40eJpS5>oduNv$H-C(d8bgRK`kWrI+eb)*U$oPw}lY#t2Ao20~J=MpdHo3vn zCg4B%n>#(@4kZD3q2_&kuSyoWV+s7#?psu zAl((MFO@O_;O-sR@AjlA-F~J|dl8$Jb@cAJ3-?phF=6?ZZfwLoR8&sn0Gf z5(|Bz$z%uu78pTi1EYsPg~T}h#sw`sn{+JZJDrPjv7c1HRjq)dNR4NxuDBD`edTMQIvo5kJAT;^NLnAz`dcugkMHQS@Y~8`9e&e7 zDiMO1&7&gwL~lye1Cg&Be@US4>HXi<_sD2!_CR3)JzmIxg*OfSgfo3N{I^g1bUvV< zCDgzu>BE@ge3p*)fjd)1R;en=jDw5!c?km@ti8`gXdfJxxF+?}syHE1N~0^yY&11# zDtc!E+s|pFWv8fi$Yxk4zSN?$MXETwPEJhAEUKu-XMENH$KD$p+ZfsuaAE<1BHCe# zmhPK88~+Sk2edrZDiv;T74_D}2Q10578(#cqsL%`6F1c!v-P4*7q*Nv zs?2#4;hx!G$L7br*AD=Wgu~%>7kRWd0HZ5;rZt}WkuT5S=kFHFS~B%jbs@^rd8$v8 zh5y`kEt}@eJvG=v~$W!h! 
z;X}#F;8s{CWt!3(%5fmoSvww7^qe6kP?BsdgicJ?j2UpRQ{U60s_+fZ zLs*1SZI%h6f+NM2X;KN7A;D^%Nqeb@Duv=~JAXMqtKs?~)`maxyK<#&qLa~E%?M{3 zujOcn$cnLQaHRn&TCe?S+fMt)Xq|N>ocdNTtsaPrCsOxu=t}1zWQ{hu>C-tdKzponeh4QydSdY{Ep$^R0viy;x7_QG#g?F(okZ<+>=z_H zV!2ul2$rkgl)wC0uI_G025PYDnkOfrtuKwfhL|{N9~ThIqH_iR1opAj!DzctsZ%~l zWdjjmfSgVg5wk`NQ8?Gnzm0l-46KB$_^AU1Xs&lCv_s#TDfl6I?gi$FzMVdsd|CS?BMY@DAsyBsM<5TaO zpp3uJiL0rKBgwjHRLtS4|A3yD{Z%wv#AF?`!VA3{=ttzO&f40+I^!Gp@}lgo0X-=m zoOoGAxwI$@l#V#Fj6;Z8IACG*XF?=JG&z5c3Nt(7vJudlD4;C;Yl2T}+UDq~ zqBs9P$wx+dwo3n#COy(u?StMtU-SOn^ri%U5CYSvr54pvdze88w!&i9YI#pDZEgR$ zzjB}*rs+`IP!Z%#Xd~Ms0AF+sU9GaaqVL|JJzPt9c zKNH72Ec_5I8f>jr)rHGxpesV0CK_lZYPvXnX9qW|GEqa5F2d)v*#~#a7bWmaxZ3eG zf@ePx;9JBWMJd22Nt|T9`TR;3bW_+3-PqRlV8y_N&a(QaB}Vx}hc+@uok*tf_^wE1 zYk`ksO43N?R}Kpu^sFxV8ZRg?38aletG{fl;Qfd6cDdKXXZE{d)$X4pe1qaQZPQ3% z^L(<(m`Hn*A*$5RknnZS?x*?W zbj<9T!Dj-e^TqWs#qN?Hx8DJdu_OxLLs2!0M?{GvgadU5B85qayA)J_lK(Qa`s|7u zBG*)7`}wwB^?JfJ<|Ptrt3#jJOkIy|RfAZ_%b+)U7vv)XBwFRk|~)U}BgtCYe22^VTI>wXs8 zB=C`f3yHx%+Hi%RHNA|3!;fo2%63Ghz(%PO;jB`{^@9)qajS zk{X-Oo3iJM^z~3{Pi9+G&*Az_%#f{aHiRrdmnH%eu{u{pnlV7U#5;WL5io-CSQ{r5 zYDAW9hwfv3h9`1aCjZ{77ECAITJbdv%VTL@??zn`g7SLY>kkNl{M#ly+qTsB#hyR^ zK~H7|?&#nC>5*oByov+n#|v;mnmeYL0F++D4La5>a}qlZy0)s+Jf^QJf_Yp*mzhwq zLTvQsZvpdHVh{6If*$2!5g;ZDv#FXl1Bb|y!{v1#<-@}EL0XjT*+u(qLTeC_p8VO& zuyU8~r^IYrp+=*MD|T~u+B+=UVD5(Nf5$+86@!77QRpeEs_#I(oE~RFnUs5}*xyu6 zE^|k_(Mz!Z99u^-b{_}xES$Pf)S3Rwq@k#Dm(%`Q)I4l8{=_9{cxmhX$8<%CZ-qhr ze{@1i>p1Xb`%Dd#Evi{|FHD&Rq)HPvV(3P}#HD2Tv$Q&%;)t3gIQkt6`SR^aA1~u z1VqmLEwkK=>=&h>e(a!r;h+XPs7h43A_hQD_-(XYs? 
z@0DRqlWewP9Gj=Fo#PD{iy1!Oug02PXw|y4GCClNCH_qjL~(#qlJspfCbcY4WuG zvegexu=nAPZ|ow3q9rzZLwxk7?Bg5j1D1z$`R$l$rmm=r&Is3tR!xTj#cRHF}A zjY-^Tl{U@qe4Jr>o}Vjc#uAvqtMU$g?@OGFy0Y-dcPV(uSQ+^SqiV6gI+;9!rCsOD zG08KmzQpRUrv4dcI7(hk^iuZCf}3T%$Eyi#U4trSQo+iDV#g9~WDWR+?vffenwp#& zOO4HCONc6Ia)sc$)YVnd)S1jepRrAyMJS`QY|CGvnEQsLyrkVA)Wdexu~gJfh1V#P z;<-`c>r>Vdm;6qPTAkMLKFl!&7KQBS{iL~Vuz`G_XdFjqhvPD&Hk)Y|ZZSK5)5Jb^ z7ClUbUTQXXwq45uojfjnrdjP`&xg$dTQ_no8hSkzdPPc$MqPupljuTksfnQ$Wae6H zm*7Vofzr{?F`%=p4WGXX-RUE40u;H0-T4&Z91YhUCd!y_Pt*|2wk|dyBed!{HVmOw zb-F{3vdn^R6f_ze)fQmxvprB0)!h!fRhQ(5@4uUoNB2E-XR$$URKqO>={kl>+cd5wUdwKvymQNLElv;gD3KJ_AqdVoeo$2`A_Hga6g;aoaW4Qi*wF7O@j9j$@e zXzjSfN7@dK1!yfI+*(4f4EYvM6ul4Iz=eGKCx*& zIW{B&MBZXArpv~ zv-V69w5NT~dEfv0|9?I&A2NIIwV%EA+UxS%*YkMA4IH}`@#FL%o}U*1R~Dc1{LVD$uWwxN^%p+O79O779Q@! zA&d+H;kD&J2akM-5eoo&^XbMdA1G8^ehdCv~KPYL&l7Jr?K7GM2>6>P2^ zKG(8`xO_<4&@SUo+Y)H57oUXh%QzA2so~`5bBzJT$Q4fd=OUP=^M_Jr?m`;3*6Q0; zoqMa*Coi5Cs=CBvm%Uv7D()~lQmZb0I`wE(i#C3=P8(t>^a%^ujgL}`ZUvh^Tr{8V zl>5tHk2l`WEBGkrfy^j&cwMCMHGdyn%>Gig=>j}*08*c717<2Pv=vf$YxY3HD>eII zk-W@2MEG*nnFU92zd}hXzouzpv(rJdrrla*j;dBtQz;PZt}tXZn|^nOZ;ea-N+zEaOeZ+(m2ir%`zU3A(7X^?bhyIm+(GZ#DTLRp%*U{dAf-s;YK z?1p8=jNZE6E-Wb~qo;~<@vCMV^_uz{+m=@>+mU+V!qut8mK)$XX=T@rZGLNZOfv&n zYq>?x>6^dXt0z3P7+ZyYb>3pTCRXs!tUl)zA;?3E6_MyQ_QQT(5t4{tESXI zRgL0*85QxpDl<;mI;J2u*<}`rzZ)ufPBrXVN1T33b^aB7A0I|Y1~7WWx1rznDc(!& zytaF$Rps75c$d7~8?^bX_qqD5VG^vWN7A#hjStkAGb@)zeII%DM1_QjCdjWwKCPec zPz~-tVW%tnz!mC1;dWQp;|g`4kmvNcGW7DeCF+ePcz{OzTxIqP9N>aeXjkAY!fP}E zxC#~Fl|l>jnwxNCit|cYuEMrP%bs)jaZT$H9AEGybWG60Jnt;W+o0;D= zWKjl*7&+)S|6F^ppIaLy+1p3x?XH-KUxNvR zeJtNkSGDdXN1xiapoOez()P(<^bz_aSosJq3Ql;0J_r^)lKP-3ss&zE$@+S$Y69!) 
z^{OH*I~CJ7Z`}i(S7ggBQns_6lgx4pG@lOH0_I*=EAg5tc&Cn(OF@C3zqMNbg!tB@Ql^@JQ=HQ-%8BoBMn z7cE$0_SzBI42m>U&mV3wdj|((_V&*qjsLFLr17ywBaN2^r1Q$YCjN%UoRsUq9Gi3FO#I8@Jeruu()mdN@wu#yLQbp>)RtG5 za$%2$IpJ|*&d%gn5C!rj4=RHG6+H7}-d({fVPR;QVry}2FI=jY!gm$ei=90Bpvs7N zrHZQ3k$P&U*H!Z5sfRIr zC>#|z!~c4q`3*`5*2>0%{hXy$gnrRAbrCVfFm{u+dT`*nQ)KoA-CuteC(@^>zXI3) zfU88Haqpl&(?a3;Wd5CW(6SS=vl4Y{3Y(eLPI#=3Rg}FSXqsX$xqrp;pgkv&@PA<6 zR2ohmaiv;ToBQX!IK6=;ah@(#Z+wL!oZc{-0!xj|H>^RwZj(v;e|Ee2wejf!-VzRk zdcuhz8h4Wk_*~_`lvR!8H{^L$UZ43+ky zv>;RZ~+ z=I+A;iBDWIbfN=z)uxRBe1wj_no!^Xb1_;{fA4s^=c93Of@Y3FCSt6K4&A301e#1rQY`AOZapxkO{=+2qt*2p zu!FVDdVj$K0Fj9yu;6!gLN*3zlB0=h3ovph4k|mJB|orGaXH+y*{lQRtS1kTwMK6J0w<&}x`hAq$tLqUjwSS?Ps1M%L+$oo!R48jf3-#6C zBN~tcWpH6~0U&@Fo*1kP2?U3J(+aIRWG)bs`eHl=#^}(VaBF9wG+9fiQ7gWaW8D!l zKDtQVE#03YQ-nRLtQ#VWf~ z>_C(>rx8%sEasTW*VwomB<9LuWt#)Zukj&~Gqpn0WAvD&|c6C`@KIiHp=KPB8qdfYDH@bR?xzB!9Z{LWf9d?Q6&`x`9v}_bS zcR->CfzgSxKeWZ}lyUCGI%gMbd)SRl9SFZ&>!r8*^o6ea592TOf3W^(LeJ>6I}|%O z{ak&!lDIP7fd|r5=4MajV>#a_ce{idW`dgQjq98tu16CfR5qmkv(BdlHLiu=ye}q%|{h7`!H)VX%-J=5Zt?NmvHHu3i+NTG+z~gX1&v0j3ay;5R`#Gl|sZ z=$+OMh;{Uw^wd*u7S>8l=czdgf~1DBP^!`^DUI|+N5-oUG&cjuCt!ILJQicpVin9k zARcH5Kkyl4MQOg|YlDn033Grinf7V;hB@#VUA25DfWZvlkq+=g2l!n9&K2O-9pKxa zP1pE$pVhdV12v8kV50!NREkez03UaNs~zC+0&EhX#*kOvk^#KQ0p9BXXJgP`p%L1mwifjZtnjc`z9s&2lZ z&IW4FXVTyMVU~UGVGiokf=UVs^Fa364Ak=us)GZ)GFVUxfWjIxcBuHj>_hmorF25# zFlcA<%p4{l%S~B=&bSax;cW^OE!4#U9XeFEpCQmFw?=RgD?101$G182vhMy&5RKPp zo8n!ZDrs4)Yz$VLCj_0ijH{o5c00IA3~B6vBj|?OH@6TxKkz^|M5>j)lqK(8#|^M+ zB=J}~?Sm#-)O?V&n%j>6o$nVrJI*LQ_)Qtn#+3_s(ZOODLAnHMHLkLT6R?D%-<%s* z6( zEo+Yu`4m(cNGhDa8223ZI_}f5jxX0noQ`nq>ToT)_aw)HtBacn8Q>i0#1k#%L)D(r zz=H4lv$D!wl>lgBV@hKFu2>{_IAWrc{)=Y8lHEa%X1cOmpU6z_2SMn91HH$1E>gl*na zY&Yk`+$SKXQZd)THY+N$05Ct(RrDafmg0HDxt1gM+pVbvJQ;rHm>QTb*aqCa``Q~x zSmNXe?GxI*jW3G$dSZ!$=1SYIXBGH;=Ov?gHPOU%DnV0a4z?Nn2QiF;(Rqv8md{!e zC3Q*bOj4KVKw*n3M4_ib9VqN@g&nR?2dNi$IybJO+;}=f+wa6>_=B2(XUL|01kt?|DoPCHo!wS%Rx7>_zRLB8n_iHc_EfO 
z9b2%*Fd3awBb=j-5`%0bnG9V=A&95@i~ZN{e>&d-O)ue6-EYpb4joF8By@!AFW!7d zAUb5aACsBx+F_Mh@0t1TkkC|RLD$4b>j!4?&{Y`HCKn>Sz25JKuH^5+Wan3chT$!3 z&~e-C>|whh6&HrAZf}B9pe)Cw`N^M>o0N+Nbu>B}5 z&=es}7fzdDvy4@w?sJ_`4nao}+=Ubnt@&|ISASe{lwWf`h-*!LJwmcOCq?-u=Cue{=AsW#C6R_-{D) zYX$#32mfoqCmsC3g7k;)-)$d$goFRS;4g6S(*%E!gMTgq|Ez<5g9G2Y6DOZ#7drSe z1^)#HzbFHLmxKS4ga4!8FLLll3I4!7``*hk@KYT8EU*5ffS>5#Hy;FkwS)g=27a`I zk2(0&g0FS(4+;Km2Y+M+KG(sY;NV~U4DhQR{9M6b3cNJRnf2W-e*c{9;s(rvD4fGI zaMAaFEhSZ)%?Vcql@Idlr2j8Kfu~RdSlIhH0KoS`io_Nw{({|E6KMW-DYMX2!U(p6 z%f^Kv>w+~mJamVNylC=>(eDo?j=q0wpn0M}M|jH>Fq3sqI+)sMa@eCl#A@?@AqC4& zr}N7*ou71dId#LjN_bGh=`Je>G#{szn4a={8sxYltXHHA>y;iZaeg7hab2|Rd>Zr) zC9yJ+=^?NMnore}Wh(;BpWy_1m#Zt5INJ;Q|0#;)*^Kw}H9=O(i#*L~0jE*{a$3mg zlkT*b(}y|j=hLMA?PI{jz=#KSSpZg$1)x2X1)x^Cjx8bImQWd4fn%>!l)XOdiAbVR zWCfH`C|NTYuH6QU`X@IWFtru&_k{Yk6XByHR*T;)GAe=kCl!rP?axD|R@~@tV1XIA zYgdT4&_A8gl6DJZD0N@Sny9Z`H{k!^H4rJb$CC9mauH^{iE{Wt_vbZz`sNS#N)!%n zROKBJk}eN4#L(yXQkCPT(!B^su-(-y=u7CA13}3_C$06!rzmj z4I4zWeD4Lc-y!vCwf~pcTl~L--^IJP{@t!+#)WvjHCMe=QTB2m837eX;@vTFBi6nZ zu6<2|RQ7bBsTu??Jx*q<$k6$&h9?*)dC8O;rm{?4&amOd+g|+z6ToQTvOv@#r}xMos8cX2~T@S{}RcW{w1*T z`f5dQUSB<2@s>jt=2^w`ufRaA5A+ zW`Y8vgiAiet8TDUBpTAfbD$DwB3aaI3!LY3SUo25mPeDLZ?RIcgF9gZ*F@y+GyonN z7bC`Ic8rPpvISqjiBPodFAef(a`rvG-+17C93xLwV`_h(>1=DZ@ zs-YP_o|6IVPxu5wCG#CaCG#CaCG-6W&3Ch3s#sP`p|0U(za(7|_$2-`Hk!H{7Ycto zs`>q>c8Qc7V16rWGd$Sg!8Fg4^RDqc$8ms%xffrUWv{>kq%~}t0%^cE-!Z@)8~~h& zS#sH{0GfvFu?*BQ2eryUorVc++3SLmhV7ON)J+cRUI*2rm%b(_Y1l5$KwavfW;>{_ zO2d|Ws_~&)m^_`&?R4x(AAK3XpDba{8-Mb7=qf@7NjJjcOU}3gMcF&}+{6;+vHRg1 z6lHI66-k_jHtrk<`&zsA1IYOIQM;|sJ%U9g#*`)o!xb;x#`mA$Cl67B-Ps>bT%+9( zPjPQ_H}`7u=c3Sv4qan1s?!nIQN9jVjAS);FW(|FCKth&Q068@s@T>YYxY?3bbK5%<)y%M#k?*{T7ky`Td zR3J8CD|9yF8EMtt)U;}Eaz$cDTkGu0(MFOtJ(l>Mk}a*Bxa+{EZ)NX%b8uGb!z)*k zzP>eeVET^2i8~JzPwOa*CQrDeYj76Q&g|S3sRNe}yKT$htckn+UVNoXbi&icGuzv? 
zTsmlF%fwwD7MHhFBxV;^fZN)(%qna3rS?+a-l^^H>V%HOmNZ5Rua?Bo^urkK02w1Z zHb#u4|An^Jy|dZd+8s@P;|5+Es~!DIV-?K|N2aM@IG$frcaBDEPdB>_Oe-2mJR(BN z#}H@YftCC{!yl|qyEn?IgV)|w)Bp%6ts%JFup#gIDi>y|!YwWmIIAVx*g|HfjvW!$ z#_Rve&4^>0*K(Q+eUp?<2T9vR)xLJ7LhNbsRwd=*&^WpXmF6VQVoqlePK+*+4dEuW z%Vl~R4|j*pH4IZ~?@JSk!fjjj9s|AgDP!r0!tUHE##fwFFwlD0Ad{g}>!%>ex=UDHK6fe;j}NRO)#3T#C%;bd1rg76yLg-_Pj5Tp}n26*T$@I7;~TaMA1Z1 z(by6am>vl4c~Ksrjc*O98xu?B9#0+TB~M?bT{sHbhoW=sdC6R^o#z%={yhE9oU1PL z-`FqdF>m`l;;Ya!k!+|E)t z*_2vkj(>o84=sF4nt;$vZ`nnsyTEIYJM2i%9lZ9qkUt+6c*7_tN`(+ z=ghu5^vGfzqK~xbQ29uQ4#OYe_uA0Ix^->zVcoj6@?qV&cKE}X$nbmGQ{NiF8s3OG zG7DVFo4fwR)bXYWn2!aMN-r2@h*r#M6*Nq@s5{5(c5V zVlWb^u3z$wO^YjlWS1d}zcJ=J-^x{2#%mHZ;V$u4o&woCtf2ed=E_BjbqFnL(P7e}4jpnAg&SAkg!Wo}6S7m7E@~P@-;ycS<-t+m#mtV^E0F}VAaTD@J~vNH{aO)US%F=x9jX#j24N^kq@*oPRSKfXSv6_W!G>=( zRZbx}K|xwSiyXNJ)EYw-Zeaw@tv&tsj0boXbXZ!U%%cRwQ?l=rA;W0NhVVBU7^Fbs z!~7~}0a|Vf3AnxIJ#%MjUtZ{LTtZfaNi~XpbnlR9*VzV2@ZHtH z(4w~-fV+_YbLeh!W)3Zq|8wYWb7l@LlK*q)?py~Z|L4%%L3bes=+NEy?m`aGp}Pxo z;a_CuME4LZi|Md0Mm zp+PxzlJ?c8V$LeDqGfPOrRRq-_3=nS>Ab_AQ;@DH;F)mM zw&sm63cndG+Yo48tpW)HWfkz>Pugt+hojxM`|MTc*tf@O{qCbNxyu7=j`z_p zhpq0aYaNec|J9o@^n0&@3+;@Sbq1Of170=DZl8mU5;-FmK|rrYjL=s-k3zJuk0lE< zT?MA)+IUpe-86%Ot|U+B%d7#--?RG0=*ynR#vW)Ms{;G7F?`wAR6QwaMWSBq%%9}T zs@S7l>b<>zrXQJ5a&8dbu69kh>^lt7i-3#Q9TaHZ!KoxKzT>>0ek3HE=g7gSJSnn3 z06)Z<8-#k`$-zQKhfL6Nm=>OmP=_iB(d@n`IFoLdu zB1LOQ-_G|C4(w@+MvXl=)6P8v-~W2nm;ILvMAp}}&8e#*sp*`VdH(;1G`11Ed`mrP z)L$o(a^0eV&yNc{GU5_Tf_gu{_7q6^KV+6F>!{zj*y@D-87xhRYgDYfJ$X*8UmLb_4zomIf= z&I;;BQq@_|lE8w?c}!#X@AmjFO)!C)pw)9^1rK1eb2@_lgzh_10^^(Hf*A=Z;bMVK zm^?;cp3y~4q+~@nISygPUlfz7GC9sf6Pf;E%E`5_``?T{${yx}y@uYytGpa&dQ~$W zicxxY?eN9)&jduEZx5C9;1z~0Fs6{jkRAn)?c*2jfxi4L%f^1GwhwTj*rw2-w@~3B1Y9VBpv(Z) zUfGL7kFo!OeGHFv_Ir%7In$~_OKr14h+77@T@azAwpk(c7@HyV^bS{OyA^b0g&(-W zAXj;z$87gR=&>GGnC}X0_eAKi{jRXU7256z|6{IfxXS#G5&VTYPZsQRX?xf05)sn= z24QB61^n3U$v^kZ42B+*B{Ud)P?peO<%6<>28Tb$`l$+$8k@zW)Yx-Ke@$xaIg_fC z8v7pjAbTv&J(lMl%NzcnWQADS`}MyiK&0p&u073HivHz^{(Sm2k?d(9}~ 
zw>Ur%KgUv9b`}(|Yokqy>>(MzYzKIZ13ZBBZ`ou4{>1@q+@G%T^;fONcQ{bv`vMFL zaJ~clO9t?NIKVasxJrOO7NF;YH$MaTa|d{*1N^)IBLeh%@M<%FKXic89NlwgN4lv>XeNw>PCcsWDu|#%W1~A|N3mxF952$gR06ibP&X3Yxz4aCQ>OCCz z>U#wE?*jCE@Se;7{>=eC=K$YP8!G`cvFi2=)Gr;>5(l+GP~R1l#H#5Ts4E=QTnBZJ zpso^>#Hv^Z>Kq3(!9jglO}iQ>eDGQ~!Swv`4bu>2)@MKdXU=wQpfUa63-A+-V75T> zF3$Q{-~@3fAAK2Y$4@|tJH&*X@e^2%Kn`b(vt`%&f=<2N7H~*s!V`^gMa9Bnqg)t- z@3{x0(7VD;uh!{&J0)3F*6b$D^a3kiA2gGsw44!etV9@Ion)@9iZ?G*uP94a0!{aSLm9}le$RQDR5!aZ)B&gDuN zMy<4gqrx(A`k)o9k%vo%P8&BL4DZ2#-)+7gi65B+hvM!rBCwh9%Xluy58bSN&M8fP zfLLRyv_Wn$)ybZMUW$mWUlp1#UwwS2y$qFbH2O+x1iN&c5gv>YQFj}7RNu1Lj=0dP zr82{kt!iX?KXQ+5>iu#>fwW*OXgB+3)Q6;_*A@htzGFRUCR6PbFjXdImeU96HL8Ka z)fx9-%63pDl?=j{Hc`+I^Q>gHKd2UU>XGEkulf~1ivj9f{wg&4Nd|zfI{;U;oHL@x zbZ{VikA@tRY%x|DHN>jK=}T3Hlf!f5@unBbJGwzf3q9-kqx)8zn=JYprA!sKsa^(H z-=2}Xqw&q1XY*e4TbGH--GlPzF>`!|hg{I1p5{$cHAwDFA)nt`8!g!k+vP$lOOqtm z#9kPW1F!7If0^gf^rYhP9D8p%+L@RBgQZibHfv7FYp;c)e5BhscEiH;n;%VH;K-*G z!mu&bBZf`=Y!lal^LQ@bN~cM=?Qz1fNK#MJV>Bg_tSmyMKSkE=`9-!XDLJ}?#*U}4 z`o2=AB2t{wqn(Df@Nt)WR1V3S3+ppX%y zA20JSP3@f>I(0v^c2JbfEyaNarQXPu^XOnbx~+5d2KuYO%5l_$3o=t=u~K+8?j#Zx zJFs{8*Z~u`MX5qzPkMxP3MZ~Zh2CnvtK)mvSS(N2((s-)&rSN@p$oH6X|C20J*G79 zSFS=;f8g>H7H~wJQGN2?+L6)X?{|Mz-sf6p?BKNZ7xQhA37@NQAuOokh;KE~4P#~R+C$S% zBu*(22=TC=o=;7-%;Gz{eoI}(KdBLgG_{+*U3PaMKR$8Si^b)8Cr(>kT;3|K-i1Qh zKW@#$qh^)dlsm*SDo*_NtW$3Cf6HKd1Vur~Z~Wh!xQoa73Ua@hdU5^s)avygFsFLz z@6}u=#4fBsFNp69G`-2}vDf#_M}m_^&{7Nro2y?+v+CHhu$|A|8u7i(coB4pi+5wb zH*l>O`Hu9lmyIZrR`ybG3|+BaaTm+ptKS+ad6z(A;dqKYVz80Zd6(IrVbDou+<9Q#K;sPV9yh75x06jjdApWdDl+(ESpYK5T0XMHLIHPk`EwxT0cIb!EA_&d=0!o~x_I)m5OnM!C9DFH_fp)YXsQYurBB zn7vn_FrKs8MLJDf%w)o#nasVLfy4C(RY!;$itvsR6EC)M|&B)=?b2wpr5LE@*ylP8*iPHS*x7PBJq|!KeKk;+ZytPAMHcrIm zx5-!edaPuBD890B6HX`?6C)+hRG{To9bw$f!cb>UF-^Xxi8ZU0DF|43v}858?v7|( z4zWThA5NZdZZyEs%{~hoyIcaRx{oGKlcFav!dwQL0!BJb^+1=50`;~VS{^d}7*bsJ z6mRIhcp&{aH(3`T$AfM;dA)Vw4<;9E5cFudsslba4O2@`)Xe1Y-; zDI0Q&5&{*#701KAz2@(``3J+Di|?L#B2~dUMiW2gOL3{Aah>UKlSVr9NicLPnWRL< 
z3i!`X)}7{0y>h`@3b%P&TK)#1Ul(a?85EP|^j=_vwd>~WPp!G&9pbw_HEnpR{es;T z=G9$i!v5)VR6IS^{Y~X3S~cPQwkKN+vS2vz(Z7o@b~2_-r%naF>#Fa=Kc zDsq?E(>Dl_#Fay<9#Ec*cv}$j45(^ZR-Q@yt|)V!eTL}r5b+~`xwV+m%ZEY$i2y7bdubN31B`d6j>h-y~YF%>e+*k3elx^N7|F`qL zY=Y#DX=?=14_xC@Jv_cW0&b~yQN7MDFnf0UM?Il47CCj&4b5HJonNdKn0;s*BHkkk zb-cNOQYlgpvr44{QYu{nukuS-!|UqR_|Siux^UmrAz~<85le=yi43JvbB9JF&mpEX z7_=`tfgKE06wfr@G}XW4Q^vDq2ihp{^?1>=|2a!GSv0MBHmz{RuYl#Oa-zK#1ciuD z4*e^0(|g6Lv#R=1A&!NFYct9wK&c~v;;b+>^%N)i2PKomry9S-zxFQmwqN|jX*olq z*-r*av#1Ku%vuz0Rrg!A?vLpJ^xzZ!?MF#a@k$oy)5^}Z!rM!54K1hA%g6XnN^5#f zW&Q)iJLy&Tz*w599_KgyQ=9l=t&7m^c?^%!VV~USm9>}qyaVxetTfBC{HRj#Hu<%Q zvdbQt-Oc||%hDeZ+X6{?rdP@RKUP^<4!6{UOsh*M8_HOkpcI(*hEX=`-GZ{=$C-iR zVnj04Fq`Tsn`)m@{YRR2XP1mizGhO)qY*ck6s0)({I`hfkZ zDuDz3dDz{3T_rG4s@Z?95=fq3D89mKJkXcvbf>+@cH<)g^Rjx$1B|Rx@qCLfE1-BO z$5s#x^zJ`yeAsO9E6EaQ9Ez_gzIK@*;9LBg2-bS_^2dYorADk+)_i`Vg+4+YR;o@X zs!dfE6Idc(*83y^$skGE96|kDM^Gv7+5Z-)V@pVj(2B;KTTYicrMnXaGtiZEp*~q5 z5#8PJBxyKsJ-w^!G;?n0ID2lXG)?ZXMPh}XJpsHh!cn%K%4S?obEUgv6zxmkyZ4If zgp+fr9s<&d>OgzIc_c%14mPvr!ZvN$_(7Ei$_flnbrg9H6ItVRn$Dm&!sQ7uq2_O+qU*v z8=UXL5qicFej-b7hn_QPiM*qaSu$2W)ECP?`eK4Rjap1_r!kAi;-e~0KB`uji|6VE z9qc0xu9bs(2DD;F`?Awo>ZeM)jD;m`hUYZZI8Cd{_i~!AedzWmIjw!E*Lk7a+4Dv% z%y36}89cLiX7SACnawkYCwKiV?98(>&s?6lJiGGjYJG|{l)n(i3MccPn#Alr@hvmE z^LX^C78tu|1^__}@!9Femt44Jcbnpa$TRgiY-B+FD^Rcm&qniVg}2RwgZ%h??B7S8 zL1sN9JY0qi0Ya7;7Bh@LCaW22f6_cVzKpnZ5N2zouVNv$c_NqATqd^eD;X#jL9ymb zZRMOZKTnSuzwHgxZ@u~En1NR~`ulJnaelHKB){L;~RMOox1QQg6Jw*A%? 
ztu5MHsEQ&5a#q^*h83!W0|}8DwlMB$qNZYL?^aZ7Yg^HnF*R!t%9jTe%3TM8dcDV| z9k1bDM(!{-V?$7C?Yv*)l;q~sKKP4H8NVLxmE?9G=9X;8Y7y%ww&d3tiXNe82EjDR zW}afBvlKl;(YYznmATUC+7#JgJfwx^EmN~f#>lGwLzHr5u%Guf`=Wi)Wv0`^Wz zF}K7#D~a#AF9lWxo2=dT<=J;gEspE#%n$HnQ}<=3_WoDT%m316vYqZUFe@CrLmKmN z^sYtb@mmQ6;pp>{Jn&I>^K$*Hl+Sm)tkBQuy4a)@wf`*dTg{~qgB#=^f872f1;5IM zgG;-4wEK>lar=(+y)yF#)p?}Tz?@?5jz=hvgQA9KhDsaUp*?GBelhzF$u@O|rc`em zw|`b{|IBU+$#H1Iz}`BQQc$@z&+HnL+uw^$kaU7EW& zhHo2MTl3l3roXyQ=M9q>XJpEdQ(F^1`wqdt`WYuE*8~Y3$sin|kxH;h265r$tLhtF zR9}iKn%WqXeSC<1S?oRW6k!!d+Jqu;osuox6!FeD{blV%_s+N8p-Q z9IpR1>f!FEi8Kp~cJEJe3U=e!Ta?U%=3@5#hP*uXy^H?FuUTCezw#Xlc-rPHaSuJ! z)*nSSljFU!KQOuS61m#6%oWt<#>mvIIn#S4Q};xsx1ueoI%=zOH{IrT+Z3S3^4LzAVqNcR!h+Jx?SAV+og=}p~^yYoe5 z_OH5E)lQHd$OM^rq}Cf!k%XCwd&ZATRGdL$Y<;K*Th&*l47yw(+JD5!p)~)?-;1dl zLk$_tz4-@yUX*XmHeNMf>=8#0!q8;P$ndX(`I#3QP!2{n2C&Y1=$N3Kdzk^{4a)^e zPS++udBp>gg8#QrO!Vd7!)^+@Z@{jxE(Lbo=-W3Bwr_t6u+{#q1=xObUnU zV*3IF3?R0hPk&Y)1F_l98i-wY|1lx9beMtI2Q`9NDoa=>+WZaEu}z$*2FH3*SrTGb z{N_Iau^)8;Vqe|e0%C6&luJPDwEzJFh&{!pe^qY-vB)z9V!i)(Oo-i4Y9MxhwIG&C z`L8w<`J1Kuf7h;lw9nDkQXqB)-TLM!f43YUwsLU`h}~;YE&;Ji?jOLrhu{)7JfY_S|fmo)QqzQ-}3=lAYn9ryG z&iMvnl}{LmP5Rw2A@-_cAa;Z`T=CNElv(*3Ly^B(I(7;xW+3)MDoaA_>V^Lah+T0! 
z5NkuxDG+DB@;3{y z&siY>u?#9pLhPKo{u2;8<2WF;I@$ta4;qw9Kx}b_;q+V+5JGDW9=s|ZwXW|ZGIUr9!Jy7|n!lYFSg#wOM6QnfEpVFRFYGOypU z8!c}%bE*=$nPc?eLJ2+UGe`2!-}21M^yYZ|Cp;?1cNqj(^rr~lqy57~`J))g*uAEx z{AX(f^LWBn*)9x zzbQT-HQ>tln0T+$04F{)o|hVMLHxov3ZdpQ&WslXpLraJEJr6~W?rajs33#NCzyDf z1v$1?YBT}g)JLHIc`x?emM|=TC%z%2$WUch;{D$V+9%qS=0LhhVrTh1u@C8NO2GWs zTP*@YmA9}95tDURbE;6~)e8J`t3XGAY`M2c`2zw2AlvR$epewsPc1E^kkP3jNAPbE zADkKzR|tw`;1`s$Ss~v`4XIZM3b^Jp&k^F^^Gm_Z!ZTF$I6*P}G^ayrR+dPkeXSAf zYlW&fHr*7{PpUq9X|?*-)@(wd@l0#6AN&Y$c89f7d#8Q~+CtKL1GrSAjpJfGAF5m< zj)-CDT*Y8(IV0jtX{>83;5@E2WJeIp&P<`>-N3yhdqR~cpV2O}rDqO$#?M=16_Vb> ztVq9!Iibq;Nnh$-jzQYh9M~z!%Ha%Fj>&mDIg7n^QRhg-q4d!FX^4BJ-pOI_+AQz* zLEPiy9*B3N{eMp%RJo$8(3_p3yKutp-iW)r*j-c6)ULe5o1InBlv(zEBsw_Dq$}TE zI4mP{_v`U@ljq9`n3mCNZT!{3VcDU@ug9M^+d<9y8x@UhG1BkJjgSKv+}(BGUeCqXKAAc*P4teT^`TDf^k zcvzHO@Pu=$_Tj=eIkdeklD-$uR@=;7H@~5hguQnJm9VV1YIbJ$OBQH`)kQpMiLK#W z{ANMA$Z<3B!v%G%8X)V3h`Yh6zE#&uN9j*Ls?iAyyOb4T?_yVM=ihf`X+W^*D0bDDQJL99*xp@tf~8}WMJL9{N1 z;}7PHsoD4Cq@TOde{{Zjb56v)F)Lj2QARlZ(}+8s{dJBp#Xi7!Hf9esA7@0Oy|LEn zi~@R0cU9wfUGr5{z45E|3APIUR^~*6Lq{M6PH{{DQl)QU;`k+|l1;KaC=!A?wkpmz zO+XQ?P3YKvQ5P6kQGKMM5e$?ti0O;>a{gSP5I@uIM~0)**m}22 z@t(yMvq6-*tK**(d&9CS4rP{aRHE{%LU`LT%s;|4JKLI^^5LFH9=iRHq%JMk z87|xssw|;$-3(WAgqyokkjRO4Wl%P=i+xcNmqJcx&}JP}3k}#T|3sB8b#Q;1_+Fa7 zi8e7_oEhUX)m`+6qXwMen0aqR&?fxl35bY@)Z;1=4aVY>b$f6kBj9*>se#fsR7p>cewCq zsIn9;6p?E!K#~(@6O?(}KI~2=V3Xrr#Q-jYe2+%D6D`_89#debGMkL0as_ZRY3ja; z^t&ymycWM#c7E2U&d>T#rM4|)zh3iIhPj9St2R?J%_L;MVd%NS*3w-*xe+Sv!+FsTi%kDNRX?1r5wRA?}kRl zoRK!%Ejc4?s5|KlAP-3L^HT?9uI3OuP7cbR3Nb3}IJuwzcdld7lgmbbMkJl%;|_Be zu*J5(k`mC>X<|}iUu3e5;jBAzZ(^kIv+`jeiU`6M{~i;^7Lql>af8ny`}qrf&Z`yg zN(!l0H}7v(EaPnTXL3%^PwDsgmy$m2Yh1=sL-`-$4}o~}xdrZp7S3h%$6S6eT-owR zgjRFP`-7QUF?9LlA!C;I`s0|A?Lc+IzTRaZq<`@|+7j*m<0%M<`{$bXOZC3PzF(~O zckTPRqC_cIm#I`6XL68uz|yutpSPQ_jo_F;}VSwCa-bCpSlK%gE?GLE?wJS;W% z^7!=B&%yDDsh=0buhhLA)D}zvVh?i(TLOyO^SM?T~(xNxy7?}-0EpmDV&LCIPr~cyYp^7&su$X%NN8rU)6i`5pWgr|SbBWG1KTp;{?pIE9AeBLSCukCFV}IY=f>@@F-7VPJ 
z{f%j?YlN9tgbd9Y1i*WfD!n`x3p+e<+W~S1@^ZY6+{aHdw4eI7`_-!7#&k@vpK2kG=F%L;Yv&XWM)b7UAxjf4iz(Q{ zoJNJ7ys8K)_&XJZz4JH&pXC(za%_D|q!?8s6dRPziA2vh@OPODH2MBas*cXguvu|y zGQf-cjTpH%o1{{j>QV~g^B`0)e)2)PGVS&=GdNEn$ONeqW5<990?7fnT5IpVbVBw5 zw6zHQN`Y6!r`-de&+O0ZBrEmMdtf7^#cHd6x8Ju(i+9lkOxo{MaTm@Mm~q5AT_@^K zLbQ`TUL7dG;=XEz__Q|^gRuf<{JmdZi=}w=_6~V*WnYbLL05%Q~yPx8s5845cgyXlkCC>J+TR-ABz~(32yUm>yxI zspbp@iImm5Ezj|$c6YqXdX^OI;nr}ubksWObh6hjEI>n5hbSwib=o4wld%Fr zV@xF+uADV%4&%#acU!Do@2XL~-cZ`o!QGGfW6muYVslAo;h|8)JBo{5v_>YJ70dbP z)r5H`eliZO4s+o081|I3vN4jLeaLC5H};g_ipKW0{InU~j@P@uA{_T2R)=g`;JMa`l#{H@aKa*j;aQx~#FM-g@e-uioSh97*tS`J(`nOyaib zl957{`7l{VBvknwaa%j6Pu<2s`gp#J-Mz&XTUpF+j1+K7&x#a$sC{tuTcnmxzlnqza#vAO1uD~t_G zr10(Xzmy7}bfI@_I0K_qu(*Y>?ED7}L&?;=8PjEt!+DF@zkpX~LNVgq_>pn%$r3hB zbSnCfTmov^>w7klDp6jh|8o@vVHpX5xo(mChxzSqH9=R!h7s42|2W*u*vp&vPN^OAoX>GQ;Gd<2-?D^SteMUf0qZ2LqeEJtS@(ESkC-lcYDGLcBE35@G z>u5&4Sl?Q$>r>HCB@>$yyVZ=vF(BU7i8>q>U+Jv*#EH&ma@Oo|Dpt^P+7pRpgoi~l zDk9PBkyyTZVRvo(4A~Qxx?gY%3)Lo|ni0(0Mcr<^)0B_P`Sp(cjf6f}_^>}QU+vCsjx?Om5)_KD* z=5I3`){>5&l$*FPDJSw6!hDZufP%N_ub_H}FHP8gK7!;BF5FOlQZYK2V)uruQm<#J z+e=b-PO1BTxaQNm(!vd)yGj5;ZOyGWw}yXd^k$CC?7D=Ig+Sz)MG-tHK99> zC!-VX@fiJ@hF6vWZsZvtTZ(!h`x#nNTDX4BOR*TEg6H{;v(ADH;PYWj@zutPZ`mS{ zdZe4N*qsR?1F6Mu+e6v2H#?p|@4+|;1LcN&E(9F*6)K?5wJ0js>~Pw&rm2ndVq%tX zjPEd6zdiG2CwhghT)ZR_EjgJMH$wm7=QPhh>cd1iJlGqvIBT#w#sj{c7Z1Y*KJ2cE z6uv&I=kVyL4sC|=lRiA!b)<$VaxdBlz0Np~*Xb`u80>!p_TjzO79FThFYg_R4&hvY zf61lMneC4{Ynnv0yTMvUvM@a{9dsEv*o#zTmAH|Ys1d|*GhUK1qh?Q@JVe~}IS{Hl zbo1Mh=m1nMHQ_a};nB`v)o!OakX}QQVbRWu1+qcjPbiBnlzJzkC0QJCSHg+Xr&nV( z{wV2^{cRyXw5C@+w8oqQIU0dpIhI~It?8A{w6~^LPbg`#1C|9L@f%rtmOB zu=F#US!iwF&Oz=UhDB3~8 za*7QRBQ7i|R+hkjRiOOCM8L?8eh7*M1uJ%kfv2QC0TPyDNSG)bG^{U8NXk#GYxasw z#k$49*6WI-iMAS=hCd$Dfnlay30#gLaH4r` zPHej7Xv>L*i0()nN}Y}IHZA;(Q83crUI$N6z?_pKjk%#H3uB}y#XvPGUW4IjrM)-o zz89|fvQ0DB;>h2oaCB%>I6Cks%|p%7BtCN+fQW1Ue0S1(Lowz9ewt$#MB&CzMGu-J zpbrt64au0c!+bDL&f*@yu-hIrT(LVO3!-4?wJ8s-8umubhi5FLw2b+Vo3T(n`U=08 
z#Z?(&I^Yv=QU|V;&bX=$YC9ilOCuYfU>OuwO5?`zPQ?aW27snu zOPf-uz&_>z?L5?(p}XX1of-dDOMD4OuSUAr?5v3~&BIhTYDqw7+ZaNdu>{ft1fb?6 z?xZC&ml>R~g!`#<%h>d46dkcm;D~^-w|O{f$Gvo*8v4{W3`+~UE1)l>(b0f6Zde*s zq1L15&SCVfOG@393H@iO3(5Z)$Sphj9PJ5NMxr-bLQ6}Qz|A(67QQq4C#CNC_*ur@ zn)o^Jj+@dv8Vf&rk`{Mfa;e+-NlGp)+%oGt`g1P)Z05NDxW`BV_(`#|IfCz#2EH3x zz_&WRyh|wpXO4FnBl%;>J1OjP&*a+);DQBr4GMs}C>(Hu(&!{{13ClrQo;Hi1J=$< zhDR@&!&b-ux8ya7Y;&ob@fyBJ#J*mR9oD2{VCBxI;!xY-(65&j=Ph&G&*G<}KLvd! zl(@a;m$);NC|R*A+bMh}R3U{bwG9lAai`u{Y~jBj?Ot1O5kU-(USZh4pvw2ko^FLP zRn@bv5LbS?)U5@@I3FX?2LzE!5&>L>K|E;P^6>4$|IbmQT$yN*_; zPp_q|X0F68rO%WY)0T&h*X`{Xel@{JXPuG4p08WPxzr+N1AJ?tnQlywio@Bn(Y7ZU zH0ul)^zdc|y`KYbaMLgj<8a`xiVWYLd3`tOn$p76v%Vj(^RJ`>G^4ATfA@nPU5kVs_h(FJkT%6%Y^K*3>lYuW zfgw{LgeP_h0P6rlJ`^r&DDS4;43Czi!P>Kn26KvyNyN`EFPr+Z7lvWH9D&tJ=B~ED zMxbZ$#9xwLGk;k7uMDsGBq1G)x<8npmOppBe>k+Q7-jJQcg%d~&Uc2nUFVm&tR@)) z=4W;65zr~YpPKpeh_L$wYlZN4sqXrEgeY(L1 zvz|CCS44j&+$<@M%vJuoR+uHl9$55IzLDWF5dU!-n>Ezyyc;~sSinzbEf7{a-t0y` zvX@TdjO2MMr=0?yjQ`A(&o}%x?0rk*68zIQU(-$wyB9m|j~mTxfT{3tPY5$9r8`$M zlzt_p1?h)1TJ=N|l^FkU z;fnI(OTFw3rEc~rO&Zh^cZpw9m&tg0OZ+R1UWq&&M;;&ZZKlzeRQ;hT`o+*4MP#EH zt4iI?;leNHtTUH+nC#8)Lbe?0c4kh-=-&hM92tUob)a$z?-xf5M!FWM*Z}{s4;9Xym(LAjEQH zmVJ?eeRiF|2G*fYGbglO8APUL&>)r$F>5%VPv;9t+2H}|?lyg6Bs!Y|9thFHW;YK9N{H|&pm znX*4}_RqCH6828h{!2$RDV&NczBYCe!R9UH@E`Q~|6cye8;)82GX($3@^NSzosNEN z4jZ{mW#37eUlH#Te+F4mqoVHO{4(1-F#h1KlRsW>y=^cUo!Tj?Vp&gz-X3TBbV+la ziTU<_@6X|h$L!BP3H}%TvFFQ>oNN64SX>_Q*Z?o6yEJ;y zbIiju|7l8;9~Wc;+N0 ziFC<-yqtZfORqyGmgHa3ml=f^aL)8kqY#0Ok9JqZa@3_3!(U{^pVM+9Y_=&%ndx8Q zQn&X^9^^6Nz0_S)lHJp)0=*}bO^w~a(y6ady#Ba_X@-SJ7Xh+< z;EW)2gyEbiI<>`*ITHb#GDLU{hW&L7H3=xPx20n0_#yk!@7Op61aYgAaZM9U-xUzVRVU;AY2y?S z#66XatG96q2;v?{#yw`^6cEHMO2#d;aS90H<|gB&+BgLSaZ{3USJ*fO1aV`Nas6zZ z0)jXv8Mg%_hs5+u2CT9HImv)mY=AtxT2OYaG+Fj01K!gNsjpCg{_GVl7{dkrIY#)$ zjb&r?XV(V)UJSl_-5mDNKQh{g5Ja(LL{u z0W0#s>~b zcltMwH{Mnou+iJ5`29FjsSYpxUd#S{_5;4_p)QKXKR;+{{C(83EK-a>3_u_?V#`pJ z-gD=S#+_lwh8Fa|nb)q!FpU{b1g!wt-5kedNm4zQJmuS#asN(YFu3Ic*<@;#-<`pd 
zTorqb!Dz`)?f#Lx4Rm+kVriEkUF>P2lyba_{Wy7gt*F`+2@N{bYlXJ>ep6GkEu@VH z%I;-vD&wd*YHj~oa1#oLW)4pC82!;6ZC9lW7ak0U1~6dt_7r#~FsrIAKclE}S6LTW zh0<{90oJGbh`ETtXMavd>%|4$x=mc5uRm*y;R1uNrbiqq#GW2tS}rgEjG?&lr{lF` zxQ`56S?fn^hAuY4`D751iET7j5Nl6XdxP24N{6{#7n|)v9@Mk+ZrLJWkZNmM`ax4u z(Q~MW$`rE2-$eFa7zFm2Y%1-NqX*$ROeJIUqMTTHh+CF-R2xqghJ2T3zXxL@AnXX9z6|zI}G}VQJgreQwmp>_Yjlm;IE`T49IE5 zE^-emdiPyGH!&<^-B%J~JstgLj(Xik9{09>HLTQ~ERI zKaemvnRkHg!Nmb=nRPe=g`HpdeoOG)3a%LN=KMlq+U4n6EqKLG<3nx5|9zFI7)=00 zPEQm`VcD$;>yapInM(1xOb7~_KsZeF(qG9#kpE+lu=U85b@b zX;&(HxAL%fe8jszba!y6HyZ-nBvJ7ad`D;b=KvWIoMjXdt{-g3UN|~C%in*h7TZ5G zP}-)jslq54nd9rW9uv=E-{I!GcYYoXo=@v|2d zEmDo$d9m_k+0s_BEBnrX z(DRk2Z{>BE_wwm`d1|#bKK2~gpAr(P)c!&7+&O*I%GzVlG~~)S1JPvQ9roJu`f46| zn@03x-*3PITjGG_m8Ywgbk*CUuqzUUEhx!vSJrJ%==nKU4yZ3VJ@-tKN4y(+8hDPf zrj<{P_f?g(heEfCZ2DQwx*+agrJq|8&}dJbSryXO|ZYzGQ$6{t`+=eJ#&j6KQ&{ z19(J%8Wj-?<-*jtpP&C@k{G`9J5#<4*7_MgD5uy|9Uy9XAiQBW*OZGmV0YJN%w-}^ zU52@EnrP;etQ%e!O9Am;Sh{oW6{>le|MBI9FFm1JqJ4fvGGkGMJ3rmPJb$C5Xu1@8 zi~lGXZRlB%sin)dGE2|45f10tu1{0<){&;}wF^w$BUSg0say03VEDz@@h|(4WtIAU}ISc_6(u<=h1q9iy1A9?aEQ3-URQa01QV^ zS5WI#sVl+-UzBw>9s_Zw|Av18a7LG{OY4T}a6nt6M~o%M=P-pdgkvz;gyw_(6WGj6 z-xYBSbVviuS5sM$+GW19rc3C1aA}hz?NBLW?Bs;qQZ(G$hg*;D9R5%%=OW9YA!{sO zuMaKF3sq1eKY4RPwg4Q?Q@|j6JIOUqaC}CY*n1@7!SZ*kh`&NeDZ3cQMSF9jR5Pd^ zGiuadD+DEH865Dq5|)vF83pp}yG)(0_rJ}DdYR?Vp(Ot>10s}fVv`p(u-A`q1?s$`#h(pjrp%8D(;C6P$?VUe$th3bO zAx7ukNe(AkbaVs1wDlXvPVvDdKT(}%G+v}vqK(oElCRA<% zprE=jN7wv}cxd0flVY-()fHm}uj}dy4#kwZJ4*`=PAxGLF6a=JC_3 zy40-?7i>f??5KR`K-o!*nj?GP!+xo0BY~z0noB!n8r+9)b(d@JRJ6JgE;v{Qe(Y4X zZxKxtmqTdjv&X-~pnzzMY=waYy$q)>kGNBXL8%5bG`hK+Ww>{zXPl_`q-C<$bDD|K z{+nlkY=t8cY7+Qu^(P5&CaLiH8EHhejtD2Ma6*Zw;IH`?`a#Pd&_W}gG?j-T_Sn<* z3^Ok?{iPJ5c{DhWxi>IRS0+Y>p{K1W>&q%GUYM$X5tCj(df^C`fd63MK8$KR935hG zUt9efiHu_(1XXH}{!@xEhp`Mtr(q5;7=Y>C@E>ExeOa#f)9DdeTzf@E;_h%5!=S*! 
z?ZG(5!VYqsQpQHjcTMWS-4WQbKq|^Ufu^=&sQN~{@kMIWx7TIl1R2b}SbBqxFmP5#4trm$QVUCHwd#$MidCxb9u!eO+x zDW0i3cTP@P`Gr9FwbKajyZlI0x}I|}?V7>|w}iio5Cg{I`MKcew3D2++F8|?S%3aI)?xx|UoZ^a$xDYFP#+jF-rd_PLSMq%i!el_8q>2#*(bQ!{zCG1C5fRCL0@XofyjU?;|2uTY@`V{FyY=2n#2f7It}dk`)%7 zB|KP3(0?5sXuYM zv%8(s#DW)_pY40mqK@R;PNtK7sP53@jt6g)pTx;sn(VVr`$^`ze@M?;Cug2)pB=lI z@IBRpbvfOJoq7(>u)Df$+n)F8Ht+eUZq1%uwZ-4w^HFVaC%tvpbD*|3eNS?~d)$Mw z(kK8=kK-OJPusJCx7FhwyeVzZTHZE|d+=vzl=Ic#t>YfNI*ro48tjjI@ba`hyLj6> z?oUU0Eb7{R+=HcQdk*k%?xOsVN&BZG=Pl}SvWf29ZQO%{()Ki&@a;zm7L|lL@ZK

joVk3w{F+CUVkW9w{={v-<-WJHm+CnoOS!h^{VQzjuTJyX{e~e z71et~RXR6ZThnFTo3%BW>o(NwT8G2TS%oXp>-Mc%S$Am4n%#P-tvNLNt-6obRo8vM z|Mlytm8K@W?$Ej#(lqSWi_%nMxm9*n!k?_;{g>l-w~h#>@2!}Vf&OdI{&4yi?5LyP zswv+#Zp{;Ex9R>M(8}Ue*;F&5q{K@zld7&1X6E-s5zhVd&5Zn{UD~+d`n52Ar&%H! zz0)jKz~xzf`-rB+9{>JQW+iIOJg1@`$hrm6>r~v9#p^n;cBaA`mj4}G%Hi|43*X81ycU!3oxO|4}}u+`V{RkA*S7Ki0I&CVyOUZgTrkH z7lqxyqvPFV&%gm)Z(0#6j9zR1Kla`QKC0?!_?|!lg9IihXi(5d2T2e#QB+byGmyX; zoggaJU=c7NV#R_m0WSd(l31q0D7Cir)>f@=y*zEb)C-_u5@r&>3l|l+hy+kL!+<1Q z6K?tbYo9Y`CJAcW=lR~geZP+%nVGZpK5O6BUVE*z*Irv*KaV!cR#df=$*eP*eSC}6pp8DoijN$6k~x|GRdH5iR*|-( z_P$>7ANvww94i8v4zh*=_gty_y`TNKx+Ek@pM}sA%y1zX_1X2o0k~%GFN;p<~_T4TJw~g{#s9;SrSGbMSFY~ zgNS=X(DO|FgQc0Xw@3{kWD2E?#LJQhh;KiLfS5`NQTgyEn(rH9&%AW+Myldw0m1s^ ztH^}vJifqSzBsuzh%kF)xEF_`jG|}xnERR={X8(t zp_SFuyE71O4{3pl6!L1mytP^&kM%fjrM4uEQZxDEsS;?Vs2E&UjSC#N`ZcfjOv3%2 zrih?Cw@w|X>!}5b>L3@+)7>r{2DQ_l58!%{O})KvL{H$PwmMEH;p&l?8HHndDb40? zQ8J3LLY=M?AuVvHuIzGh=CqamJ{T3Ch2pMR`n)D8yoU*+woT;V4vKds(h8cq#ITls zZ8UkP22K8Q<6nC%`zYjfN@BY^hB3tSI??w%87s7^<3holw5DyS^InXwi6`kIA-fX& zZrXu#7ilm4et}+3=LZIS$&i1El#%$QxaO-944P8N(VaQmoiekRlKJ4t^jQYnS)8bq z4l5e7P<9`(B@`tqVsgCQi7SEWa)eHf5y-Lgyphs(Lg2VzyD39m2MmCdH*`VnmudO9 zzCyC;qN*3+jVUxaG9{8PyDjnj+m~Oj+_IIz-bnF{nlGmwg&0E5{BI!1s*L1E>DwI`lSGHiOxL(| z(l-!)+7zkBo&sgvap_ zwNS^Jru-9Q^7Uv>;4XXZNHu)XU?(2mUD&61cvj{4Xl|K5rM?cG5V1kmefFu{ak`9gfj_10FBHj;!~-&;8e>Wv$O%>G*v$ z#Pm6{XMIsPd*$5jo+0<7V$WbXJ4z?|HF*JV!Qba+@Aang+chLlJP$&Vu5t4L-Q)w| ze)_L@NA%E+5L6EN-ag5V4`SEwZP$?r700xrOzxmI7aJ)T;WzXa)LTj|qUr5kGl#KQ)RY!4vo!0NW?UsDD4 zM$kQv>PsL~R8yF5QJrF?aW(`9kpp_yqM@@{QR}?`ZKef3(8{tgVa5h zcnTN`R_Hg-?(ZIdHMkge8C+|twMmHtae@GBx~!bm#X?z#vwlWUSv!5J@xtRD!Yv^Df)Y)VmxVneQ0n;{bA(ujrc-3 zX1e4)5SCD-3nqSAxc+2%aZT0{&f}e$pLmdu+=FTAGe><+l;AsTXR@qv@k99qs%go` z*&p!uGGT)Z&dQ=g;f|^?825+_G@ak^O>Cr2;ZXotm0EPc0*R%>&LyX9>vMg_%62$l zhV|S5kX~Gc5VUt9YHn&7M6f>p)cM)V3brYBwfQ+dKxceSf_ga>ls#`<}q-GPTo z_=(ar8k1asfTN4lh>2vJA?_&ofxkU#C4ldK!TRt*Kqx+6-WFASUYv#cBC!Ib^KN%B 
zx@_$UMzP}tR>_5LBZRHd!@79ZTcZsBWRRQnC)v{x7Wg`IUis&m{*#`-d^Y9hq|2&0 z6>}Y;(amY5_v7bRw^&w53k=u9A0SD8@EReo%aui%mHJj*_dX=$n+#q&3vVHhVP6t{ ztMO#6r%$w5PcJ4hvdOalimoi19v7kun*vHFa>!~Oz?w=8w(5-xRfHR7E|;2D#FdLb zU2MJ8q6z}`dUlEDr|6ZNjyEH+dgM68IxJT1KptyDxw_3(6d!dd6^ec2{Mh`~{Ki$A z%{5%Gh53DrQV zkEceO|J`Q=4!%s5P<;>Q$cez@=S)YivR5PK zuOqY%V*WZ7EsgkVD|y*bKA%rS`rq(H{Ch}5Ss-)eTF7UdB><&2Uz6{Q6_JUG-#H!* z@eCLG8^@u|;6zKzoF#mRWjTqHFQ~!$Kxikrs6Oh)$2ifI_@3Y`0UCkO-c%(J;^F$h z)qjp=*SK@Mk_^0aF7IH(kq(oph)Rl*%$Ck({n~3qB{d^Dy)i_l5?w2uCGWz0mP88@ zn6#Od;bDOpvl7}MnoisB z_}a0!S~Ss}gcaKsmUB8W`X?{AZvi*EZJ4u^z5u%gL0*Vq^{?8jC0_(^q8Azqp7AD2gn4ZKkp#;u6flGe@6tB{hQL zU?(}UDOq-$-Zpm~yv`i@J4hLl%*!e=nTOhA31zW7o=|kI05c6WyVZSe$mmjUIM-%< z?NvsPQiM{p89GDleowJjHMIJFi50o(RNN*BM{^t8&A0|+DpRPIye((_;BFH-c7cJ?UE=qImVbtco^_K-3s5ka0i9&CIjEW zn+$vhfk&nxeeAo7O%2BAGR8=sJBStshJFA{zmRf_5^I6NYAIv}J4Soi6;W-Vw1KTk z%a4}!HO0vNL1t+cQrgJKB%#Q=Baq1o{KF)QYHH**f&C=X-wYp$F`5zZ=IW18pU6;2 zWFoJKTrMA%$Ol3zyi4-24yK8gl%(HDW^rsxia@kT9Y^(9)Lv>iTIV_g3dDbwI;i+2 zEXL;2eu>bfQranWo1FRR1ti`@HEumu@lDv2y4DT*zrlNBM7=exf(G z-h@*QDy}6;YH|#(!~$irJX8|g*@w}bzI7*5qB(tQ>{$wh3SCN|M@(9FFKRj-3Ux^= ze~|a*`s30(-yU&G@%WrhiFY9$m0AKC?|$ro43|EU+L#RPuqPWVj`B~WFJ#L=*t$J( z=3<$=tSKV%ILg)1X&4$2`n%=_rm(f}5fO@fsAqmuD!e^f#ZwZa5*j$k;5& zKLoJs6POU`Zh?7$g|Fp7D$ju~NBO4|DG~c3z2oI`Bz)CA6~ggNX}#4xM?Pn)P+Ozz z>!6xZ|MzkYuh)qCjJ^G!E?xbtJ6BxX8?CyZsRu`WokxbWfJh zO|2fx6j9_zQCH26`xa8fB#1MjEM7gqZ_?Ka{TZ1dOrAsPyH+n=+tMDQ>}RHXFe{mu zJUoHvEUA}z{MUH=Q@}V`wr6yA*@>2+-PS9Nn4*#_%J9kxnzHZkbe#RIRkCM`RdUui{g%swq1B2>7(MR<0_j8P z{AD8Ry@H?{6rCG6}Hw2l&g?rRNFltk$S{H9IIi)T-Q>T^D@vW0VK?46m#+Qeq4K9M4`6e+Fx-z;= z;_Ab}F@~?bdi146TyD!H2xrXk2wKWvRUM`K9)}>!*K3_1^$lurH*?@l3}%KURa|GJ z#SaWn1TOkN`>8g4tPnMZ_dA$m!aA0!?^@mU5%K9Peqrzmt?X2SqkOsQROnMA`>P!l z4Lpwu3{a**u@Ap!&^Uz|fhFIeL{4mQC-U$VvSPMon^On|H%VM0E92sesXibMZ6dvc zhZ@w#>Gie3OYh}$31_5%RC_udP>xV%X03N0JtCRuzoK9yWy<2L^{yD?4Z_@LoGHu7h2?xHrCsy3}^Rxw4EDWr_R@Jz#m@yrD zKJd^)b|WUrVp52DIT|ZYfoa8Y(30pfk19o4psa*qjb3?UnyoG 
zP}6?%hc@<`dq|A4-za(Q=F#FOENc<_5Ug+yO;rXf{ev;mvn&;U-0d>Bc$kyNM4|AW z&u2SFg-@zRW|x0<8h1QviTe4sft-01h3Wfn$7JDX54bTQnni{x{> zJ*z;!m2zCZ={R=cPtiPlqG+vZ(lBL|fA_qRQR`Z9U*%Td!`_X%*OEKf^M41iO@$dP zuyWmEVirS+1|aN}`{^ zAbX&09NlmY(0s|x$bi|?al*u` zY#6J)g&Y$40>h6S_7?40@5ubOw(=kBFO%ou`~)0KXW^7;L`ye$k{Lz~73RckNIT;} z7L(;Luv_XO9;^@|K_tBaY7;imS~4 z+F_CK#7iS{9)g{Wt4LJ?n7*R|lMk~u5=>=`MfFZGyh48^lz_iD#6myiA^{Uf2AwZN zGl>2f8xd5QiJ}3g%Zi@HI2_fY;}CvYHd7cRvCE{0wIV;`knCmA@NW<{+sz#8;)*yd zWaj0^>ibI!LR^NaGvKWl#;Hfu)mKhRegL$17<>(m5Ej#zhKEmA^`UQItJ?;0K0LS+|0wPcVF^`W?N6v-!Fxse8}Apy8~72lpXzwTCA&$q zk~h7dAnxAAVEzno=xbp{X>xl=u8mM}m!oYYYQEd-?&hF2iO4b^IRYKjt6fJp-X3(J ztE`6G6(U8S6ZK2VfSqukMwFcx7kbb{k1vAX4LKO=H>bVE zu?wuq(IXWX&}F(b`!2dm8M5^nr|Pae-=IG-V9MmI`b&8t?a@2J3EY8Rf0UYw4OR&s zGkHg~+~fPPGzX)~s~+Fa6swSa5wl=g_#8$HB`+TH{69zD02DVijz=fb0ji@}r{j*6 z;vhZrfo?1$J^BbQBFv_;JV9S;ps)HL36S%*{bK%MtS_?JgYi;)TRxE-a=w8lj7uNz z7_Z!^^JH-sa1ZA88e^l}_jwsOl~1Qj2{PKG8V+VF76=bz_kYso68Dm$9_vy(?I#m+ z7=v%RXn4?%ljC=W!rfb@AVT{tCg2@ZQwEd;O@h;USOmULwuPBGdqiza8Bx%2=6E%xKD0X zn)0KCQl$V&!ECXDA8eIu(-Vc&z|J{}BuaFJRHmgLkg_7iWhqocv`xQ`Ss%wg{0N@( z8+-d1)e?ElvFHy>)X=ecE?o=Ur&AGkz5m7&tci`9|3<>85+@K1QI193s1vN4d3Ix? 
zgwG}-S5;H{7AKn5p}u4}7L^Kn30$YbxzRaiuxg(YEo6Z52Ux{ve=E7=U0JB%o~HsUIFC#y%|vGp>7ovyV@IH_?P(ONIrJm+;j zKP~#(gMT6J>Dp0&UK{iLXXXt#YyOjwr?OTZsZJ>3-1AJrynTBuI}(=>=IxklLSHDM z6FI6TAsYv|Irv=9s*}5xCZ=?!t=+n>#jSVVouQ_A)uIc?zZn0e^7;N7d4bA{6|bAT zuo87uRJYle(1$R(diOe=_^dgE{#~m=@U4~luAWoH=enZK4|Z~8cboqvvH>nz$?j0o zhQyVOy=Nt3FJT$Moy?C$z6nvoXq8pzW;~OaJj?mOR0#=(+@Rc8qXL;{$XzMk4AxF` zcxXuKn5VYHMY%p7pBvfbPel~Bhm~iOY2Hvm505|MuN0-0$mXzv!(!}*@T)tI~S8}xZrnDwI*6IUpC zh~-w~p>5^`3B6zus=l%;AUWuMaJhZwOZ}03?xldl6|&ayFv)!92s8J`g-O{!t8+< zQ)tNsZ5?6e5@+MSvqBB;3S)Sy)$pG2pgr3$|7|fv3pN>DT+6$8RxrAX(X2?8Rf)!T zX9~A+ustss?)pxz)9WuX*h))}8oRDJPj#%iR;Cm7aEx8OSb@YCLSFpWJ7Dc-6y^Q= zuox92>F8lhBe4|Zb`pr_V}vRgl_^Ycidd{7Da=>X-i5vVSP@%ln~Hko51Eou#POxs zG90pcXP+;YF7ITb49H?MR%w&WVxvMu?`3S)}Rdy)AzrY&^Q z*xPGjZ?9vISxh2jEgaWG?>x#9YxN?ApWQ9mou2g*e?gPuwQPG-fd~Wya-!n z)P5Ofuk72>UU_TGuo=r#w8i~BR?JB_P5P>}PHm4h{;(DE@8`%=*`}Yzl+k}nOT_B- z2S`_dN7Og>GrSXT+!g-$FWED{pl8tzn97DdbGMY;%4+zp*)u1NkRJHoZO`1DD%925 zo|$#FbY87KmT$|Rd9}g8^<34%+w^DDcdhPx+puTG@iR^mX#NSbXCom1w58Ft8rMok z`7Nx~;uI=oa`vyjKOg|%X5s<@U;|_E{?<4~Alt3^88xOU1b5nf%D7=I~|J@O5k1*B5L&!ioo#b^cgPpB1oWL22J<+IL}Pccz%Iet^cTptjY254$ZAK z>!wUua5IitTWjoJ;(#Kj&|s#bk(7N^m|b7r87BmKNWZ z_=tRZ9w8EG@oi%sNt3T}k7Nd)EAA%uw-h62rpcGOzoobv_qWI;E%~_UV(F~*X5jm4 z>DTB39Ye=%s|+0{fM!`O_eIv=Ou98d#BK~aXcuJ?x z!SO2@56&=)*Ly=zL|fWq3hu%T%3h?VvL}a!xF;|R_4a!HZcaBfQBU@sS>rejkfvpP z3_Fr>y0yFFznnsojA^Yxi#{onMZC=s44FNaIF}g1_zMubQv;w9~UV> zN)ReE*5PT|QhxBS{#wnanRfrE-?PlFo(YD(O?5Zf4~~te#6M9(H3HSw?Q5pxgg}{z z5vLGZMJX0jw79Ez`vj{V>ed0{5k=3$ri=x*RwA4^Dt1zCxPwximF*@C$1!b5lyefJ z!cl$lC;!kda3@Qt)k zcR<%v6QF7N#R#$0CfT;K|I10can3aJI@GVkk?dNSfRUg5Q;Z^8)b*tLOJ_#G%ATY7 z`eVnjV-@~}nieu$XmqrR<>^Av9~i^m?1=PlrLRiYz<@z7IIOo0P+gJ=I_778Q-p8k zW%(J0;yVWQZ#!A!Jb_S=SWiwyjara>y3DogCNX7wD0hN6hHazvMt?ZUD|i#pZYy6l z{W!yF^-1u#hq`gVkiJvhpv+l2^ubJOBg?43G>#uqSy3b~5W){)uZvaB>$8m8hK7*r?V{nnkk{bmvPe8^Bg8W5&1>J@8xjsajG_CSQ=J2 z@jO4WDJkoc1lyLYo%*J($C|gWpS z(`8e$&9<06#uZTYG>gVjUG8r!Y}jq*5cYAo2#L=z&a~i;@KRwB1^^kC-+07r#d^81 z%iRMLY+J`U)71;Wzmo(E 
zs5mn395iAOB-YRV7OlMAt{vT=C2WXn*D_>_o|zTQF;4bSXi^HLgc@#LrSGTTBEO0m zS47|R|3Zbd8Y&U491H6CgvE|MC#zDvk^%#8@8?ICMdpv*Pou5~r0)VCaz17Hv|2l# z-r96Ni)*#QS)Zr3uWrx7nA)j@vp!C5|8e`mS)ugy8~ECn-hNyA&`-8(59MzOhw`>G zgznwaR5yg#Nn!1ivnxevH?Hg*ToYFntBbDhAZ@;jNXk z6~(nVAvx=UJ6C0jHna{xa8$@ynI+p^vp*?^``0D}cQ&0}lc1H#LH82P|2sQ-0-FEP z6uw-(wXV`Svd?#S5_6U3@?T6PmY8ot*{+16UzP3>B%UnHeDB!grP3XO!uLt(E_p@k zBqk8;@^@y)E331lv64kL`qEsfo}sRcI?GFN_QCVt4p&k|K0t( zA6KB3wSfYqdP}9s!5t5bO?s-o4EY0NyF6+B=<(8VBdPbJKw9y;(6Q;$*3|PkXiaeE zjgGJFp>+)C!L4@2F0@GoYSaD~)~sa-U^MLgR`YnA@C==%p9meBU%W;S?sPl8v4@zx z>Lp!X`rLS_PQCPr@lr&+w3nBrtzl8CN+dqUF0Hg_rjTvLY}k)hTcz6nqerEYn*SW% zhG^$~Vszd_XhL<}h1Ev4Nk%amX?RsUs-}TI5v>T;5$;R~{;VqB%D#Us_;CV`j~zKM z;HxU#zKEa*a25xNGFPkgGf80iU2$6d9-lD%L0K^T_p?OFI>o(rhTqgdyOIG^5Cu;s z!=>DCoDvrh?rZ6+9H;R9aB-*^C$<#PVK6VTb%C2xkhXgKX=*!$Z9EW9B?Df{s1QZx zMT(;G5tURy4 zlAoQFTAE6S1nx)hxSwzq*u;L$vMm>-vwPSFnF8ld`UlxZ&S5_X_r$T1!4$Vg2MVj#9)n z{cLKe|0-EkQ-89H0c=p;_&dEAw;vaCt@&a9uF(wff1rQYlO2=}BK}>=zrx-90h%Gk zky6z^+w@K9yH>yB&;P1_SFJG@p&G*;@&zUS&)7r$J^o!^Kt^rtAr~e|zt!q)zAbym z0AnubgH)$((-*1lTD|-KYGV&MjeYl$L^TxuUVC^3t^N1f!*~Db^!8nRf{7z_vc(p{ z@ph&z-ojtDUWC%9zbp?&qyDn+=s15_6&%lx|JZbPSS)|pj%xGrH2$)SguS7#p3YzP zAXFd=iR?y1{beV#@^fwBFFQ=>|7txk{Besi1vmQ%_{)0lh~jANEBlGsP!RuJ?^=&? 
z{|Q|2p>iifZk7v&S-QrFqOncSp(^HjAp23cydv9F={=8g7&hMJHQjPr#F-6(p8C9W zzZhwFKW;l4v)BfMVly-<~rUgC_M|tCv0pe`~3)pN77kw*8+$U%#dk{$2ES&yU2Z@gJqH zP3wPvzUn^n(Y}21K z*M$rJ*qXlB?{4GoKjc&swZ#Lq1^?pEV~l_CVDT^bR2={MfEnr${`KGY_rIK`{QLa< zf5R;M-{SAz>qsl&Y_We0^SkHw@moUi{GoL35|kq?V4mJZ{V0(0+7l72*#=NoL*zQA z0=XZx@mluX%bg`MKFVXqpY+&ya^(OT##A`827~rEHC#OLBH-Y+C-9~^qMNlP;%(ut z;qe*AjwSY*E*w+Se2Wra1Fse^jwtJ^pdiin7j-(B_)aR-#l`bi{>dA!QCofF z3GA&a%8s)Q@~xws8*OaMWPbyW%cTR4or^DXpv%A}(Xlt;lDz3ng0w9;hYXk|$sHt;^L5pP`;+NM`>T9!AasUcrQmW%&zf@A7;sBM-sl^WmQ~dbjO$cUkn5sp| z=L%+E91y|g7U>@6|H6JvJ9c}zu@z-hfEu@~fOL{7hXrey)Jq?%_yeU#Y^NlB)k*pr z^@v)C_o8yre@nsMGmcq9A*I*wk@KVLv)Is9QUnVyKbM~YVxr4CmakQVEwzw^ zSFcb_piA8Pkz9LPQqlR+j|H;loW2y~MmFK`>H1hs;7I$VxsLLOIFGDvfMCM+@#`NT z#~+jTpCiX5P12#9I>!cEbgu4(p354OFgaFK26xOlTKR4w6?Ein^aNfoq7a~FPo-bv ze1bFr!|Fuygu=aaaCF};*IB31IcLbIokg|K5le$IiQ@#GR`wU^PaUc<;`w+vpFm`% z*xgih`iCSEpEoe{cM_@UW2&xil-~K2Iuh3#iAXwD?|wO*hy=1otrW`3Fh(!=dIeZC ze_MLd^m`;lp;*TbgrIQ8P{@$r6BbW=MSnjHo}wiNibqTo9YCRXQ)R}}Z74iT3_aom z*|sY0hb69^O0-D-6S`M}m}P?K&mE1{w4Yhb9~rq;{fYYp8v~I18@$3k6O3c)WmuSx z2npu`TG&04TmK}4jqFRU)3U1_>i#iyi$^-%?rwLL92-}($sM>cfuvb$^Bl{QQ}O~g zzThf5mO7`~s7cAcawn{EHE(y-Bxa6_yyGfs>f^nZ&wfQ61=KI{f=U4$*C+7mLQiG_ zr9}b)J|pt5e02#I(^UD@DLm8s)knEm3$F|7N17~oI8I(H-CK|(BlM}}!zJmYaD$18 znaiqBKy!^K9@1awL=w=mL=J^FJVA;^C<8gSGX?f6&jSZ){94Q}sMA(c?v&F{HW!-~ zDm18L(b8VL6ewHFT$M8J=l2ZtyWF{vS9lz4KCV@doMa^14B*1?rMQ(wHr;%zG#*!) zkH*qgR!^ytSvOMGnvZqH<4fkFShc~NqTbDANeYx@ns^#X!rEDoG#mqHsA+z=GljNm z{-m3`v#f}v7woT*$)%3)$dQrQGC7)#yP7hn!T@KayYgrA4{*AgYx0PQ8u;Kmj98Ou z@R1GS_Z$(|_5#_@+jv-3fo5fEa1j8>w_QS6oTV=VC``(@k)93p`iA-AwQt3w*bLkJTHEpJ;(^6Yx$GJlg_aC*Y4v@XHqX zS^>Xhf=ey%#RC3=3I3S{?jzt|nBZSo;By2#!vsHLfx8QMtO@p7;C2EYW`b|Gz(+0x ze6b0hYJnRB+}#8hTHsv*KDpm${5T8zg@AXN;9D&4N&&Ai!FO2TnG&C!{75Oz@>vZZ!cl6P#*+cS!Cm6I@_{HwyS%6MT~e{#d~6O>nFxYF_T$ zXEZ+6-)dfdZi2hA!)Gw+C4ywR366Eu3j%)81W&Vyd`iHJO>m#oD3XT-{D2AWY=LJB zxX=Xu<;-aA=>opq1pnCrPZjVW6a1_NzFok*P4L?m_$C2&Fu@;LU`3ky_o~KM3_;mN z=w(yr_L-fxh6-gAm1WGIK{v6zb9req_3j((&(aV3dKj*4`! 
zAXq0ZMl@QERLM@7I&UU7)!ncE{Zf&k3{3hh6dZQ22riDwI@uH$m36ehUz83nT9p)o zwbW650~tj4*}|*J6pI2$C?>VlmJvlP%3mrb*DG1{M9?xS$wJz2$+yLuE#7gI?>&S%K9Z`3cknCP zyPZL9Q44Lt2>((l?UfE)MkO_Wu~Ta$A0L4Gg`WdStV<@)_dnbv?(p|?$(OJdvrEG8 z3JxR^*Va+7j^ENf^b33K^5EObWnC!!_M${-vu`-3XU)DrPG3Uz%Aq{F{!{AYAD$?u zBAJMf)^jVA-j70fA-t0Z@kA=yQNp>UiCN8|lhc!aOniL4yWGV0l*NS&bJM+^Yfh@~ zG1r_@-{Y=nR^QHR3LEBU%vZ_t`}0lm{EPV}dH#U;$=7IQ`%5%Vjx1H-&eu}hOEVnh zvZar^L{f^B#PuSneA9_q+3tD{WXW|uF_$39v3rxdfv?m2DI1<9Y=Z$E|)o|?7^SR3*m(%e%HHMCg3B34S%uds5 z`b(;zF#7#MnI**u&1YzVXVi=j$Cg@MCdB zyu*V2okirGzKB0uXca*yd8>$`=fZ>IFyH=tm?OJRTlw4==KJF?J1T~=u!J|)$GWB& zA^tn-9XLJqU1?UmIk0Ed+fnfi-cI4-IPA}TANEV5*sVSvsHzRIaY8L8vjys33hE2v zP>=XN)SrE0c8i62y{fZ`dQc10mkR2d{jolYpi6ycpFGe8^_+OrgIl2f3?pdx9#EU4 ziLo3P-P_{Injcq|R94np_^?5mT_}qOj`^GwUq*4bk+n2B35UjEh^^#8*`It(FPK6D zIM!Ss!)A0r$wH~f>js)Q$)z5JHr8}wXpOp*wnVld9$ASXo7@K3pS}~>pq9w4h({(n z$>DR_AiL!|k?r0Y-%Mkz9e#e4L`xS7zFIn-4vTAPhwsGqyB7HLClo5P+ZGA32iqW9 ziZFKiZo94}vg_lKJub)wwm~-SJCU8-5#Lh%RPVR}eL|4QEtxHwdBJxgd#wdBquY`d zMqzrT&j>ydMqA=rgSqGQEuGi`pMFPt*d6tAVKU)xL{09v(YJqR$xm*N?_;A|4r59**8*J0qO&329?DJ$leIqYF zO|g%jYxQ!k@Y!Nmk1P4swk3p~gjM(~LkAe@-#8&9*jtqFv>>`q5X}Hl{CmH$-h0q|@3y!K zd@S!}%X@=k6}VVc;6f@uX}RWGx5vHJ{sP`=FK?Y7ObMATFf)yd%XGbD`buc!3zdnP zT2nGWp{@Zy9pjioM1llQii(TT=`Jg3@mA@!_srdqH`QvbAH@<8o)SYV96cdCJqGBf z0OMl-y8`65DR}007TjJ6ww7{(D@_oK!4ro1=POMCXSfb$E&X>-oNBhT_hQ-AF)R8v zDk_R;NppA|8Cy2`2)k3UMoZWbq~jd@mtV#THgs8#a6VA@GioBaa@pEed)M2(lC1KG zio)SI;(Cx(wzbkN+l8RW(XUY57~5ZVR6Nbv5N)62IY;j%9Ks);XHKg;se11%t@GSW z9=WdWoU+D&-cDtW7dt9))N51~5$L0{&21(d@`YR!evUMP9+Fzkr)4=MCkHwnCvZjj zBB$gD)!%CDNrjF-wyLmUcBd2xU167y&8=F$J#K__Cl3T97A2Hv;;5L)?G0g$${3uy zrsN_9%NF)X&}f4l;q3v<42c1p3eX=w{QS9*z3o=~cP$z9LEE70_7oDI+1WPpM#8Yn)u8|Mc|5GKZ$#-Z)HToe<7!F{ps?}(%p;HhMf38Nbv0|)-?(P=&^?{{r8Mn zcqYYSVj_S9Aw&5sVJyb*rhWsdui@)&?BGdM>NxswR4jvwh29Daz7rfolt zzB0fWK2JdL8Sf-xRnI4`d zKc|N86tH*)hPl(n7CuQ)k(q#->Dvmbc`g0+wl~KMo4oB#XxzEimlSVujS)Q`S?OU*7FgUUo}U3(ZcV^$D5}~ z{hcKIK|X%lCbej;I>{fI-x6ifl*o)$kTH?*k+A}cQt6$a;ze~`BoA2RXOU|nS4X(} 
zJ0#b1+q_*PDUkuuC&M~?It?jV&wVnJQ8{A@+Qmq1y>iv2+Bih32;iDbe| zd5hHLXej3N)gW^t;m?vV*h;w6N~n>9Oe^7RE8#0i7-c1#q+$lD?hrssxYxh767nQr zg7wCSR>C4l=xZgsVI@2%36=t(KWQa=Aqk_cH%hDo2lIFNcq`#9E8!=Spjio{RKhg` z$)wK~7?b{()lEUkjNWf{(}fn6Rgz#SF1p=HI3@`C?13n( zJFvG6*qsK9=orH@B|HWx?O-#nCFs{jbK_!m#Ry>V?Wg>nX>k}hZO)$H+SEb7dagvnMynM%0E|LbP}QNLf3OjitumGVNYlxh;G zpCgv|Y%KARk$3=>XH4bGQC*i`MFwNgdsB|3)Typo?mx zBn}^Brff4(4z5?@Eh&S|lyuD%y^d^hwr>1FRQcl!R-ogtM%KPb48W5sz7;b1ov$a!iWmyxDGa zHiNJh7@gaLbUNI(R_bOf(i{yV7}EG~k2T`Ln`*Z)d5>O>lo`9N+E*Ri zaq!40iDSe?mfQiB>?8a`S!1U;eb7O((*79=uTd+xM- z4f}?1AbG@Uq0;VLYI-=*ggA#tclM!KT|60_Xy2yah+9L%F}n+4h29~ovX@9$T(sWyTzX(=!y(rC z$5u(zut}=WP)YjC4@DL|s^12w#{36F9_7>+?bWkJpMtJOB!f1QyK=hwh9^sq&=D-) z-55>dAbfXsKrSK~o`&1Y!>J6?!r?jcv#0v$a``*>+RL--TP_?vOn!>VdExLp`6=e; zg~Lb4Pcd;W96nrriYa{I@R7vZNf|MSNGwE3ineO}3T0l2tFN3F9dk36Wy%U|)LaY5 zwOuoBh*m$>IY!k#*E!B=0Q$1gf?VfhqX}|#9)+tm7_Ra1sM=BP{1wQhEgYQ^sA^5Q z^D%+)s9IF+{EhjjT2=1+t@)^0R_^?@`KVe~?i5AGY9S6nT6v1f%kBvlUMJ4U%l$(V!)ugY`JKt zHgA8o|84Qjcl&=^&lf>#ydC*1-uo{9C_=k8>#l@m+}Mlm_V|7zw=&3>lm?~O#mb<< zmHCst`C6F)-2O*YV=x)8E^-6y9elwm$3t_%TGb!EC1+ z8B&c5Jrwco*5_885?6e^=)F_w9FEMba3R`!^t@6mz5@43%_ofi9CZu3rebd)-B#^r zk5%{2HL+tnoYZP@3;Q6md12Sersl;mV+QlXyia6FoBKr0BQdfm+9l?BJ_wX$KwT>0 z@n5UY2?{@6k?T(rV>-Q!^%$npOc;-3#VOc7NEKb`KdbCmA20E?k9fIz zrz7OXjWkK~CptYDjovhWXF2iG*Q`p1id9AJhzn{gd+K(Qz|cm7M7=De z>akaT&oS#EIjM$D8>8bRywyL)t__)!!e!-h-;%GZhrscHVO*_N>Fws>x*TlyzELTW zuHkLyyb?N*vlHDgpI;|%{@Q7^!|c+JHN!a2S2og~w!(_X7ae~U>G8N{5{H@QEB@Yz zsK$im_I4NY_{ZAY#HfE~d)wZ}@|2of++Xx7 ze#D=rmH2o*qO5SYA*0PjjM!SDfuZ|W$>JDNw;jfZ^pVq0>OwSKMd9H_0=Y1Y2ZHDr zVSNbjK!liRSf6DwSpL*}i7@WJo(cQ4_W?I?Na5!82zQLz%-A`ag!2iInQK{!F7OPQ z#C2#Y5+ep9$a9C@oyrS)e@P)cPh#EOAs89)XtW{X`Wmr;K<0fXV#Ifd;V;S(+#R*D zhY0U5S@SC#y<6c}8;@g+Y$f5IFj(%)k$o`3&?#YkxSFL({D{-^ixoMpc!O;ikrv}) zIS{#re6{)$6Hmu~e>lF)4BFf#zRiD8k5=)23A1Fy|7~@D8AsFq#rwmFcc_-Ev4}=9h-fr}h(=dC z^Zk>BAox(RE&4*Ft-@QZ2K_%3ejr>)VT`36v5nC-;u;yF8M}&5g|*nW+-Si_W`|yF ztPT2V)x2$bAG2AnjE_ISZpH5~MX`VAezuNk_8~Q!;_^B4=>Jsw9vRpF-SLdip@-YX 
z@44fB>CIZ*$F~)~XPPncx>xn?HvJ>@U8|3|UFJ}{egeTj6~yk}AaU9e{+c<_pX@!i z>|}FM590NwxQC2Popl*{gUAU*XGBK0eEVdXzNW0HxhNZHmmDKf9cAK>!AZ|%N5w7t ziVT83vC21(7WFXM00y!S9&aCDbBX4PeS;$c|N8*W|Emi0i zt%9Sh8yTnt%cjSF5BI9vnXn93OLhtB(NX+2v#@_Q`@*<|?c1#NS*!FjrRAKoPLXQx z?y!Xe($zg*9H);7Eh83+cfdWOvJR`sO&627+48|!b0ybMqnMb0;dDisbih5*0aIcf z@EnzKm7Qqz-W49BxQo$#I>0PFiel#bR^PaMLGCds62;A^A6cb)UTCF{M0@K5omM?~ zt)u(^g-IB7$~WuPLFyHma5~gh4I(+l0eeQpf>FMsA{W_K>dqN`5Tiqan@Ys|`QIah z+*o7wzB_PltKQeLKX*L*7Q5taMqn}1&?=4h!;g~7s)saNRiP3lClum6m*V3I!BKIL zq_8*>8SErJ)`itvs|2AmlhDFzc`+uBy37k?y6l$T&^-G<$7N-bLA%lzRn}wM77s(eP9rQNzYcXAiAl0 zK_%3L^6NmK&$nOi^9&;3kc#K6A&ga4%tmY$ABCa_4cKh<{kumvV z_0<~rZq$4^^@OZwq+PZe8eA4i(*jzW9B%exle*H|Gv9YFZ)x>}GcI}`3lBjy?Pb zlue^R2)dq_-9_(Fpt>-H0^gMa<+h?E{mgfTnh+>FzBG@IAS7Hb?Me7X^G$7iVSs88 zWc*QN4VsG^kw^b#iD^HJ%Ckyx`=R?!8MXh|`vYdZBdfzUealkT+FNJ|@_;^GEg9SN zx6P&Fnp@-7+JE`}fZtJuvQnAr?Tb?OfAIYQFG4MC*V~4-q!VlPo^Qvkw>eb?$=|KxP*?Z4}Ok0T5SlAhw>laSL8*0^sd(&~PXdKyS9arA)iyWe9qxzI!8 z=EN5FdmNxAtouFgq`-gTevfUtnWHWc$Lm+6dzGJs?{Vj|aH`T{HaM|k(Rw%^vY<#T zn5ZPBN~cA}PXZCACarUDtmy>Ni=O2L1z8b=hy$I1ycLDiAzLWOi&01=kgP2PH`IKF zmtr_XAyNB+oV&Wu>vXi^h*ppNcrz3p+?U=r*V%PXVMCFmq$HBk`>;wmncnxqz2+O; zIsor`Qo#?W_YLkPK#uZw5uT<6s|Y3BA;rvlRyz~xnNh6W6AKT6_ob;KQUl}Vu`%f)6bfmH*GoR##DM|^$6jwi0L zco(h;HD+KlBqelgLKU`I420Z{MxJAYTQ6`2Zq13|5hQC(B$M?U1cSU?V!SPHMqi(- zULWla{H}!ZTVkj)F-+CFL@~^0i(#t5pn;*X4TgFX!!-RvO#iA56Wd~#rZD7!!A4@s zI@lO?>53V;FN$GwTMRQ4h8!^Dw80=-X@bG4kBnlFO!Q)WKX?^}tEC^>U=T@IFx;;v zMKNTy#c;pEFj)Gb4Tek;L$SVsZ4=cGX>BpE3C>Usl71k6%YMjF7#KqOSf3X~lF}N9 zkwM|fl-_8AXS9i@T<}~O#baxWM=~fp1Ef#d;F)ORStxkEg~nB*>eWke9czT6QFzj& zXWHPIVd9ZpdHv5(JaujH7~A&rk5l@G$eEa5rwyKGO*|ry>7S6S`scN_cqD_W5BEm=uCfiD*GxR( zE}%awc#cg-W^3S*F8P0i{XN`Xja^OHz3>IaB*-a0XBau<=S0aF+3V9ltZ2U4a)kibE;H=}PLRy&++B>$Azfai(fi;plw6}nVf)#}a=A~y< zs?YVI@U%*{cIGE`2pt$7iu8)fx6ys8v5TA~g6PCpSqQAIquWA< z3J9rB*R3)-Fw``Ss|@a|mw@bDP9b|2{`#=JZ{kIJf~EB5MWM~(xutK8?xYsi2gY4= zlPdfsu}Enn9q&xOXtdlJI-0TuGI^8KVM;3*;81IQNl9F2)@zHqnH!Q;7VTktHy8DLu&;?# 
zZt;@KqFlZjSJ^J^OWi_$_pL}u5O@4MOwj|BJt4HL|H!$th+p1Ij4y&W-C^>n?SKw_@r)suqD`&43R^|%8dI~A%fI1#LR zVBQH=Dd!;O+gznV!RN|8RNUU>J48+Le7hCWVYUM&Ksvc+LY^EGchS=$78dtR0Cji} z57IH=i`f+1-@14;Cw2ydE9!cusO7&p!YqA5boutz!(f*W65*if8$YmYUo zQ7&J#t87QdvS$0Nsphk_q5e0Zx9(u7K@Z=1f+=#FjZz6fmh}&h84>p=Hig1?<=|XD=xe+JM@> za#ZBg?fLA5UyeIRPcF=T3d@>U{4D#)aN2W?pbajdJ?}~@?LLXv-BzLSwMT5)y)3KML<0fI)D&?e7ajLWCT{LLOfkY!a&of0~;jQvFRS`)M7)jCkU z>|DlkZZ_LvGyCgG;siKtS)zgxr+sGsIwf)foxIGhpzT}bO;*svR(VqtG^thI?h4wW zRo+wu?bs@>k~_IE)~;nYIx6z$QZ^sF{k4Q9Es(e@YrQF-pmTJiV|J_7ES4t7#kbiT zicpVG3iMmohxXjQsjy*k>aATeaWKMy)Y)`Kyd4EK&fgh-f&ydb6vySLwf+?-SUflI zX;Nl-oc`E1NXZY}l@Q$VKx)!ftZiuLI0A9OVBy??9!|Ciwf7-)8*kb|^X3y`U#!_F zW_@MvB2~L*;~0zp#l10F%+Xi9CI$@*WXTG4lfFa%VpB;DJutoZxnK;|*TwsFO;a*YIXuqd&jP}G&WB#JC^8n&f4o%CI4k^*; zk=-@6nZ?ewnj(B_fjNmdkxx{_uI5R|XU>y}JQgIqEsss}apn%XGH=B^@wIpw=BSgh zGJG+8Qtc?csSK=Y%j)VQSmb2Os$AJ@jr}F=?!fY)Hm;2H*jCe<>c$PVcjZ#A8oN%V zY*%$nGm+Qxvaw@Tt~P_BNKJ@4nLv+x>hAH;yRS8(XJ}hkbd8d@MS~HOmYjRZR zL%A8Gtdif&RHzC+QeMWl8V&|Jw-}O|+*R8<1QW29INViriDJI;I1&rD7uzYZL*eVa z>|?46*PTxh$%XgL^QAQMF6Q9E=e+hYs|#ywg1_;an7&og9_+#Ujg;;xMXabQWj$8otwsv>hx1NaG^HB5 z@~1{hFO`xhDX&XGg?EgyPdH|zrKz->Xxfg#gz@%hSs0qZm=aA}U6?T29xW|hrLC13 z3eM`ngn4%Qx^$jBWt^k4^i;+;XXnBTC)+5mw5x4GlbM!mrk$bEb`(B$zHP#OGp&o6 z)=8zUFMRG2+k|h-w612FU8PkQK6j06!j~$|>u{(vud`#}a~>)0xly(Wp9<`f9=AMf zvyBRrCkn9@b#VBfM_DL*y@Mi-#$S14FfX!KuT3aiccs1hE51h9tH0sv7JKy`zM!J| z1SrT}eJVkSv#^0dU!Ba@`%=CvZ0KOC{)i0@@FYTvli3hxTr4;nrgW0zvu#3yl1xH6 z8@1A14O7lYVJW`IMyWu{)wTxE&M6xUe|4peN+_rr{DvvX0v%zi{@Q@5fp3`7MWDCX zsG`cN2EAcQSAmYVQDX&F1Ku#j0rd4LHfeyACPS@inW$>HQ2}L)6)-ans0LSRYvg&& zs65qFuRLo!Qa|;$*m$I3>aoOlq((fl9Sd6O!BkD^Lc_2X?iA@4@r)HdY1e*cwQ#`5 zEv9_HvZV7D3RVs4e?Y|F6`}QGzm*?*@r@VYY?j20p*>?a$dAuO zPIi|yF~8-EjtHN*kDLf?9=kHMY}_i5I#*j#=dt#-99wY**E0Ebl^#3Ue64LRk0yj-m$*wW*I!x6ZWW!m+w;0a&M&p3W=+Dm#w z?Il>mjoR)sYddIRX!Y1#!yLgw5;HW)H9|EsCv##bs1GFKHnAiFR5zwR&ROaqA81AX8l#g z=k72nZRK&4kE8n319R8dUDChbNFBC29u~ht>hy90CD+jTmDrU(WHdg)v|r_TxG%|~ 
zVz`>wElU52zt6(mfGy6eSnnwBM-U>jBYG)zA6xJK_5QeN!GCOjoWpfkKhz(xe=9`! z57Wo6(*MN%kUb4ZECZ&*t`96?MZ{zg;f`z_8uYXK=nwbTj(>JOomui9*k5WoZ6q=} z{gV_6*KKNW_utpCB*3`ODye-A@(Du&X%lf_NW0zi9Z<2lWJ&zC>X4>-?v z)#jc*m2tPZ`7u#`C@N~aWUUAQf0bv}AzxU&$5&%{xoq)rh^jfErgD4YFuUiXiB)?N zeO2qiCEHI0cieY$wXeEtM|%?bOhw<2p!41dEKZBcmAJXAU=_OAD#!9wWla~%J-e)F zd+~XwLe*(x>^SGoa=}!NZRrJ+O48hKmex^HEdAjUE*(c!PFHUgyw4itxWTIne^qHtZfkAUEgw-*h%fv1wD#Nuw+q(+y$`_&~?Cl(se zmM=?Qsfy%%sYPCaMt&owW!ts(8Hm$2vByO9%%SZuZhdOFO;$zhVrjWz85@NVgZv)4 zoZl_1E?(r$dGp0mbI&(FAC9S|-!vJ(@qcV;F5G_7K0PVO8(3Ay!6&(|_viLQUrlrl z=f_lxG(^mvp6m?nysyv7c7+G#+Kbm#>Fv~Gd%WNKF|H4c?d2p>-zPzZ`Dx^5Y`f)s z8QghyyHArt;VnIW1-$TJk-cw^-}0*weYd+4)ni_AKlvQf&n{o%`lZO%c3V?*c0kMc5Sfg!ddImg07#e-&r_wuKhth=<2;?SK<2E_KQ9)ys>viWVhH{z77t< z1uE$Jf7p8$_$aGuZ}+p&wlpW*R|K$d+oK?-p{xNZ0#)1(-!PV<5bOy_T?5vg0)9pTKNYcP zMKP|*?4@f``)G#bdgI8qKT49%ll4PqKkTa?@}}1h5qXk+s8Hiu+h7X7cfk4NL!s0U z69Oq>%B%4E!&rep! zd^TyR+O;?vwN$^>Li}2YPYbC+2p!;o)ijwJ9(fFZ#P=(fLGObRi}XvEyoSoM>9SA{ z;7bkR2g!zn$_owM}nY573Mrtz2g$rHKPpq__7rQWcMGhM?VKnZSdG^{x$NCCwXx^R0}|(*A~jjSiqyt< zYUn|elOL1G$Mrmg zrp-Cf@@7OoTd+5JI{P8!L)e+_$IBzE1C4%A`yz}4pU4&5*F5zl7Do2o+)*|G_pYFS zQOJY22e}Y3t0ml_9P)O;i=vz`evQ1fI;RYpI~3FjkthNQ2*xX`r#{QMbw!UaJ5Gw( zth`I=-c*ziAl|%Wn~;fA#a$WE*7(GsaNbS710rmAGM%jb`f6GDTFbW*SU8t?_VmpL@#&S7yXG2d`RL}}uP znT2g#kb5m1x8k@xq%iD!=oJO5V% z%Z64Vw0fkUfOrGG)cCrm1<`4Uyf>FxPy%aFB;1XJBwXfTb-8MO?JqV# z9NmNNd{FCKGAM?1GdKf%yB~e~NUj(on~OzH)A$JTnJ#*|cJ$REBN!tiBeFq8GQf*p z)JIZ9q=V;-G=?H&{sI=OT_A!jAc6>LT1=EMT~I>V=&?rvbc{0f^T8O^_y(N#{GKE~ z8nt<-5UOsS{D}Pqo_Vfb#xn+nzlydqN54hu6=?7))<$VmttfuYu>OlIMVZqv?rj{_ zgWc_)r;YFV_D}S?b>j#04CR3Arrco_9cAM)A#fwVC$Sq7;}4WQ-3cG0?IXF!s$nC( zscRcF2!G24VgEUbo;Fz8c7T#b=U6(Af)3sS9UQfQ5GhV4lh|?YGOSOJOuvTN9=l-*!4FwSOa9u(8qa& z;I9(==j$bWKpOZDC468SI4l)7e;Qcgb@eq7)8aN5f1ivT^oDOmj zJP8N7@;6ENkTmHZlJKEv;J=gbi_^d#k?>(@;3PIt{VquZe?r2Cr-A=P!VPKQPf2)o z8hEFKk4OWLNcg2`;M*kpvNZ6W58FS(i~Zy*wa)?L>f zNboPL5tRzq1wgc?WjgK0)3$tUEB4*jp6vwGukB@@ci58cNnk_Jk=ZF#24#xg-fHa!ee?wnUvvTbf*1G7bv|({iL`@$K_N-{ 
zf;h_oLb9kp3YD}z2?M$02u!tDG2x&_<7Lasv<$glshf2E}t zWkVC8mFed+{grv2mL^S^Rb1xAv}Jm=^nJ>FG;Nu;YiZJy>E|+cr!8}emR{nRuzE}@ zZ6Ec>?|mX`i2~sE8jK|JylvhCFfDjXi5Dt>FAzOTGI0=XSAdnWm2{0)vdDK2hp(e zC}*_PkIK}!Jr8QBcZ<~LQqE|pcgxhqo;$VFowDq6DQC3g$7J$xJy#<+DQE1S6V%md zbu&M8o=-OueSsI(|Nqp@Y<>R1bu&4LtLbJAZs<=pa{_`&PZXe3-Hhew93UB+f7sBA zulITX?^zYQe$daXfn5cB1I7N|WN26Fb527&gQbR4&lIyh2I?3J&&*+MOh?UEQO10m zDxIQ?Il5RVV|;y;F(0fyw=!lelrew)uP9?4$I<25iC1H{5gDaZ#)wF&h?GtlBO;BV zNSS|5ql}?AWfR99*^o{dqtm~v{ose_FEXiyvR`L3>c*x3Ut|S6N!(%-`m`)t$CArn zMooJH8yq}0LtTdne*W~$^m-Qid5e!4llLpRKgk$&ZIoEhIu)53)(_x)2dL<5o?{%0 zdquKHH;L<1G166b)K7pl^-WduW-NtXX9B6mOK@2k3?gxJ0)kde7|=7G0_eDVE)1~; z9r_+s0$YZQvFkVKEu2V}+D_76L+MlXSjOW6V25A^<-#D&-gXctZ;sGcWpwmP^2%QN zgX8Adm3!6R@z++=8?xfvYV?(5`!l1jbiWI@`R(Yby9->O+?sXGj_B)0Kc>0J_d)dZ z9XJya{vB3~t455aM2oZHsgS^%#YxYZ%mtks(q+ffX{C9y&z-J>)8+I{mlIF-CRg({ zRI{6^iG}N`5w~M6uM-TqbwKB#0iA~ju;R1hrV`qIw!&OP$i6uAWmrew4Ys%Kq8rAj zUva5j2)EmHj4LE@)dm_WS#dRew+O5|1I%4JBQsYB#$|E)0RjUwlHdzxWR|zR2HrRP z1zTXTjFamQLQKva+}SfA7c%leDBN^1@~d1L&a~cO%fN+#XDin2${=O0tqt{Dfy`4TH>PJaldN8M?@4(?(u+4=wR* z3bv!;Sz}sAll8)TNTq6122yNuH`N0p;QNN3Z0UL{vuwx?bTjxx)??v4P$W0qLx$R! zu%uB98zzH-DzIF%#

%A+McO_wDfPBF~-#jof?BdSJme6`Qc24O{Upq6&NQt4t!G zsUK}AS${Ml1HiGZ_lQC~wQ=}_=ay!CC&5KYN7O&5JfY+-U_!XxqiHD&c& z`x4c1?bB6f6RH!x*36cxT4Pgnbv;i_!s-l}0kSr>l8JLXdgjse|b1Xiu%U%*l1U zAZznBob|iQvDvlkh9TL{r9B$*EG!6M+lQV1=*oO#nS3;4WIsnkhPoabhrT^QGuR&hLokA~)>~h5J zs0N`EBCiUFI3co;Ac+$qUkaGYsTOOnbDaI{qoNE36Y7l zg!(4sw!L&tgs6=y7=MqgOFk#EC&Tp^&U_rm;1eS6;(-$)&*LlwPIX+$=R|ghmu%M_ z920>#0zro3z(dyNUE+mrKw-Mufs-QzZP_?Rk~lds6gC16@nI;OGZ~t_pO1~mBOFW? z$GsP4@8$y|N01+ma4G;W|;=~gl8+lE@h#DA{t-&gl&3gr` zRfD0W)wt;!t+m?3*N2QJnEhw1{e5s25}GL9l%FOjduz~O+R#+<`J z@rq{g>u&KnRK9kKSE>q~TiKaGHl$4kTDN6SG;j)Gx(dxV6!||Lirdv3NX7{{ziEn`7XR<-}>sE`=Yngx$5je)>{}G zr{tMJy7eexQE%%$IxnFwu<$qEaF5IG=~w-Ie~N5bU#=$wtR z|4uD92&#@mEY;V28reJqRQ=DeV1?%}x^GXk8V7c+TW~1i8uIW#>e-jbw?EN+#6?>7 z{pg$Mz8@_(1R8joXyEN{uJ;UD?+t@T^xVV&vgh_v&(T-UBVAYGh}7?K`Uo9|>3Rp; zYld}~5oq)3oahC1pw@5a?5?v<#=GmvJNM!=HFwu;yVhNNmN&t7Hk_{#{Fd5ZTeSW< z#Oy}>#q2gju-h)`ubuxl^j94;Q3+0)hd1UlEZDUE+K2vnJJ|S*_f_>@;(bM|B?$wT zNHkgK1jVCQU`Z6$r{XF*Y(PSVrk%H%)*9Ws;A1*JgO%|X*qGyv!Yv4jBF8SQSpKk+ z1gC=Mmg7#s+ML@a?U(~%`0E1PAyEAAr-+Bkx9-F+Kc9#BQ;AbFG@`8@BWH+J*XobR ztOT_myi8Ac)ZP;#D|LA9I z!Ue?fVUlkYU;lRV|9N~xzmIcJ^5|q7TSe!_7M(XrhpjIdTdf#W1-@E8{o447SO)Y@ zUe5~hYO44O=o+h$mr+k%EocedhRF3zrmqA2)7O7DzN&DtMlfc2`XciInzB;(@qGB2 zX8r#HUqAZh>5CchTIgedC|sWu{7L~I(bB3+mE zgCO&{8SqkOz%C3|!+5xgk5#?kJ`Bb@j~p3|EinD~MB-E@jxu1?kLz&wt_BLa>LxV% z6zpxW?Xlo{bi~f)sn1nOAAt*P*Iz>m6B7T*piZF6a}`Jv@W6temO$qa1)*hmKvO)) z^K0Um8vpI2+jX(HUbFt1PLx0w;~24__npN217mC4mm9UN{gM_^IDd^3DAEl2mmb=A z!$)u<7Ui}WcrtDby|f%l*8%?o`4tE6enEe0f1y(K;|K4~Rb@n+KWZaHLU{B??ly4H zxsKd@j2Jl9wSF|MosZD{q)<8by$4s{A4&UT2Y>_GA1kE&u`Eq#cos%5eCH5$K%~Yn z3l}>dUTvIz$(cVwoFLz*FErm($2DKxQTXT{l0w&^v&J>@Bmj9I;BRPh=!3a_l*(SL zz2Qaebn`zcBCS@>0ip!D>Ihp6e}3xf-FPEX`OXqLKX=yX6G_>d$PhihQRWR@;-l=P zvvvniEo`7oe^fjt$#+wyc+dOcTr{|2{nedFLA|yO0t&uP@ECfPuZ5xQw8?V3j#%!x zJ9Aj{`M_RWId#|FPYr9OLEXmv?1E zX00yn%z)-EEAQ=-5sM7(%!t(WUcL}#7dYS8$!N<<3v+WQ@_`KOKV`ejcH`OY^!{81 z8MzqDdnUd|J*L_@xJsFYj7OR#GdR9zcm+r9`iCqHd8$8Ay2_pm5x>(c*oBYL7a6cZ@o0G9G4in94hI 
zIgvV)QJBd=#(`9?y^Nl{q`XuGbP#3sGG(@(3qg!JU}84MHsYcYpyE@) z>r0g8MNwW3=YsZe38>DC_)<`9u8|XEUqo5$MNMCdd^cYmnGIMd7a5|M!oeJ*4B$s0 zKB|S26bzYsQMB!JH{9EBAHvZ&h%wzj`WUQVu_p-+-EcMLZB9BlGlElx1QGA*AYz`$ zkvbHZGnio8IbPO9ln}avRd>)%+L#Zc(-w5mA(Rstc%5`2a;W$26Op%iFaIJVI&r~v z3F+#+`&eX4@8!p7<8Ne6^pOR}vAFl{2QGpNXH52$2*pEJe)hu}@ z58`fB2GdUJ==-pidp+_W#p*azi7G;&58X#BfIQy8DH__J8}l(|2+)J~(I6Uq4T<=z z6>qRT%yK=}Y&^T)x~>gML0ps_P(Gd z*S=_0-u{z>zcYltQ-rGC48mOuEG0M*DCF>y%q+N~NU-4-#BA7QiPWtwepxcxJmc9e zA|>!EayujV49wO^6t@-m_Fle9GFz0=AqcsjrBY94M0>rbh~9Qkv z3J5_(7zmq(3}&-VjwaY_yWl${ej5Tuj0oaGX0s2eV?Uyf{5S&)*eN0C#UAR#Ba+RY zSb&qxE#BuiOEp7sH0lmsi_md4F#B6(U}|7QAfGspy{OWEQ13CXjl2&^ul*E|D|iia z2{Dx9HJCV%ZglmScR53V9uU0tHWKk0@tW(gu<`6p;yCNt-c8_A!-hw|Ze#_V2~LEG zgOyEJBD@a-)0HgTCx4Qq2oeoaHZGYTkF?{8OWk~(ykF@BFm=C@^N#OVYC?t|C!)IE zhg`v%j@L}XFoByFe)?N61CO#x&sA1aZe>_Ofbk&e$ZklwvV2}JQ2S}&Ks{%f-QR#DzBHp%ik#q<7yspU_^S3xv3l=T00KjnYWPx+s5S+zSNu1Uqu zbJ}}{pGFuaAJsNIlO`L;z83QtTrJw--}6zzJ5PAK@&kLag}1I{)V;&;Nd_(T$giGy za3Xc?S5KXlNImhZryiO}UG~*e4@;!3{pzWQCsNmc_0+U|DSIsR)l<{=NI3Pq!-;FZ zdIlpC84x>v^$adgWI%lT)icORWIzo4)ibyEnMhJP-j6Hh`4iJl(u)bN9-EqgjO&nC^&Dm<%(FGhq_ z!%O(Dioer_Z(ToJAxz+xjDihkNJ)po>YDsDE|cXieDC&R^Y$a(1#-lG2jnaq(f`%z zF}QV=9<_#H^jK&ZNsoHNXvmw};Yg6Bg(C^ccqhS@7Lp6_K4LiBtyG~qscsF6MRvI& zyKy4Bi6Xmc7m%Hlc}M(o6Pkq#f8Hsczd4Af$h%DBZ4-Idio6$INZy1lQ4S9&Tn@_y7f_BuG6JSN zhILP4bye9>Ysj=0!<_t4ykWN>NxzAWOq8ietjH=;A4Cf$R`JV`?gkd(KeVG)N z#bNk#l`h^e4m*<+md#;jlfp)F81(+Sv^fYH2K|aIXf!Ul!c`-kSruzfDrlPc-qMjz z<=8>@QyiI!y&~QEkG}^kKM%ebS1pkN-r5)^zBh&}Dqv3E_Hx(9S-2^@+U9wW@JF|% z(IE@90R6<5*C+Jv=ogsTll@y}0mHJP0A6#Jl)q!DwVN-n^#Gj+A9_$Z5^0HN_9z?y zU)U>z*2p*aKcOLlrnT31)t|Bb5k20$1Gl2#!29Y#*vj1Pb36^J%Y6f)FD~1cNr8K# z&oA3In2bM9{u`qIp}RZseRt-p_REPs)weZ$!H6%XSau z+M!+#6+A?&u(tX02MUNRwMsrx5FvJuHGHDrZHl!ytbFO(i%ec2?`h8OHS)d#zS%3w z$LlWFCt;L?H8(yskDY6%eZ>TExcPTiF-MAPUtyeAP)I1j;k4He$vxZ3DFboB-iL^IQUXePfZDW zihg(P?2@_b9C+AcIa!IW6_pg|K4nX>j@Q}*{|VG`F?-~>>b8b^#EPH_fsUL zx5g*vuWvu@{4@G4>HJqx$-HBH^SHQ{Zw+Fdbbb1cF|XSIt7MoJN0;p%5Otr~68#gM 
z#5r<$R5PMP|FCR7u0+4v2U?^DQQ%LOK_?G?2qcKds)BmgW1kzlNx7`b{}!Q)oh%+89KgEnIChdMqLcva6R_AvHNgj^PxNh%9(RY^Z8$^LmFo~1zDJE|&X42zS%&$<)vl11q>6u$yW;9?= zq|}u8qRag6#_mHn1*KN+ifq0L7(&-{gCX3RZ@M0P(b)Zd@FiNqL z{?d4BA?ENnZ%MvllTM81+Ia0ZN(NJPdR(?e^{h%6wVBJBZ08)b*9_>nnPbqX z4Q|Y|w9n15wBI%iAEEgubM3nZ{MugV`gp+F_X_b_duS$pL%vvhm;ujreKcV0y9CIi zaB%OoAukcL?t0==KwPvD4^9Iy}vLJ=*2Vu|pMU;5!sR59OW~$92q(|>$PcTdHXui$$i3xPabqUJ4wkk=Z&>l3%JdKX&uGUfP% zNX)YX+QB5ci>AB>1ryOE1RF6UbuWGBVHhl!7Sf>Cf(621&tDGmo;Dx(z?E72<(9wf z!GQzXg4NUMtDw`PS$sTz^thM?A{o&G^GSbx#U5NChf8P?CVZG0_C!B{M(EV#urG$G zXJVT0C(&`-{z6aBG+$`+yd{O6X*ZbbfcXUU98WLi2TVoS0Eg6&Js4gvBd%@&WiUqAQ&Gw<#Auq*gmZX)R(wr?x zce5wsry8Yh=R03DdR>c2U_7p|TV0HO6_D)_+L>tCPGe+fd?BWfPjPHB^%ify9;Vs5vG+ z0H1f&pgp@#Xc6-ZCK<3^bQG?9t-(2;H?YS( zPr2k4DihT)ErEyeB4#O?R~S34eJG!hl9%~&ViV@Boh-xcARPVtZ4Lxq$|0Qj1Us3Y zzvF`oBXE=w0Yq02lSp3Wr4KlX$M#o1dGN}yeeJU(Hi2!3PFZ@Y6Kn%=C9)-ENjuQL zKfhDTQptlo$?JOlpz&TWPhFZMPp#d$8)YX%D%ARlL19o1DvHC!{5m*Sz81#yCnR6d z6~ZL1C&^cVn{e`+{U<&WhM6x*d5zyq@%MN5eNz1WBYvMAe}9=q zuoaJ@`%h%LoG<`c&$c73e}S}e5@N&&Q0Cpf``O|Wo}X307REIK5>O`%pbY%2pX{3e z^lfyF!1R59^TCZK)QLX^S&%q03DoQw0hwnz5n`wy({&kWy$cZ2GFW^dY*j(7Yb{lv zpo|h;_B_swon!`t4CaKGH>&u4gl{&fnY;M!%dTI+f53IwuZ?+oK5zx_@&?IQXGwh6 z>MELK%-j|^Y$Exi+}PU-JgeQ&cRqiUJa5w^&7dzGS?v*Z_3ljI;_l)3x@D+TH2;@xN2c zU6=iYA2i9}i0(XypRRv_40ME0@*+3hPR=57Cg+AS9zK#f>xkWBa~ zVM@*;go%jjF#hgje7wj)4=!Iu$UZ{EXJntzdn|HzP$mYCd6^%&9$RGWJwzMHz3&r4 ze~JA(wUe0>m|LxW5{>p^k%HM0YnIjOyK=n(Ea6al*jNptaJ@jagC{CXT$(@yPJ}>} zjk${UCiQbw!G+0GDRWWWcp2wG682S9WJA^VrM2ygo~?6wV^$~>+UI0jhitQU^{Ant z*0y7~Nc)T9M+cMVi{o?*FMqeSZL@XAbJni+RothG5rLyv+xA#9j&YnliASDjJG@ru*-140q! 
zhltY?H1*Q?q^+}gf5F<0j~%SYCL$YhOe4T;L|rF9fbe_)T0N!_;3q_Yp^F4KlYtp~ ztZj2uYy0dhjx^gypJ>Af$n#5W4Tya|W*1yr@JtE1v!}eUH&)TM)z(&MZJVEEYg>$O zvaQVds}V4KVt(s5EWKsTY;DJELk@~6!EfGM@x^CHv&zV4`QkHM+d(d`%QobYwd)91 ziXseRXhp_L_O=zOH6K^=JB)N{RZ`8J7Kg5WHFTT8Gr)~PdMQ!ChsG02Zn18~wZK4N z+e%c@FDj{uN{&S(pItW@FBx0c72`p_t?Q;~zFCaezf$swwP)$EV(4G!K~+0}rf3yE zo~ZZq*y)`kKJdYS?#NKX0_i|Rn}4Zjs?R_;L>%cuE5>*Kw?~ewzi!* z`fZ)Q1@W#3e=ouRW$mR^sPoBtK)UQTb(Gul#GN{rTr1CO1U;m*|3b=N2YO2>AF3UJcKiQ<@@ca!rTk4=`OlL%)di4XTtxUf z?MGoL<^N7A-;VOF&%{~aFupou?fQ6_&9jwA*xL58EuZdp)Z@PkUsweFk2KhcZoZ@| z#f#zTg~t->1iyd}a0Sj0@WFP$2iq!&Plh4A4Ek9aO-hq7Dg9(Da_<=RgL`=Lxf2G| zPh!1^$%od=Sr~14KD`D5Y{x8>47zdQ#4Jn%xDN@Q#pE%XKeX@=#9!7S1M3p;y&h{U zN+xq96?K`p*5as@E>1FZ^bEqPsJ;G!tcvz0a$8XleTz3_ZF`f7y8?&WC|vNuiO@oX z_hiaIL8&J~vnlXnWJ56p`J4z9QQ)gGaFfjTDhiCsK!NBJA>1JKX3wJt#M9RHE)`4P zGlWcay}7fk(~4^*t@Jq?TU%2Gnp~S$fjo)Hi1Rg?Y8FQVg??)bmObFZI`v{-OP&iX*XIo#6pGE;+I237_+g> zg3mHryFM~`PEh*%;~2)ndyd?MaRCEhoc=00GHr z`X^k%9yNS_G6$e{tpnGRY)XOaNK^xV5he7@Bv}HtYrufg+72n=>q}YBiA?HBa@w|9 zX}113>Q4*<_Q4fqATKC_J8YvAe2QjZS;t&90-pyL{F?i;KQHam#wwK(r^kbzN+Y;r{=Dp}wt`jDL}x z4ejA?N2|L2wiOo`*3p?-gR}7%kvSNUzM z{4VCtff#l+V(C}18-J|Fpw=kq#2@QHtVv2>m~lFM_y~=sbTOD}ZyOqK3;JRGL;yOh z7+ep5(N|GF*BXrQ8B}Y=%eLYd3>z+`q8QYHD>%?*YugBFU}5aJw%c;BAAH|2x)Z2v zPuq5D+Y8qGt=5j)V%)e1XxvF^+!t@k?czlHq9>t}JUQZ~V->i33HYYA9kt~H_tdJ%ceA#w=-p@El)ru zJf1v&CSmpS&$*a=!YZwPXnk^PO_g=s)5O^dMsZ#r+_|OwrmA(FXV;xH8}9ihqUDZD z9$14%UFKSA#!``oO-U{J z-S*-w;TMkl9{DEd!`6|R19{Ze{(TU@G2J!-fp;R)V(@ z!vTV`lBhEldsj271F(BH`t9oNMcMQY)F`{%GP7{dNNB`-Kdbq4~zz!Wv?b-J3|YRIQH`JDA? zv#;LWG*oGJdc68r{(!I9A3`jO=517sdG&Z%>h=c5sm;!nYF>T*P-W;)BvGfR4Ngy! 
zo8YQ@xf7YGceuUvs;@x}xmSiz9WEm{{>IYrjg8GsCVnk&p}??rsn@r{t1Nfdg;2LC z0e8?HSniI$JM4~t+ljW+IXqr~LJ|)%uPe39yzJT=fuGiy-|c?o8HBe=e3mpc3@!n> z*T}d}D+<(1u{CSpVv6FoAnM;0Wj&rJ;m(#KF7yRZ>A~Stb1WOrV|XH7?2+iTH~#&` zA54Ou^6P|q2L5Lp{EN_DqcmpakY0W#N*i7y9m-jCRraz9qYvJ0H`h|X} zPZ4^qzpXQUm#&*a4vMLnE1q66bH&cN;-Ov{t{hj35$ramJ45>Ba~sE??Bs86UL zs1FG5globx;g;}8I3&Cgt_VMb6T&~?obXJzC43SN32%fet$#JT(E3&DPekFL)^3fC zG&&JjP5<}gd43td=(+x-wkH)IsrX98M=HKj@eyxdhWHJTzk%|1_RN=GeEjA!YtlU_ zJ=KBmLpUV-5Dp1HghP#vHTu=)R-;#qPc;6}_(J0ct$&ae|FnKceb>TMpQY1#V+Y~> zYWaHZlNr4+6ZQ+nmh{G!mG;ImXZ6P3fTi5maf8cxV`XqRl=sFq%$CG)Yo*IknS0{H{iNpM)f*e11!eBj=LD? z^5Lv-Cpvp$k9YOPj>Fx*xi|LUmfl!5+>A(X>;RN|4Su;d_A5NUzfI)-Iq=SP`O;rSE9e;RH$@+J2U z!rsIC!kxf5+<4?OA8sw&@8R~rorZf8uxylB40k2sliT=wZ_JLmoy#3X92e5G!~F?) z{P7uBgX|K|tp|EzAHuoue%UL?8?Fa_+iShC+ogLQ&x_y&$tU^G=P0h;`S4CoOGo)@ z{&Y`$dc9wZpZPlQ1gE|K4*n0|E`B2swn*pK;?Z-8&Uq2%$8gpNc&Qci>0Dy$b(#HlB(tgrl&#{^eAx70)N}JPkJk{%d|F+)wc&SFdvf z+XgrFUr)te`Q@ot@W83qI=KG{_ZZwZxHsWWz+Ljnsn}I;Cb;QvcDO}w&2X#XI^g~t z?g_XE+PvJc0pCSW7gbxm9-3Vc*D(3EJZw_DQe|ByG3<2VSQ5HqEe8@+2py)T|ZPA zA8ZMZuXiu^)VYJ?54oGid$G*oKY3v#uw0#?7OK}=qZ-5Bpr_I6uFpr%TZ4g*c>wI2X^Z_(QVAJ6b zc)TH*y;z8vOeU2Uh>i zBb2WiQ)1=k=W)N;%k2(}tzveiI(@oYsD!X?^#x2byskS67g3PJG%}iuY|aHY-*BD? z6N=6ifpOd5KX)z-Sc&Uu;qy}!xgB$>9P=tI)eC5yZCvhgugDj4=m|QSouPndrO_!v z>YQGzmHlpK$mkrWy84E?#;N+a&EY1>LPj3L^+c|rfYTfF`-1NJq?~=>5IWL_z(n4N zO}*`D4Ew^tcXw*O~|Cc!Z;0DQTDT%!9!nA1_q0$rvVV%X4gam56Q?*y&l@<2R-%<$45sez(KrtXpc#R~vi*b=DGRXtvF+dVL|))9e={2gOv2J2Y1GeWETP zIem#}0_tR5Bm;}M8X~Z0kw{A%rg6>Eaq2Q1i)q2GLT0k%x(3=6@g^Eprqt_T+M9q? 
zDoYtA$|iCZ?-RH#M42L|1YEo);3Dq?T)ZdXqQT-Fgw$WJ-K6!JGvIavp*?gkOQ!IB zuntp5z*v_UIU57eC4#Yeh;D2^LmV{a1!<;J$Y+`n4Ai01xrtg85&*LRh`OWu<0&LS zOHt>?+@S=qR+-a^8J*65*%rFynfS!=18Q=^pC)G9UAvy>Gs3^aO` zF?iUkYO$fy^Lga&TFoQA#O9D(oGq1YCksMJ5;l`esrGdI`CC z7}I1GA;Tne*hEoWAu8VEMPGZMCMOb6AwId>fpKah2AI4;A{~7kNi{N-fRTeYHrGqmY>#dn2OS4+qZ@so0sDY#kZPoc3HNo*|4A>%3P9ctdB zCP;9Jzl&tIV2nqMd=*k8A??P32@UFa`J~#cfVRCKz)%dxOmY&_kQ)0Ggd!%R4VX|G z93fxG+2nxN;qyDf!8@qc7{=lS4OJTaO1+0WN}a9#6RNIy-76q`EJdtPAncw>LG)hl z2_VQ;jer)~)>xr52HbA&rl)a9$kh~fD=U_GLTkrUyeU*x#70Hh7w_D>0Ry zb-pId4=6u4RpkK<{(!sA7XZA;>2fzs9}DCIH^KZ>3bs1#$Yb<1)h|+4snb@@$OHfD zLV;O5h~hLN&e*9cc02qY@6!KrZBV8%Yi?ytE&5eg8wEufmAnG8TLH%crmPlJQxA$X z)%vkhgJzg~_*+0Pm~JJ1(W{im1$)$ooBa+XQ+)oA3A5)4XP~}7d#6b?9uN#U1E6>H zmN^q0g<|m32boiYCiRUq$SbL7yRhVdlNH8zGDcoF7kz%G>o{p?2P>Bkgi z;U(Z>cL0>;YzjhrxJDhTF*6A()ZcxQEOob-!0%zV>UVkqk|9$@1~CF*U}*3GpKh-^ z;A|?u7}4k@0he=?QLfDkUMmL!+6>dASx{LMIM5ShImM|$cnEnQtWX>mwz`&zr51MF zu*KF-C1vh3#i4QL8J4Kz}CEpGoxR2KqA@e>{^F z53=xJ+Ao_|rC7?!$e9((tXWFUyi&zJ&!&`B+^W>vQcbQ_vCW+gfSsJ9dhTsCioM)Y zNdrCPUZuLcs@zg5AC6MXtT{?eg>9bjRoiPEm2<1@isli)D8XSzqPg>G9oWDuQ)*^e zY~@N#d95<5Y5_3;WmsdiV8Xn$qWm_cs=V4hueMyNt+rIwRL!j^S7w*jin?eXk#kvv zWi~2rnT`Bw<~S-UZ_>TpUS2&{shN9IEr9dm9$C}5D61NO#48q(r4`C7o5fy5)w$JS zE)&n9GNo$nTpPXf6Yn?KEHze#rM9+GdT+W(7bfz%t)dbew!kHM=2f!SZqZhH#IsC)_xl(EJH5&2Z&RzeplAL2H{T=UBFacv9 zm>_c18TSc^9Iru7Q#k$-XbwUm3^a2J>5l{FU$+1SFIQSj3ulPR<)bnSXS67^!W4xF zITYE-6$^#Nmn-zr=ngpoz7;{eKVCw;$H~p0C)T~er5HdOC}twyE_c93Awge52vN9g znm?WoEjcJsV(EbwaKFn#p><8pWyjHwxBNIV93Yl)_nTVaV_KSLY#mv7<6FR zLUURiO?Z)M`-IC1P!@929=b0KR?ZOQWMNvMXv1rKFjRRgMR*V@98aAG1*y>J_yR3F zuF-^xX1if%01*k+12RXqns-A%QG;qP#Q?|)J#j_XVvI+yGaUTJ(hDhtQFV<0CUjxWLz>7h1(MxuYUNgX6MVKjJ0{7o+HOsZHh z2*f2KOG#0u52=KaN)4~391vMj^XU_jilc}SaS9WQR8fq0k8A88%V3qOX&){i3i33Y zW+ak{u14?abXlTYij&ZS=)hVjB7)SC=BZvO8+l!32uq2#qNKM8x;N-hNy`MJv(J1^ z!lnv9vwCgtYml~3(59$Pb-LB-NrmeG6JR+BrEeN7Dg;6=w8m3ZzgTuVpdEICv7!5J zad`ctgkeLVgeo0bYI?u=Ak?*JZVCw#s{2JU$A{3y@%wfUhUp~YKy*nNoFJhM6|76W 
z4iz>PMv@@MNzrf+aGXlTDqa_NrBo72?JNTO2~D396?q|r#cLjCNZxd~2~t^dJ-8@M zm7G$WBp8*P;EBXO1`R7(xlie#qiX~-6JDO6{IMW$eNXf8CF^RJ`Y2(VaD zB*10?HcN1kV6J%TA^|Rv;0Y<<2?9*&S4O6nm*hsKhYPCJd8$=-o_ZCYr(%VvH6v6) z7;*-j%|X))PiP4@NT54)pF@(czKjqsz#-HbE%WtS(Ij5}O1 z4`WXU0tO}L#k)K)mO|50=k|vjP=z?m#&jcYaxBqsE+%PQNRGuF$C3Kwb6rvW!YTQ= zM+!l(5^EA!5^-i7;>ua2(!Ll3k8+A!D`#YTckmX53Ny>)UdP9-c(A0M#X zV)#UxsohL#XeRj?YwVM#(PJhtydu)L5j*0Z+WU17Z}CY zL)hVM2JQojLplS|3q?x3QV%x*(S|$>SUEySGx@3m^!j`8`=N9RxwB=j__>#+sPj^Wr z#6y$v)p844KETO2jmfaDDjf@vrW9ws0ZL#fN&EiwL(y6U zO_We?P9g2y4Ayj#_YUa6vE&t03;7?)O{x4OKPxt&=p3tQCTXRG)xX=V)MLr%hJfvZ zZaZDpeUdYZF%rqvH9%1E>vt=m$gp5<^fpU%Wku!eDeCxhTOW`H%+1E=&{T((;BARB zsJfurg*3``;JhfrdJMWamDI{mAHZFt7knk|TUw$4o0;FI0G zqAZ5%gi~Q9(h7GFt^@`yo1~G;Vi>w~!l^KlX@xroR|4bHO>k96N47xpnG!$TZn#3E z+X#0It_tZMh08`7KiqD(LZsUWcMLA|r`f3@8$}kcAc;azHCtgyLooUgjDC8UvKj#t zfsA*Mk&1`r&IKz$Jn^fVdHUhwd+n|&!cQv;}ZC!K4ID+K@ROmKprR%;DHMicyyE8DZAHaR(5*RXm5yz; zP+VDqHF3b}Y^t7PzEMP=?K26jHk)~?Ko}5tJmnK|9fid_iqU$fYDrZ?b2hK2R!Z^+Ap|IvjN?SDMY{B9}AhsdEHju;Xp4sH+K?&2>whFklKeJ)vMlU3rZY zf6B{iERL!-U{7xX*^M{ZzQQaCLBt z;g-M!;nu*l!!^MWpWKu1{}S#Ga8bBDaPPpKf*TI_ShySE=D;n6n*w(OTru2qI15}k z9Q~-x8gI~3bN$&Nl6}8+UPC+MfFk+a2GNR%iKw8??^niPiO}5agw+@}yXuw(3wZW4 zl44E2Z$$K_9iq}vpy<8i{i&tP9d*vSB_MTZOyb%Swpqy_ z2T$ylb>gFH(f~o=11e`drbI+d3oa6?v`r_BDzD9}mu=R! 
zPmAWk?jhNnplQG=VNyUuMJ?h{s;{t2DRvp54R7#;VIYAzB^oSh6xZ8@gM<%fQl(WQ zDTHYxB8eH@(SjZFAoeCHOQ_KU6r~BOzHw?82z9F1dQ=hGFri3SqR7r~3lz_L<5eycP))m7Egks+6 zR@f#M+jf{+3hJEL96_fExCq23at92VpMa)wbm2 z@mMkhOOv-u`8Dzu73ZV0Y4ZO1^%i-vbxvyDu-3YDJ`hVLCgg>lgrAelI-ld#QhP-L z;JCH2l3y#!=fcz)@vH5Qn_w3YFBI6b_*n(x=UX}GHcO4&QBz?rqwrGfl+zPN_)vnW z%B?kAIy8s0wI}Vgf}#2;Q-rN4ma~CI>YpV}Z=3}%9gj(wp0%^YbavS3ZKT{~Lah=XJri~y&0#OwX$ZkmP<1lZ zGb1O+J)Va%7z`7Isy7yv$~lP!NgS5;sOoGs{_l$ZhZ zB>(iu8<#m#0|R6k;BfoFjfe^BCawAn4I-+psD^-h8Fr&F1&O$Fw&!3T$SD}6PSpMe z-WZJ60GzH3){I*rHz7K3$a4Z~7%<@lVVZ*x4Ac|ARHBI@#-Q7+E(wMF!71a%V{}~- zb`@Z9!l8C|pwa2|Egv739>@E`O-|P51;YoM-^soDgJ|g*^eV8PSH6kyYdM2|o?| z7)L^G%;nb-;;vPVK1ddtNhb?QussyB17@hWG{nLOs0B*}zeX#pnr={`VC|bpKT*FI z;;4RzEZ-F*)7VI1MXFJRac7IL397LP)udfYveYDnbF(iPYHCqw!%%u}A%n1l->mmz zFUE%nR3Dp+#yuc7 zr6h)(TAzp_EF~h@vPqj1x|;%*USQNAD{$q;QpG%>RGCm%s@yQCRKb|56o6Z?%nOG+ zO$AF}p#)(@JL$nM)6|MeVG>5KnEAw6AqA2k)*ZsWo%V$Uw~-w=LHeB}c#43HiHUYx zRC$WY=xm@h75a6#2ghd6ZxoGY1jH0(!1+9Ht_N#X{I5DO>AWq`6m4YENu7j8|1+yc>8E7QJbs^X& zf|LkrZ0js!QCb6I0Hu`~Ek1EQ2-pXvJCLPV2oH*h7?#>NHU?q2DO4adD-2C}HO(M{ zQabac9rPmG!1Bs6$E>P(Vx*2=ArdzN5|%! 
zQvgk>cjQgHD?Fb5EP_rQrtPp{hFLOn&+^C{`2|T}l$cDG(e_fhSiBZVHn&%8jPe_{V$mL})^PEHv652KATG;^ z&#zO|pmRAjMij@@#U2TIW>Q$s1406X&ZTbY6xHGIszw-#!#WJjChITaeVU%3<3ZC?9^?f<|ElaVvLR8AZ_{7dJWOSE-r3s1`1`dHV$`>2Un3MQ&T9&qOh4H zEoQKX$kxET%%Y*EP~R{xW|Jn_>ae1%x&&Idr%AZc#gH_@T7%#cUl@}dkuiueFKmi0 ztZ}|2jFnxTuh9#{XvZ!Dr;u>zL)EeQw?Z>sX01_PQ&U*I4lD7du)@<}!W<|uKsGz; z!7y%PLBTlHZYjIfaa)DWR#{$NmTWx~mzev+ueMZ{&9x`TPSZa0an+9q#A60&UY)HSFhM#d#|j_S8^+S{TAFdAypRgxam*N< zL0fEByb`BfMal^xrQ`Z|NKqmecttc4FEziI9#UpRIZe`NxB97MoIDO(e~0u|*71z^9lR5J*y>BGu$6;~5KxLYr?gc~KZv0Cije zpWn1pG1VbC=_6@=g%@jo`~{Djh#yT@@zX0tMiZ^ySz(+MzzceW2wBhsT{B*Q1CyVg z*pZ6O|$gK-%_P`1r zYeJ;Zyr!mr{*BK2pm`0ACa=2@GyZZU=X>*HsI0vUuP!o5*RZhE?WT&?Ey1z{b#I1W zOid=j4wURpY7wc%^@h6Q7c!ShOa=v(X<08ll%gF5X7MtydowpQA~DAT2%5bi7oaT{RCD>0};M zuPutlsmaN4Fpc-RG?^F&sIa<#uo};n)Gc%`p;}#TDYKUsGz*!TPF|4$j5bT~pG`<; zil;JJsVFrzL#ZW5PE`hy8xtAF#RYW*CjF$EV`@Lu{a%_bvrl#fJP+^2ZWD5bBGRA5MmsxUIULTr-%?WfI={WGD3rp~j2ZBu| z$ek|SP7&ub;zz8dL86H;@NfW1|CQMG0CWisjXHxXoQ-(9)A~pJ^-^4ez~cf@Ffkd z@uM|?#xR{G)Tx#eN0C|BbMwwDmLgED^o^P_j+3T{cr>BP1o7$y5?)2oEeS)F#Kod_ z2>uK%g0#<^8pMq*s%lz7j0~jurDm2NSV=%PMv6{`G-J~TeagBq>IY8fp@jNUDJ8Zt zP?J8D;RO1~Vnj=yfQ#a7X|X=T;HCyhyQw-?I9o*dbayG$Kx#d>;_>oDAdM+B6ys(d zFa!@}mL}|=f#GQbnY711LFzEdIg#FCMUZzNF-p*O9ZqyY(}QVorjaa|Z8ZiiMkEg+d1&vSz(O!%X(mPjHFMfjbo)K?`o`P+rd_8pl?Vrl4Ab)gz&bq#u4!)#}RG7^v}T zlv=PUSZj%mYr(k!O0+UfEfl>PQY$GJDLJW?Tw!tIpH5e4;ixImMtzu!d3f>&Lr5UP zNtDG$chPiz9QhhYp0@+#<}TeC#^WTtSYBLf9TfR2_REeSgRa7f=N?T)H8R_(6L? 
zA^gB+T<9IAF2r>P3t^$Z5SJ1x91FKl(zvLIkObA6=<*3wD%5;J8K-5;dUGa#2BLLV zqz7QQrNalEJnbi2k~yi~9$2twga!Syb2%!f>vr1SPoM#cN38<1V#nytWY)L+6p}pZ zF+V4dK_CDhjtvTxPC&?2QZf4!kcK^(OoHWG;R3TBV5SeV^)%+C4~*>+md=!lt=ye1fYV^9)j-vnjXi5#`X zju<~!70ZK1f;z-GzN8FA4la>HVyQrR@SD(z(r6_chX?UCb!=il&_*5FVCA8VJ(5To z1HLe>)`C9TjctXv0thA}boI7stSz@!jTtkB$F|AL%{H6UZowuLwoRZIjgO;RDYRS5 zrRZ04s4-qa@_}ZRA|z#NcAF;Yb56vCl74VkwHjG9tGxxajLnXzkC|d$apP;2YVr8NruIVmKXSpj$yG(c9$Mi=g3$r zSCcZws>|7g9U-}t?KgY5V{^G zlI%g@tdo|gmPN^>v?wc{nE=M;9G*u-`8tA1-wX^qNvm?oU1lk)|Fj6O(Baj+zV@7L zVSl~aM*)C*ld_FR_-grTY66LHCKn8Gs$FUl;yub^PzqVe*IL}?G-sSz8Ln$`hcSZJ z61Ck#)JD^wO_E5hBp+ek6bSNmlw3j6bc#}GBdPx)VDY5m=F~o>kxi@cI65sAe^ZMJ zq^P7xcvF69g{6ijkHk{0FFZZ2Ao!ESD`xm6&r)}jXNk`zK5syz ztdio`$-*E+YMN$Aq0rB%oH*fn^J1EI}_PETO^Ti5u`tjsgt!SqOf`&i^K8x0k-Y5A{J$d&P}L#6ZPYcf}OG4BBx3= zU^|`qPFe}z=BDe#%}t51P21S!h9@%9`$#sYZO&$BTX1*B6iDo0?1ht35L>*s$5v)f z{lW%BGzgOk4{S2R0lq~f&Z(e%><5#Dv}g#ar?tUME0e;o|AU1%R1Ar}Npx{Sb)_Xy zNGITuiSMg>B|o|r{D{KUyPqrC(JmQ7_v zR4UcFsmxz&(zu{RJv?(9)KZk6g>kCe};SuAI7!u z9Ha64q#;lG=-kk2oE!cPL;~eMr>6e7-0EM{Aob5PSpAERQUAO{)IVRW`e($$!0z8U zET20VaB0{l5*pzcNa;5Hj1MM6K;wH&kZe{^)!U(0iRiI=7QGbQl;e^LyvyhuUtsC$W38p|OKJ{AGsnnJ7%_srSbA zZ8w#hTl?Ofh`}fxWn2o7FDRi^mrKvZS5C%``|Y5{(IWz5aekWOG*BlG}0Z$S_7z&c)(O>fX%=Xpyyra1!e%(0gHf}fo;H6!W|(!4Ieoy`Vi>@=6?b^ z151Imzz}ddux1zRgAXAVeUAJB`ddPJ5wQ77$OGGewZIT?3ozp=s7y?!Ti&|kXU>h(*JkXm2J-b7CJ}?Kk6qpYz1Ny%U=?%aja0j^q_Y&?2 z=~?(lV-c_bSPCoww*4FOzz}c;(DOq`Zv*DEBR=?2Q9dvSSOi=QZ2mE%*8tmqo5}qr z#EW=fU<~~I3^`yUa0M`9FZ2WJfo*jC3(8Lb{((in(tXGWV9u|w6A+(v)VqN-od|a< z($fV!z$)N&y6#4L=o%P6CG__oJYW@Y1zqd_7hSvXZNUk+24(^M?hbt!T?5wvGotX#G++(T zI}!OXs6$T&HUo=+MV=153fKm00hUL1=qZ!n9)s|JZNOE)njszfc3?fQh1`JxAMC1$ z#dqm|jliYAHsC6tC$2-^2n+(Z19O0TfcZdi5afo!A20+g0G7w&dw)RxunxTum@&LV z_Z$qpBOnLN_v5>Iz|hDJeFv~+6w-YN{3YQVeZZOnJML%^Ct;C?9d zq;%+Iz~E%0kFJ5Oz{b=L9q*JAZNMC02v`909145VHL#wpry!odHXvRDD5?(c&np9(u127gC(=nH}6N8wv|K>u{a7g%~U!aE#%7VHl!orU}a2Js!sW?&7_ zl@7ZA{lKDZgaa%EmI8xw5g%X;u$B0E(02s%&xd`0LEtiAc@FZ0uJQdy*HqXQ=m*vx 
z1AV}%V^Q9KHNefl{NoVsBVm{05pQ4-umqTq*P%BMo`~|80r!*ey+B~b$;c;Q6>uFe z1Z)BpEyQ;gfx$}=?ljn~81@3@Uxs`K76FS1uR^%M`oF?1gjXZ~rz1Vrz@4xRb_0fh zyMQ$nu>TCCb2Yv%mO$@#@2+a3&>f3?MLp${rVB@e(JtYh7BRlmhV9_Xi(+^larcS7k$59!of36JX3(`Uoqv`&2m&@;VLZvgs%JAgr88!!Xd4a@wrOEJ+O2p@`J8tAwPimd9W9-2pBgH@jDUr0_GP$Kd=b6 zkodEqpRUh=eqbrEg09bneqeba^aHDa+krLbK|e5ONvFOSSbBb^?wODH7Io@Lz>Ev= z%|~GA#hrRKu;a^U|trUHVdBQ5p0Bb1J%Y?+LK?>Mp$mSii1I-v!Kn9D4Ggrw)36rS)BU zBjKxEdcui_*FU@T0$^|(+<`^FDqz))F1-!t`JzkDI0<&qyYzBk&d6@P8R!|)tp`sA zKdxIZ0hTW8*0%r~&*|0^79!m9yY(!fzoc7V1@x@z)^`C*pYPUFPC+=ky7ekx&ez>~ zD==esx1P2L@zA^VWkA2^(Hntnz=Tr~pCLVZ8L(}1kKP2#7~7+#=YyXB9#{jc1U3U} zfNj9dz!0zr=$Y7~w*vjZ5U>;&I1Tm(rUR>hEf*kO2leP_7ee3U9(@JSe^`%R5BGZD zUXsh`(X&oR`eyX#tAL)&9=#oMrE_}p{EHB;;}CD~rT8{yH?RiZ=iCUs4d3V#OCgVM zbC#U}y}(LfK5!xUsxx}@b->1R5fAWf!1Tq4KQIT_yrf5uyBPW}fIs4wLJnAeNsnF# zzNiH81Xfk{=rhg)56lNvt%f~-ZNP#{5TASDALw6;cmkV&!9O7$_rV^QB7QIT=q13S z<{rJS81Vr%1N|RE?kwaFa0ajum=6pAyWyVGg7jSu|GRMwECSZUy&TvCYy*aXRlg!V z1@H$fyaIBe9=#aXrNFhguIhr_aa{wf0yY9S0-J#iz&79xUvG_#66hV%tG59CV|(>I z=OTUMdiBby;BP{&zPJ!_z*1lha22o-SOaVZ7M~CIgM0PO!1|P4Jx~O@9oDNC0gI;g z>NzM!jleQs@JL)EzpH>Xzz}dRU1#*_NyxWGpcna+F}+uB!gUd_mGTwnMZSfA-Ehx8 z8hUWu2y8_DRn6?xm*Kh%SON5BK_2;3KdVj&)jj7y z4jBA1?0X^nZ1~8!%_9;fZfS${H^>x7fE0B+HF9p^Di%JmR zCBQ2YF0lM6qz_mFY=eB;GWf%_|L<`B3;bP;bmKastXFRa2CqZ@gU??MI|9pB^yLw$w%vnr0L)m8^euzjy+|LhZ4LAT8`mPgfI0U=@81#b14u8htr~jEaJ{}) zPrn*^fq`qG_n+W#UH^Ws9s=g?fPe5sAEG>6gZS)3c?M=Q!yeF64=jOu{>R84@(;{` zzl={%FMz>M5gyRL3+W_(z})NL@3UTgEwJ%(*a=wk1?;pO@n}K#kZb$8SN9`+p>I%6 z!25TjJ^+i_kUsEL-@zWZuK5Z1umb6G=z1wI=+gBqzzkqBun4#p*c`9xS=U2ugs!gw z<^!vN<-pBA|AD&hS&96dsOt-X<-j6fJ#YoEDpl8;fH_C#deRN>4@?7=19O4(3v|5- z7&=kcTY;XFbUg&j049{f{R~|X0z+r%`V63_K-cqt`RC|*CApu6_yWs|p#Mgs>mpq* z0QxW0^%}aq4Cw%RE=N3X!Zk1pSagN1F9ntY%Yfy;N^-v%@c>p~4ld(n_`{rB8IXz?=yYe2`x>9ir^Mkp3LP5wm@9vwi+qzQA(Vt=er)?L3Fi zFFDUizQE&$d5Fg(@;A#Dx7>NQkD^ZL zqr>snyj+?3L2KT7T!>MI4f5bd=`Dl}&W^(ROcxpv_(yza`vS9l!P&mFS-y<SPD0pp zkY5+h3my#VaFPd=2|u{S;ARn%?F%e}#fYbO^j2DU(vbsxJ9szA;~;J^xJGc4ZaOG0 
ziotCGhi*#RvdHJj_CdH1m#g6B`A0}k)Rfx*JD0h_?W|36+8eseb&j3GTwj?>#%nLa zX;>H1V<-+dD6Sraxfz^MCMe&Nz%_zH7b^1z#fr+QcB|8Q4xOSPc*t)yjp~(cJLZFs=#QI*VK@ zxM~X*0=L$}d0l8DEL;FwB{(B4l!xiyD1IkUxG=++R5FnmN>3i#w!qEEbK(lYHGuPy zJPzVY!EFY&L^6t71;uENYH-QmCNq}{tOJ(?ZVS++K4$yM9Hw$|g3GQrkeDiy zE)?2^M??Cf6ltVnxl^l&LV2fn2H>{vDRbVYgDU_RggYH1mjiAYIDkBeTa1fR3s(%T z#KNrrR}7AlLkIb-0=E?0U6N7UMsT4pzbSBQ0Jj(1a`J3#OApeFh7dJawaN7E?1h-e zPb9ra>nU8XJxKdv@Z*SwH1br|_W<}3@Ura$e-85*;8%gCI6yw_eDKLQ%>%y2F<_tOYJDO*DN6E@-eN%~ZO&j<^r z8-8lu2*}=6zSgs zUCmg#ipDt|#L?TycYrg(r2I+%*96We7ky!JTGcQ|%X5S!R*rDel+K0lpSQg~9IB&5 z;BvvmQS#H;ciX2k+m~lbr#nK|pGRTsfG&YGH@mPX&3oD1NHf{lgTX*I z*745gy3m(qdpjTaTXtRuU4f?l^i!Bc;F7?Fhq(-#AKd>&-Zny)_kDAiyTEzCg@@S= z&IRs&Bb_tQp$9+c50mOi9=K$17vh}zBQ;juk9x8Se)Hk?x9SOnQ3tu&KNv2# z7=8?RW?;SckECNE{1(ISZ>59k*)qsALJqgY18kLzirwF=oKH6zrj~{LZ-##l){-~a z`p*mVUopUc0DijRzZCv&ApfY7=*H2k=JD-HnrwRm7;v^i<_(ifQJ)OeoovWd?CkG* zey8r78maD(t_tYd`_cbg7nPG;(3SsjNWX~U-q+USzRX+}PcvH<>4CT(1NT+%i|Hk! zy$ZkQv$VU;0=s568D0yZXAAWFR#^x_t{ienSpQ$vA0FQmU=vbKc$L;v0e~gqw=?B*Y4#m_cJEpO4CA+^GDNW|F@Z1RYkz?T&ivMo=uqZCH z50BhOb6ifO4CiqdCRkroppX$n9a24Q|Q-FPSWx%-LGHd@Cgm@YzPFmsjwtm0dPv7QrUT;jd zsCnMSkfptHG?yAV9POs?`zSTbi}nCs*1~`Ad$xlmo~H4k=64Jj;K53yqXjxjurKdc zvgL2;7#mSXvLE#fd-?9M)e$z%)DE`Okw@_;f{vvBg!G4gSI5+dIyOQ_Ira?hg1+BO zi}Nl!bI7vQ4jsuqV*bXyY-Rh3!|yYl|Fmx}ilwd@Bhep0S6n1rrgqHvroFBLnb+me zwfQH^AO46zurlQyS)ugTcY`R7Qw%r@_`*QIRU4PE&i zA^kl{-+;OiR<4~l+1h*tyk(6-djcKu9xQwuFiXt~@siN_VQq;qRU+>*%HY2c`%*Vk z9K!wE&O12Q+ogx%(Fi@epy!Xq!;^&hnXdm?JTedu>U6e1&wqR0Onz6vZwP+v?q|bp zX738KC7MgsXEeg^((eEHc#O(x0u~?|pz8~q??;}zW|t>4wxeMV?TcR66Vk`9E}B=T zFiOEM27gtga%`#-+92Diib_FYY=N$dUUL}4?*dPImT}7`({UWaCVnr=hx0SQdoc(o zhy0f;Z@tIhvV2OI{rk!?k1YJn2e%nql*txqjA4%Bnuu{+3mxUy6JB7|q1pz^oT&3h`*Nt# zOZ(*4Vn6wf_PQ)>xfT^6y;)Rt(*qb+U@!W$`=`qjNf+6v47!rBZ~ZK5pJmNUnr=2t zf~m};a<>IKvarY9>%>K0c|f@%3vp4Y=F!x+xdHhZxY{&RdCefO4#OaIvM%Kot< zm8mMou7xa?>*PUp-w3V>+<6r+e_s46%ea21Z@}kyIs^B+i zm^p46!O^o7rTZ5*Z>0REybeKEZbFCtF~u$1{^oJZ>ae(RYmNE>FB!?0Z-D>sK7{g> 
zo^xph|EPT&Ep3)|c!W6Sk*>ASm4jzTzQwsIj`n4wI#L-)g7-FxFLXt^=k`oc?%Vb+ z-V1*#-V33t7|*)=w14s57%ARlgKFqX#`7`*`Vo5@Y>uQW1-kY?R~vMFNAa@O&l`>Q zhk99*GyIY!qJA9Ep)bNY*?+mSP|iyQahU1wDV*+R$$@T`w9 z58vOnUlHE53m0r(0og*xK98}vO`gy9HJJByOwoLej@iB@{F?D=aV&s@w$BYV8*Db% zW0-Z`&!iOj#fhskAtJB-7OrL%d$@pAlZ5jnTwte{495^y5-#wrjlkLA0zKgu_uFSw z0~J5Aebpq`AI}ecigU7qv()FAi%mAvuGGP;1#SySI8uA}w7I=QwU%>S?eM!7&m%>V zUK~`$T*#z$aKE^45#AQ4GG?u_ZJK@D^Qeqbr@9W$H7#IWE1h$OL>&uwAYaDv)^RfL1-K-v?UeT^3COQv_)WlbTI;Ccw6nE2 zy+=8GrOs2aQ)#xZ9KQ<3EMFC{8owHp+*JjCIvcOhvm7}e$Mv{NqQnRW=u z>zR1o3~Q&h`DfXC*5$QRHB0V2qcKPs>AMim3F4d%ihCuvvWq%&M>H<_>bG(45VkI? zjrN$w#3*JMy=pI7hRUGP{fRx$n{j1_{yNSn?y_#qH*S}l&!J+j!t(?iGO^vcH%t#ELgDrl$4^2kB1$7ub$xO7~Cyoxi2Ofb_r9p1#Yl3?z-55rqAR%yaOoCq9k?_LN6%lU zSoCcN7X*jpZh6GvZwolmhbgZ%kgR24;Zqp`kl@_^HaMczr1a2cZ zV;o3v@*alqzJ&{bTW8_Y!I8djzd7L6TDZmFD1PB`#o#D@+#E_fuK-tJ;i|wh&S;Ig_p^l>x>q5tq7I6Ow^P2%~ zb5f^1Jj`z{xW)s`eha{D0T&%6R|2kKbf>;1OkX*;Ts%*o7AChATv4D?|5F%O3$6~& z+vkUITfm9&o%)fS8FblYdam zq3^j;-gcd|AAgUPGXL#gr8(T!{)B~{-?$kJ_Z5CVQ0G9`fijm!ooe^D+HoH4OP!w} zWfsZ5*hvn%o+JOB=kv_*#hoqxULgN2kTV4;6cyX0k|}~&OQvGJJf&0DW-A-_-9H(9 zUyIW;9y}q+uJ6nCwK%ovNp?;cdQ!Gizu1DX7GBb+zk>O1(^$d?tHBhO^Fcd(*}eut zU)qr<&$o2y&ku{Bugt7(wY|PFLth#6r9arI_YR7n4_n>Co-NakvmL9!7AU(kwrqmF z)|yWJZ!r<{nbVtZAH4=A*)9nK0e@Ymes!cViaDR0d+n9Uyr8%(g}$6uJN3ipz9xKJ zXXHh-$#xOqR_!ER_0TnATc>_08FHX5%bdk2_A$#-Hgg??cKU6+JA>K}J6+~`y0h$* z(H$MdD-XK1GF$Y2E%$SDx^G{K)BE`+DD?$cTIFOBE1#}g+ z;9VtF`TgA=KMh)V{c*P2865SSf+l@&(=k55dpmYS zsy`K`c?0LS_WCNwETpdh`jUU`)R$P(rpi2-GdzA;z_xzRf|$}*3w_z0o%$_)C?A-I z^L@4E^e(rLpDgMWKU&yY5#Obs6RCZ)=)3xN^yNX{I=r8xg~laz@hdYwE4#~HUl|qA zLj0*PS3&E>Z>-TS9`~{4$SpcYtA?9f|>;v3dfU)_UV`|{q;y~lX?Eh`J|n) zxtM;^p0b?|mAf#MMZ7IV(6+lzf0e1pH|CWEA5R4-MrLQ16_-Y zyY$78%7M8rI>#=1Xv9F(W-oLV|D{XM#yOR#x+pnCtJb#mO@|A)-L^d9$Q<+eVE`g z+b_@dLCjeYQ~ma4=xfG1i4Kj_wwmXmmPaV71(a6LEZon$)1@DYdqR8LnZ_pCwu9~S zq88&3(w7f?8}at%T;Klbn_`<6FbVVpp`wQL`MP!4v8cS|we^))mE(5|U>7mfyB&~? 
z8`iBicx}VBmeXtOQ&KJ4tH5mZ4|t!`_lMf*Li=Ja1D{1OatoJ%V(3dgpvuW^y|c}!(~v2C41|KQP%8xtWYYQD&>DRWT2R^lC3 zk=ojJQ(qLJ4Qkg&S1EKYzNK4li&S>abs@s|saB4~=v{B(_HO;(m>;((yOy!Ac2$CX zI^~*s92&)SHQjytVJvmjvQNml!cG!;WFc_nGfgw2U}=h1E`s|Dd9f^u5`w z&%w1#{#a~E`@wCxjk&(8Y&#yl$<~bsqr0(NKMD21CJf7(h%xJGcf;Lyy-gXVyCk-pD*)E))3j@Gvy=^A6Z zgZXtaaz9bRd?B*_OMi49lM8?O&E4vq4fEw3d|!B&U3s0R-T5^){jaNmKgz#y_}vV@ zpHcz__T2;4gkEvm6$Yhi2Xy6qj`lxN+hQKWCD;y`F*>6(1rWI{Uv}#c;=F$?(z3T| zxp_?;w{7y>2aDmi=%;QyoyvWm{pl%F^?I9ePSdM;q#Y{Yr(|!pK4JgrN~~>ngWzQT zLs#>sJ^$7Ff~YJdEWrG6uel5d!7T%a=|Opr{u$ufp#NlI`qHNo7pZ)a{u20W8H9K0 zkv|-`#S!J;x}ocCB9t!mY?XPXCVV*yD>Yf?K zBiWL41&_h}zPCqzdjHDQYTGgeJyfPjpert}M}LYca9>$B$`o(ieKxF2q5e}!lOSFT zza_BKIGmH;MgC9k(dSTHkZ1jK%zcm2%C@Nxas_Blu&gN$@>>dSnZcR-E-_}L zWqPV1x58+PkY49pIgKi9LGKE!Il4!`lC+|+bYkiiTf*njS^|}eU2yl{-HV0fZVCer zd1LD-y}t;}U$${hWit+g*Mu*7^gQx|gKVA*E)U#zBBaeR_scd9g35+_;F})(Tyj@s zx4)q~&1h&ihwt>~{k>V0N#rgANgLwP}bdQ4?ePROOrq9!)WR3yH# z?VBN$09CorRdYj+dWRIo;-?~D%IiY#wct;p@DL|BEk|BanMJA!O`Dxx%=G;NmP?5$lTylUv66!nq1?F8Do|J9ml;?HdW;U8K{(xO#B8 z_w~1TB)1)04!G2?Fj~MBf-}-acG(NA0Nf6eGuMl^jJl8or=$~Lhx>c<*@)dDj(=k<0e@i@$MhEQGA!dt-P+M@7-Dj;3~isw)N;oMryN7 z^T*om2<0(vv0cf)AWEhmVPyTQN2gm|bNVkdk}uooq&&2jkjtU;t9m`4u4)trST7W{;F$Xe}g%$b^P5OEJNrL_gRN`6K; zW?gd6Q?7a{z}ZDRo>V*F!Jsqg*B;BWVCHtoi6>velbY*GbK>O@;gq~rUkF`+jvgIL zPlnwfqs9eG`Zg!1QHWX^zNkta*cl(b75bfvg^^@*fQ)&-)Y`<~-kj-02mL+y4p+#2Av8|PTh=XW%j%g!~l@kZ^tF-pIs z=$tIaa#VLJp|8N#t9Ot-#8usc8~S*Bgs0&83LcM-$YxFOn;hG#$J>`Di~a4FL#Q57 znv)jN^B@D$O!mrv+l&#twqpV+OZjl$nAoeoO!2@$aa#&5e`IgueQ)M^y(L0Xm-7_$ z(6#u$Uj0z}bXw;;qJK-53ms&^%jkEh6YH~g-x$EEYpU(AaJDb60N(SUYs+isgZ8ie z)+5F17`yfx`m^z?Ll_lXdh{audqvZLrBfTQyg0{)LPq(~4L$X5_2@_2r`u9iw1928 zk5zRwXA$yuOs{^6eSTPVT@)eRvffuhR~Fu>{m%aBdgQlswLsUF@xA)rBISp9j(Cx6 zdxRH6q`_V(r=mXK9o|<^y!ytuG!jtvv)D!$z&(I;@xTw&g97-;p46)^p!N_4wHKv` zQ|;z%y%BM89X@V<%G^~E;+2;Nee^E;v{$BvV{tgXHQ&rQ5!o0+q)w_DAtCL!KQ zuXpS8?oLu%U`)P!*Ol^bh)&Si4 zXdl{6i{$vEo!SwOA9F0vqr-ANU>gqY#xm8PD(mmre>?i-BTc6~CzL*I5D$KUrY`Z>`gSBnj=}5Ahv~dl>YUt?J7g2qAGhkm9 
zEn&4sXcP<^cSC2cqerK;F4qQIodecRKee?nJq)DELf+}P|BLF8>u;_*Bj~iOS6^bg z=t-;6O6NN0totYK;VJEZ4jS#wz;QShcKZhe^2R>RdAFIOITUKgLeN=u2)^Nfb61RN zFXRLxo*Yp(rDYDh%fvX9ep1fB_%NkMKaa}KZk69wKN?=$VR)i&3g9OP-^!5Tyl(F& zJRDd|(04EPZtu~Lq4Yk0i559l)3Tgu+C!FyddQ@u_UMnngX?cfuVtMAb2;X9q*q7U zm*#e~Z&Z)Hi!uH?zDGY3Yn`r(luZWcthhDC)Hs|r%w8$vvY@j7Iz#XD$n_7`Zj3y` z#}>$p{u@ZVAIh|qopOrvA39u3n9m@)KNqe8<3Xyi#-r|chUK*!59VQJtiMEn!rhyq4{}*)FA6Hbb;T0KeQbG2T1BN6y{5wki89$BR_a55_qW zILQ=4rVTQfh7+zA4H-;Vv5bJSO33Uj>d}vng3MzP@}c$yQ$EZ~6{n;M=gFz!j8t(> zs+g-T@&U)E3cv}e;&l1~lROH(lm)^tNmp%1F(q>3};fB3mL zRg|P+gFyWaG3Xm_h{4lAH^eMXjXTZEz{^Rg0ujhd700HE3sOa)ij(b=ELg$5A?BD= zaklieQ2xawB8T#VU)7&5{=v6fUM3rSFxFgd`u571Z2-@R0Yb$G&#p-om!yglQ^nP( zVlfg}-k0KYQ^onx-T9|faUmrdX)5$p8;OR0MCbBUu{1UAWM2&>8d1f=V>iT{k{Xw9 zW{_wkH$2f$N*Qs6U0FesCKC2HPXf zG!8F^&gxq*AC&-|CqQT4y)Nn_0+s`azL$Y&F+1CRFTZi?fj&}m7p zT+HoH?{yBF^wPcvng7t0{7H}g0^;hL*_U3Kk9jKp3yo}4&7!hXH~ejczuDCPkLmM= zel@HOG>tj6<7~Tsw83Qe+;cEq{;Egcgm@YKqox0_uD6_KTP$YzXo_5{g}$Yjmp;S+ zeQ&}>1IwB9p2`^^AG4C6Zx8h4evA1yveOMppYcv^a`&Da=Kg21JH;mj?i(NL(MMCc z{{S;Y1LI>YSFhNn4^mmsw+#AbU~cgXoVyYco}~;>-^kA_y%5$na!pkI7=06{9&?Nj zU;)6jJ75>5`MwJC{Mpa8Hwc(AN4`-y6L3S4j`_j_O6MWAcDIa^wT%(dNuSaah0y2P zhxq^pkjBZDbW$tTpVVImlytbucx{9pnxA}@^n7ibR%^T}Ba{c)qA9weZx!b0Vo4tj zr267z%ro(OGu{mwM6mU<;D5$>n18_>r)+oc3HPtsto}r4c>hcP;Ks;6YFo;nqrL~< z;-Y@ezAd%xRiY>RZ8k@sb3r;N|8_v<4y{*TMCm#f@v{uc|A*j*e{O z2z7QA?8uZ8F zMQv5eg=jDFy+7Ihet)2y{jFs*V*3kqa@@Dh1ong*QUi9?UZ~k}e!^ z`0WB$j&JH=+DjfZUTO!o1Khufi4dmqGjla1f5D|l|Kk77Ul9HZ;LnBc0)9G3h1b7F zLVI_RF`9=HmGM=Otv?yxfD-ALC)=xRK-SC830U7tp3BS}{Xy1LK* zpRb?D^k0JU{DgtuSEG7E-ycmt{hLDL>}{A_8Q5-NuExBF_zm0n(L9K$aUXrlv%GLyZX^$dS*)dx9yvE)ZV%#=qkLVSHI6^ z(Smt~M(oqXt$xoQmu?1WJoLxHTJI(2EOHPQ84olr~+ddqj(jmWP z{93O*jf`Yx6U$t~s0j6x(ouRP=3MZ7-SGTa1hAZ|0b@(=LM zTbHCH*95Naqkg?vaB2lt3oei9bzggJw700#eG?GcROf&e7wsIp1+PMQ_@3`sRHp{$ zC=OFG7dsBne#)6NUJHj8(y3w;0jcjMu-jR&>c zd!Zxit6u$FTOFo&EU;Aq9aKkV{2A^4H`wz)=^0oaEd8`LD?-1TMR{2cT^o1z>ZjwJ zj&%Iif(wDWmxw-_TlyxvJv985ItbAn@K=CuBR|#e4W2LG)ZeBR=k>kF=OV-;a# 
zRE{rW`Jnl}tV?7EVlAmM{eQvS6!aK2A$7X$xKyAXU3tt~Uh#@!$PY1MhvTq4G2&_G z5g){e-Oj*!F=CD8`Dcu%ba`Hn5j$PkAI6A(xIM4Lh)3NqFG1ezei6(dH@?BPVbF^( z#8;lbe-$IPMkm$9h_x}^pJGH!jQ7pkL|LEaS}@#COlIIOwiQH1-b=Gh8A+G7atGne}{ zkNC><0?yY)x&P@A4@O~6X?s)xPInLT;$w_I5AtsD2z`*Z!6R<=V6k1rdy*hOFE~6~ zyy7*->S!T8jqyC~6<^295n{d93*$cRJyglg6K^Bjw>9^kc=5j0gSfsM<^C~V?1}i*%)Q00_belBygD!ES%lo`b+~t}@UMQ|di8s*Lzv1%i zixzLYHX+32QSQ!YQ66DqO}qT8lx zj}1%wcDlALA-Z+CrY9tRHC@{`+_Pi4wrj+M4KuZC6GuKdQ+wJUdsn9Rsvox;PmWBu z_GoSA$mr!qYx>Ceo=olQ17h#Y)P6bu^ADemO6bVcRwPAtWoora@xNwjl?TS&o~b=_ zAdGO!=!9P~wYt&KKW1vXN5}ssQ`BA{Q@w9wYOhT7LglupL(qv%9r4i&?Z>IXni<;aBjZuPjvVpnbZyI# z!H=hFdydS&midek?@rfV$_TzaUE6rn#7)z+x28>eV7m67X^$XARWsfHm?`d?DQkdm z{2gVZB~6yE!Fgj*=r5b2%8TxZS>YW0j#K=@Nes%L*?moe)A6kH#Kdh!Yeg$i7WX(3 zo;+GBbFRkuYAyQb>Doiu@SW4OCN1&h>Dn51^rmUrogNR2zkKiz}WiCAfT=_U_bqLfm!a=%1!(n~n@#J5Bri$l*f#eB|g4rfHQK!B39THfO{N zu`MIEVVd?y#)ucEXOnZiiQq0y;l>|E0#G9*m9V7%yGcx!^8)U z^cM~jZ#kp84iz6bQ>kf~@bjUf!4>n*DWcImWBXyEDJt&g!^ORW;@0APP-5d0G`zPV zZ}-I{J~Tz#;=K+kZySsOfSq#8*QTpHCAVLvNs%4NKZEMXVhb_wW?)!?5_CG*OihU6&^6 z5~3^9#Lo%QYtqDH!xLAhiC0Is8`8w`#KfD@#J!2Qy|~4n^nRLn#2@!&n)t#W`*WJO zdt~BmY2xvb7?(eIK+;=j;-v%P-bfRB4~X5CCjK#M-1}3+$D@w!KzK<-FCQjW9JtBx z+F{~_(RG>-Ka4HK`M2XJoxe;>DoYc$PjdfosJMI5mvH^{;OGwy71tgz>h(j#+Czq~ zJ5+QY;#qyDcsOOow}*<|DZ@WKRNOb&-!nx#JK5hgMZ7)PyAy#;_C7L2+?DEWN)zv; zdS6Zxw;bxdK25AXbPM(W`kJ8*kH;PB7?O=FL(xL(EjfqVdog_YVo`E{1jr?h%xX+ca{2)>0_C7IDta2~Hz{EZB zfr;pD6MhVerw4iOn)(Ia~%h-YFF-VTbp zz22?~;vZhnA@4}Mar~Ph2`>gkqtE-V31Wv2^rNvOKb;`%iA#7SC_3Z3A5Rb~hl1WU zbmTuLh_~Vs?g@&G!@P|XMEx+(wrFYqGdKDPij*u{X*4MY1S6 z5cJLiN4}LTULT$C>?E;%jCV`2czO)z^1#R^lEqJfgmo~|Snngr;@a_EnC+JFURY`M zcrT3d@OUq5@YHxOlKj$mFH-o{crOz4;dn3N|K)fuqWkahUc|0rycdzVKG}<~Z%_6j zsQZ$=(ELa;25K^YF5IHI-;NcVUG7b>;#JowWZT<=JdLs9!$DI822X=-jTOrWyYW<5 z`Cw$(*Mk$EiWQxM$83xhyJOs)v0_imw-`vg@AG!YiqC!C?_SiMcIH&oUuP?;u+`rlBRN!_>p7SS8mbm@O|eNpSlONx={{jo=jge+!3AN z-sN(P+~$(CfHV<5)DiUn#)2+~QbHVdv>!hN9l+T!s~lo!ft1r7BhZBi{6^fYi5Hy{ 
zTAV27N3&bHsE7A@#}GUq{;nhXc};xo7sIEgp2cpKv?YyZe+Y{V^XnqN|+ZGsp4L{}6}E?e-eh)7Mz8 zbw`|_k9|Dk7_;4h@iLa35XU!-~ozUtNpLvI1Wau6B z0=x{FQ125@4vB5@iI<1C*ZailL%a|8#JfY#54H>$hWW*PLnf^8i8Vgz`Pmjv{OWdhyM^v{uZnW4pjW<=s63F4QGyoJo^`nIcH)izvAW$!r+3SD^vE(d zD*AEFF`)uR?f19lAjcTGJ5C?^g~KtIO2BSM+^-Il7WWNK2U)C7SYI>6F>#9L0jJ}r zFCllY(~Z*yoRE0Z>8^8%=bY{>PRC{<`t?&8cnTf#Q-g-A@uJ!5+en8`jK}*iqgQH<$zP&?^rscJ94)2`F+Jf?hvNi{%wKT0 zU!j8Le%Il6Gu)I&V_zEn&)9u{4bkR|if(s0CVVh3&e^!(ANI7vaVp}B*uLoy|8%%N zayZ_##QA?s$G~!^JErb-1fKMW8=OgB4;HJm5s%}hSxdZOu&8#QS{9A6UyX?0qGbV8Ze2p1?=R9$9 zAhCC@XbKGPm@B>sB;GYoJTZ3or*p;YV-s8EidEx=zcg1oI4=IRxnlRY;dl#H_qh0N z+2XbF!|%)%AB~Uyd5(A@dH4-;#Ouk?c(qJ>a{Le3;<^d3Z)J;@Cd5B8SG+qR_U5_b zrit+{%n@rR#=bL0d^9nB)g1BN#00#iWy7TS@3O`7lVab@7JDbf*UuFzgRwWx6|V;4 zAIcUV1Y_TtBW^t?{?TkveNe&!+2Zqq;#;#t`$4g9WQ)fRj{jkfcb1k;pUf6J4j=wtws}#U$o4|sQ&pncp`D8p@_xuM@dpQHj^;})`U3y84sk6NS9$F@ z(SL!s0QU>B-5X2g{*bq=<$nL$J*wAPLS&72#Nk+Uu0Sv0#)EjTI^6F#99u1Q?q0dM{i+qtdn$!c8yTKd!^6H`_ zmG+#%>a_~vhG8s;15LX;THa86-dhTbS-xPnSuW6|+{->zSi$lQezRQlm#nW%VS^z* z$}HE=t=xrM@f9pzJK8MA(G-H?-FN-}vy)SAQ}MoFvBHZPFK4ty_s`5PW4xAeCF9ME z6^wT=-plv^<2uGi7&kK3G8*CjWs!_%Y*`j6X2`$|wp{I${_{FpgzR zVVuS|kMT6d^BAvSoW;0|aXI5HjH?+pFuuZQ&1dU%^HOt9QQS$nxO&0&P7XPZZ>c5#cuJ>XFhYsUO zkGS(xe6ib29)3pLG07vy?n!)Yr0$nq{ZgXZxcgLA!hRP_$I-#PlL+7*mku@ijn{Kw zx`__jc0h*_9@bh+2YbVwJ{6+TOJnw39tkXM1x)#)*tm8Rs!BVqC&_72`_AyBXIrZerZZxRdc) z#(j)YCvo_U6B(y6&SPA}xPiE%6APR4H;_c2CcJxqqrIFWHG<2=Sij7u1=VqD31H{*K7O^jO^ zcQSsiE%6APR4H;_c2Cc{#}O8IFWHG<2=Sij7u1=VqD31H{*K7 zO^jO^cQSs2%slR%vy2|P-$lcYSI;wc z{7_N)h8-U-s*fI6E;KPr7~`6L?PP}pVT@J(v^bD*=^c^g__oCL;Z6($inRLfl@qvwRtsw;;DGA<4&zvSU?!ui^*|fDL5xKPO({4B{+{8fIb*b8wY^EI3QSdosa`}jC{ zP1dD@_LNh;)NYSA@oV~cA%$Gf&`14tw-XB5oaStlq(4WG5 z@k3_$Y0S63tau~bW0-GgP&`jVi+tt_URV6}94>u-OUAQB@mDcV-(@Dg;TOfzMhQA@ zWj?J_U7yT+4f9#uil^t4>3Es>+Fo`2DDyj*FXp`g+nA?4xD;+#KpBjumj$-DsQ64) zg2qP^5|KY7Up-FA%kNVns1)Xn`Fd;}mPZcr1#B2fCmr-YKhk5&pBvweE@Qr!=gUX4 zp0&&y^W(;Mn(M(+{i@*lWpw{febkmvOV zL(j>~8|x7NVLcZb^1L1~ob@ba-dLY7^jyunu}%@q@^>+BtXHgI{r50$tY4(D{3Fbl 
z@jAwzm~UX-SjWg`{(a_+bq(Visy{Ga9aQB_eybB<55hzZ*}3*Y#mnz%f)6lXe3Rnk z_c6gwV?OPE#rLY_Se$I|>lH7*R|)y0hW@7%FTXtr{(3|H6~)VMNP@qg`C@E*p+kNT z5=hG`6wj7zikBa}1OFcL74Iruey0)qzYIO^DL#O5Mu&#@lAekW6)(S?2!1T{wVx{f z6_%gQd~u87e=75Nt%{f5D+Eu|dZa(FNAdEzgWxNf7u*3-`$Y#0cS*juOUcXc z0D^zpkf#^d;E><*1AcGFAFlWou3z+^AL+@Pqj>p!J;)!ze0#3q<@fNwFJQjmRK?$+ zxE|{?2^he8r!|_F2Px*+q)q%X(fm_!7m-@4P`zGxIHfSA4eAE<_je zX;&*gi}^8_sFU_#UVf_$Zkfzi-=O5>H`l-yFduh^;wJ@^qQ5eqMO*rC$ZwzlS2Lft zUGef;X5i}#J=md0hx{fP@LfaChl-cq7X!bC`LaREFu6P|7>rviiht8^#mnz~L4G3h z1qUcze%lND9OeU)6feKi1^!&-<4#q){N5J$Ynd-ws(AU`Eb!}@FQ&cKIOI33fLoZ) zyIb+{dsX0Dna{dc@$%bK;C1HH@FWWz?eeb>qcMR?=?Xlfc%!{Nn)!C-<@cT7X9@Gw z4N6{qiwXRlhWvYqpQEaQ*u;F+4#mswAVL0P@OIM0na^c=zc zMc;jpMg9u3D->?UTS}hZ7e|M2e^h6YkCVe)Jg#No$1~q<;iod6Jj)#a1>j}A9H+vi z_mk350G`Ir)jaM^Q?$62c@N)b8QRt{-)xb8gZV1v4f$5)i!AckJu2f9M;$8;ddD3d ziQon9a|;(pCz$#3SfsU`h zlilhp@%e@MP_{X}9(kLCa%G9;x@A+^nFrRPX2g`-(cu`{E4`jaD!cSp- zyM>?2ys*q$o(7)A6@?{gT*2LuC}H_Hi+nlrK?}c@`Gpp~mieU?ehc$e7QPugwZq0f z1zDU>(w1V%SdM>qUqOW|h4_Pody+TyB^dJcl8+U}K7=}!FUB(|cH^tRftTg~VRb_w z-$w&Kt0Ks+Hsq%$yUF)uK|a#_=LX0-#89!;Wm)-gsbLFE|<#VosZNjYg?| z;C%jj9M8NTRXdDfq&(WB{$T^w3p$v$t}l2;Q~Y7wVsm*O2R@$0QC&(ePpb)f6`s+C zaF7yCiol=8da8XYo|8D3o0+c-DJ|PsSvB*;lUP2W9A0GJ*jL!1vPXO*dG;Iz7ZupY z^B2gEL%IssZiVb;CK^miZ!Jv_;W&i(#o$T4xIhs)kE4oNzF~vnuV(qbFdukQ5zClg zW$3?65%h!-9c#hMA&t81`&(=1`MaW&HSoY8c#2P2oGQ<=*vHp~{w77lvHo`E#U+YA zgXOzfPgAVYBfDw19T!k`2y9h?yggN%4xYko`c=iGhxMN~mgSA{x$-7T!P}+l0Ym<2 zC3p?XKgE2Rd>T>otv7CCzWO;Ocp1xoD0#jJgXD24KE-T!zO4~cB_Afpr%$qc%gc%| zPu?QX@N!}8mh&$?0(k1F$t4<*k+VbBSl?A9m|&e&U}CyPE1G0 zV=43E9(8Tl;Y#MKx!o|z`PJa5{KQRAIhD%=rE;R;MY7^!nSXB*rEB0ZHmKy&5|sQG zEV2kZdG(w7itNuDpl;F<{hU96|}JS8lL>p0y2ekjIK z#=5_Yb!}q#ik}tH&iqy>&lh1Z90gU{xm?-pL3Yb!UTjpv6P%A{f~S11{z=&(kLAmy ze3)WBeS+o10;T^W<_po0QMhSL1X%y&k{_t&K9(1^Dz|kk|9AxXUY2jN)YG^W6>i)F z)luKeK?T55x{A+L#B;1ao#oSBSG*iX;xvc(>L?|6GV3`<@_Z2nWrqC!*WQ64D9qATf|i2uT1_+;eLNf$HHVx#{UrRf(OJaQfE0r>buHPJM^!u7IS%dy0xc 
zLzqNgC?rfW*kFhfgx4zg;5{@A`tbx28z)c%3M8mF_1UWxZ+}AoR?G3{FB*Q2v@@#1zh6~+=!bGIPvM7DeEn5|+x-4{ z;1KnR-?x}P0XwH%Bki;LK6&v#neldCV3C5G<{F2fieu9isJHzi=ikGlilzztl zI0#1iOV^RD?&szNKOB<-&rp2z@8#Wh%f*=lPW4@<5A5J)^U)Js$!GmjLTG$)1>=H+ z|DRI&!`Dd()h+Ja1f238%1Qn=DPO%p@!V?#|CVO4*MO zD)c5HcYa3kb+v<}v^cZ?_ywn5l!#}1Pe%TiY>{@beO|^tAJ_cXRNnsKljOyJkde=K zlz#o=Lbs;<_|JylE%>ck{>O|-J3HT$H=3S&h2rh)QX`9(c)_@oGuM&~&eME$D!z7) z0Q(fbS@Au(4-V=U|LTe#`k>tJRlKjb^Q%&x$1DCy#oIbA48Ke9Ttf&Af4|~;-YK}U zp>vPX-z@d|jOPC@jO$J4@}m<{-_oxLX!^z^;~_%*v!wKgVtQIx@irZAg5Qg@U-l~Q z#OPn9_|X{t2FCSeq09S#^Z1>Xdc92Th>Iq*za9``lk=mDhceJVXO!MKOA2WH;wygW zun^ifJplZIWH!!6fKz!&FP8F{yjq>2@+5z28Tdyt@Xu!8Ujcp@#@#NBH?w*6A2R5# zc%HP+>XjnTjsE$HmsBnp{}dEI`XS*@!*5|cS+Ac``qeK9rFOgXR>g;YUhfcpY`WMF( z@bBI}-g|m79{79uUgtGZU!BIBvwtkToEHP9{GB&S{^nP_U-9+l3jPeO^EWc`nS6od zbM)th{>PR6^}t!r{|5nVTzrV>l_+#MqV%?(m(PjD^S@R6h|c>PG|fLLKJ>HlUh@lG z{X%(fTj_1=j48fB1@lvMKH8%AYD`bK2{@1UzmxX7OWVJv^oL^fKdbnjn4bL0iXVyb z^T#y*Lot8Xy3((oCGVwgozm|yFOquMzUh+6pGy>P?~np+(Fr35oXS5<0ZaHnUF6>m zr62mR+~2Oar}XU@{fg4B>HKTs>y3tgR5Gyn=q}A?UF!w0&%bl0)A{gH#-W$KUFdAQ zUk#kwe~a|5eJA)g6yI>U;8#jA=Rx2$))eB)1sl`xZHz-dQTxTx-eUL_A~AMrW-Y@v zpT>xW!#Z~Wr*a;AmXzmGrGLgI=`Tn97$)b(fD^s*9qE@Fv>&%A{XvzJe4a4!swzID z^6kZ%|9;Jk@w!H_{ECbK6pOE8_$LrSEA76 zh77!#fxiMc)pz{{DX001-ciu@8ItmRSj+!$;6&d(Tj(tw@9&hp{dy_Phm`(|YbBqf zy6zA0$iJs5-qrxy%as0V#+4{^d9Bg61^m>Ra(!zC{YRAk;GYPciWTQDa4P@m6CTSC zo}=%*=sGFq>T86)Cl}``#n)dczz_Akql_z2=yI*mHwAoxKD$xzq07%=gyN*>x2X8q zy>f5kbid&rj^cM%KH5iBxM(jWYU;Sw#t(geRld*;fEo$D@|@ZNpU-8 zg}-BfG|y4oiLJj(1E+pzUoDJzp|;OUm3~94zCESibEVMR{P?(SQl6u#SD0RRjpEMl zNx^O3!3z{$J0!UA$+e0ftO;)8Xa+cyb6x$_rpLZP>Dy|5ee7aM_in{Es9j9^k?8kZ zis$Z=`(KrdbHC!#$`5BN{wIvkKB#^+ZW z{mUgEtM8=ZPON_mif>SUu<^UgaP41{d$%e+9n1gaM*nB>-ml5Uc{Olu=Le-E*K51| zveK_Ngz$W&|DfV)9}>XY;gi70pJV&2l-&7WOmBCC>s33_$Mr78vA(J20r59AkgTQn z^k*aw+y8Pqa4P@wh_t|8Yx%#V_|YFoy(;?ZOLj_sIe#vIvXOI@;)lL0_Ky}zyjJ=M2;m9)Q#Ip+Xy)5$}{bUv%} zM_w=VU(|=*i=|)I{#AhU6>kHl{Ewa^^!IA}Tsk9o?r)@j7nQyWoaX(ZI|ZNvfC@i3+Qy=3$+5yI2X5qvp={%11q 
zuWCNIn4b1s#n*pZ@_o7%fldieYv&z|gAeZzLTl%82L1b$euJ(rKU?$vWCr~gl>U(V z87@-#ZvvWI*<>U*y-{B)tDeY5o24xH+BNax4Az|K!A z{o0!axJB_dE54^Gz@*~uwS3giK;MR@-}$gpDDEX5&u|HF_P?pZXgv8$#oKR@^eb|4 z#uYF9jsR+gIxk{ei9(m2(jTS`X!!BBNpbx?1mgz8OFvcj5%5F&*YKr0cD|pr=fyuk zc=Fc(PVIJBD{69M-%BK)L*J72w|2Xo@nk-4RQl4p<(bv@-5K=vDgElph5k*NdgY!k?e-d}hSOGjUB~I$&yxJ^1y23wd{gde|2h4BmvQ+! z{QqBM;16m(cHZSx%AZeuspP*$&pk9bb2V@(=bFxs5aayYrg&~l%1_^2rr%3Fp+B-y za9hv1bx(T#-VU7dpMG7G{=9i9&(OV;E50El7w1yNHz^XAC%xql41U=WBoYRms2fr$Vn6XB9Zn zADt3<^Vi*?<9GGDLVxb*a^0)Ao!`mdmc*mm72or6p|kORCvcJvaXtU-O7E!u++_GY zj7uKj|Gy5L%6aGw(Q>XYr1RU8q2Q&-nX5AJRmL$NeMLs}^X1~)sW^SVn0_jzop&pK z^e3e}e|(la|D56n)h>RFz|NNyKl~HYA)iwGdx{VJh5*L@XD(?!eqVC9P}_eMIPuAm zR|^jFpMURAd~H+;g687ihc%z|H%mDmula0&{Y?3+s=)i8(%-1~^m_y_`MIF@9!JXm zS4w|7<9ZXikbO@1AC0Y>+^zU}4F8zo(;De9s^$DbM*e@9f&YW%Go<>4wdXmvYJLBY zl>a-@9M01fxAR~5TZpK~nBoV&A(b-wXp`b=+Q6p&%qc$I68fFm7Inqf|3U!sZ>%t` zM4`(MET8uZsNLY4xmU`6NEZg_Jbd~+Me#M|x6dg4Y{QjdRn2kkoS=M?zbVku_!@ef zG>plC|58t1V{IX|uV(RT@sX6N6*jQl?goZ4qq*-Fw^d`;zss+G=vQ~cl;q@w2Ec#Gmk zJ}Vi#R@>n{iqkio>35A>oR2Gh^i$D%e&6!Pm3}&moIh9G&T;1N7vuSZimxgCxtt1Z zfcH^5OzXh9N%6~oQ+pnHoaB72mh){)uSB8CCo}LbYW`~)m-SRl^L5RCkFH-IQ2aSC zOw#!9TE;OhRB^EJxI2UX14_THcG=@K|K9*k^*XHU8hSi|^P?TD?{`I>e@^LN0i4=r zgSOif6kk-nwezqg70$^~`f12N`fZep^ETi^`gizpuhO?aE~rl1&Sw>0(|oLdzoK|f z{q1*YX5UeKNbO~vHgTYXmR~czTInxfJX{j_`&q!LeM&EsH~zl%`-0-rrwRTf#aA`| zlFnb3X_l{5`aR=9Z{zXpJ;}cv^Pl_)aBAnn8i$7Oi}LS#jBBQ$%b9(lFFi&IqGH9l zjPVem{+U<$!>-VORPoK0&z$`FuWb<}2MdWcmPoQ<(exu+U#97w27! 
z>rLqLea+|KgmjeoE1nJkLivq-ptv(C_}%BrOI`w;%6W81@*mcA zxJ~i)zer6dG@lEhn9+L=>bn1omi1|hAH7KEZ&Lak#`+P$2w#IKi zpac8&6yNhIDW~a0SKX%V|0T)i^P0a4oa!}nhR|=)4r?>L5``|W%fQz(|3e><3_hXl za}V$fLh0?#F94_aPHWrM*cgLevJ-*!tb#1b6N3KUB5PcU>P{K^FK*Rc4)u< zlhSWc`Mg2jd+tw4IS+kK8U~-c;@_pfDWAg{$NqYyuNwU`&tl}mioaZO`_99;kCO*) zRebgPa(}ndzn^iv30?jNaC+~Nf0Xi@{`p1VY5jZtE2R8K)LyoE=W@jl>HKwzzGK?( zj|l)a=HCo(dT(w<_|y1gF9rg)r}`^Q?mYn9WQAtIm$O!-JZ+tDidvwH84pv+-_Hl0 zRwYeQxQ=(-ww;?cZu7iuf8Hy4l#$07d7+HF-Lt)Ilb%;=)ta?lBA1Z_GL}Hb6No#S zfbwn<$|s@Z`|>GyO(YXe#;aRtH5#oZH&&(7>-PE!3&Rzs>UaFbTDRwSyk5hr)LTuz z>v&$ZY}8RC?mur~B=0S_IP_L7%W$oW;&-||uTrkpwHS+j zz3=Vo*D8CtG*Y&14ImXxZv6x8&>g3shg@P5;`)uVUU5fpE&^Oz+_7;RRfJ#P^!Ix` zzu9edJYs19%hhW1&JEvIDxo`K=t06p9~BC+sn(W5cU{!Y58wN}&|Rgy61wZO+F{a0 zx$Vu@n&pnrP!CB#=|QF6?X?;soI34ExacNxR$uH+2PC?3CL} z9&uy8=X<@Sj$f|I6T=o-o&Dub)!ElCH+!|)e6Q2m@9i7)a$d(@0K+#ce&U&%`E2YM z>BhY5bd!;E*IjqxwITlHZeR8*7(~56dj$a4r|Kt)uT7$ooX(v#F%i|G%#{YK^kVsjlM@i#U6ax*<$xwF_PA4q@J z@!NhmqlEGdQna!+JuwT(?8nVkC!;2vM%O$3(__4uf>j$ z#3Dp!cFWt-W0bdAD`)gJJwa8vdoe*Rq!(*Ia+}|2rPFj<3q53$(aZMq7$u~cAU$iI zBeHwBT(8a7(o^Cq0F8`sTxm6zJvXC^_+dWdVY}6;XQt$bSt%FlAbjPSH zgtXTs)errZQR4yYiYj4xJ?p66?%l3i^((Ea@AZ2NlclYd!g#&a+6&<*~PuqoVe*c8x+TTt?PXyjJASaQdu7`@hBzv)%W zy>flWl;&cr(J!ZQaw0U}Y+IvsrweDo(wY^r`+Z%nUy!YX*?(yHNm5sB73mYiC z;ADn2P^j6w6XxicC*<^i^C?>P-NQ@;r z{;gnFm?F(;4V>Y39|8-O_&V$bVFb_2ve8()@ zmb4L=pGpkMk)*{uLaedP9p7Hq?#&cl?9FbT**LfP`dkfgDqfh_3opTKWE}6l?xqwH zcR5UQTmsG`Z?7-hv~gQJefGm`<7jLsIGe^GC0rMh@wQ~0lg%)i<2pz4O>G8uIg#J! zcvi)8wL!(>tc}gg2`E{xF_Mtm@?*k>Qa3Y)yG!M^uN4iCfLp}@U3M7F!*AkuYBe6(Q*57Lu=}N@(gKT#hgzlvCcHX44r;k#%Fr^QmJn6k<8FSw z+^tnG-a57BVzGkR#jPx1&g*r`wO+Sap&6_KnT1ha+G-PHEN5sMzCKw6#{+x=^d(|4L%qOeB4L6Etx^keHBcgByBYcLm5sHRjeFa@PQUrkO7`$dj*}2z54B1E!wNoV_W1B~Q&tJwe99=wCs!fd zd~%?=`J@zOHO0n*3d$S=lPvZvjxXcrQE}3QR79#Fm!YMW^G^J_u9Qq(VQak z4tky60m*39UnuwMJr70%mTJYFHP-RFey>o1|3ZtW3MZ(9T5JKd#11#Nh$VEn-<*dg zz?BSE9oWl=p5X@7XS9Nd#aj4lyS*yh>(Qcf`oa1py<^~93FIC58p!juC3xj>4Qrw! 
z>gH|EufPK$X(zpzc^EprU!3qguKsLqv|X-sisYzk6sV!RSyaE$bGyCb?1%?wwC;Di z{0e##pwOGpI%H%b_t{>(TWoA=jN}X38sqs<{68`^LI1n>FF!UuF*-Ri?vCNcC1f-| zH99puk)I-j=a|jfC48?iZGJf4elVK538Q2PaPaTXv%;WChd z^=fe5a}10)Iq0^bD+AFbBMfP9B{+vV1a1iuc5pvTAf5v2MldH*dnbsiFh=3mRN8%S zsf>V=Wm~OMV7Y4mttWUsdCQ}6TG+KwgNq$LAgKhcqq4+tN2P4F1?|`rCMaxiQ_C8Z zsRKdYC`-hJ{mf)bCq)>>u*J~9q&L}lFWQqUY0d|E{*xrJqz(>V>;+-Y< z#E{!YFQ4Z=m5V#(Yg!ZUIZQDG={XB#^uJ}!n zoy-XfjhU^D|7;a*mAs2)QPHiM3aNsHMx(^tBwWLin>t+DQXB7NS!f)SHspGtksKNr zZSFX`YDo@3p(51?stktFOgto}09*f+^5`ErsJ`q$C)-_ppM5Cy9R<868 z2rEz4T3w!8Nga#G9x5Fa)mBGQRiQb@g*LQ6XbTH%L@#;8olqO9w5Hv~mQf}Mt#Ct| zm*Z_((7tcKdu{ItOElgfSjj&7sc}Frw$A(GA za08vF$G=3&6bNV(<~D&6mMEbb1f29_hmnretx(e=yqB2w=3$sgA9Bq)9?T44;BX%g z3<{V)ULFyZ;kj+TU#nMX>5{?)5kK69%1H+4fwDJ`u-GPy(q3gLtcU5iW37!F8VpCV zDqed@AF;9Jp{aY`|jPg3f%_3IjHa|M9FcRv1%ogOgMxwS-gOdcbujHQuW&Y@u_UZy+WG!+myErJPBsHNSB4q+ z9Ui%o7=~Sn=h^EGmaS)IVXPNsw|d1JNLt6mHHU8N*nrq*;y#TnNb1-e88XF5ROtG< zS?V2k6!H5AqpBk6n}VteHHwZvD~87pnz>kmEh#E_EuyUB4kN*4vl^M0wF7W)jLGZ? z^;~C9$hbv#1$btOTRYOpB+*z{()&+#LSelO?6}yZh?mifS%t&xwE-W7YVNegdN_@x z*z8Wti#VPLV~2cSkl<6BP0v=NX)L=V~+u@P!`xE*se zOtMOkLb7INYcq4P&YZ$|K@BiV8tWq(k;3ILb;F6;tbn!QUux5IO<{si+spM{4PM6b zJc8uXGO)-(p8Q4DA?^+)>vF9kyJosAn3&Bfe7%i+6GI~{m&7VPjx^yt3?AG%(9WW1 zMCOG9nkkxGW=)M}RQJsU)ugiwn@Ao9vJUJ|J2&7fJJp#^jdetEM72xeKj@C4zy>bY zDBHo|Xwm`hOQub?*Rz?9o}}~K39uZSnJjiDRk4NF!;}%7D>~TZBXY(bc>6|p`hg~0 zp6}{Z*Yy0xd}w;H%`hS~2AUmS(CnL^h;{|Q@Eb$SIhX`twr~cv{q8Z`iJ2au0y2UJ z5aFC+bkv&g=G0(#bQb#*zM3vNHHe>B2Ai7-Gc{!}UFry?Sn>>E;*dF4_1m!1Xi}c= zd@oy-S_=ypYR+=E$vnLfBVr^nn8q$enJBjH2F(w=B!^1C2q0oLFOsGW4*k-chv) zF&MG=rQ7f5dL219BjZ`rV>VK_t6HQkh#O00nO1U=5#djK31<(@D1JyA-n$I5v9g54 z84l~Nv|u<3NAYF}*(n#;0L*>7-GzmEpxd03M+D}0)bTKk;13>v2qza|0Sf}I`D3V9 zW!h0_%+59m)k`$}Zj0ix%|VK| zlc*cE{+Rs0F5$%jrbwZMmg-LFk`D%TK$SYWGey#|RMRo4Cxv9t0* zIp%{Im)vNymJ(VN59TIp)%9rcA+KIO4h=Y-Z%-$$y}s_6p*a~Gj!oTMWV0Kaev8*N z5X*}c*gDcC2bA2A(cG|ofaYc)pTe@P0$C0-Xn>-foRyZMND`Jy73EN)&T#uu2=$G%y0WW*BTCD@w 
z73LS1y0AHe)nQsx;6$YBN6cKrd2$ob-c2q^_DcD`<0^lFqnf!q?zsASuvtQvmLL{V zqRk71iK-86thFM-ad~Fe^-6Hfb`jSboirt54H+PY&C*H|el=|@nARrux%G77WIv{M zEwdE1l*2HxF1D4re|vQ%%4`GOhWMlEYlBMI5j#b60CK1`T)JB3SgbXVt#EZY9~nHF zflco$GNhIN)0Pjdf{Hy3xrEp%UQHOsY69jta#KNZ$K8%unDnFG4h-1AlnQlykjXqv z%@Y(553yAz&lINkX0JHUW}B==hy>(_s)$!naOq~U%i<11xHsWk?Lrf)dXUrH!GobQ zVUAZ!OtqM~Z?jg{ucr>S|73$f4<;7FBNLXWz>82Mt2q9HoOh;z*vdq`yn@Iz)dAr_ zqA@K&X}XlEyTr-vga?q;HRKHg9e`ciJeFHnU4`6&HLNca&kFWXZYSN5w)(KEg}5#p zO$QH#MKs|_B8ZM6drH?XFGmf#Al!)s1e-JBC2xd$IPQc>S=)lmjc`(G0Mro_YMkBz zOB!oL9j^{um9;Tm!^3~$s)v!Ebg&Pc?ywT)(7N4#^C{+$TZ)OivYT8IjD7QjnnRmXx<_eUcNq)w<+>`T_M$F3^G&4E0`sB4j>7u*$DG%`ywc02oMpvM+Li)fB%igpIOiCyF zP_=m@fx=`o;83YH=^7mD5Mp;)wHS;xDzhFTFlb4ff+H=-CKE}sW-Fx^Y3YVWGv}tQ zEzymP1m*xdmmzb&ER@A|pV!MHLU;*+cd1YqDEZRH4KmY072-63Kl$I+gO#r3ZL&z_ z=;CrPcS!h$G?fjJQFYzI+>r;Bl?uXwmHWff!N3$un+?cW5e}*5)))UFsf>K`(12H2 zj)m<&CXb$R!eWf%@Iu%3V?Hq^htJ=5B%$We0A`}_SdmmD9a~|6jxGBGc~AT#vMPwP zHfHH|{SnEOix$aaaN-9E=Wu*Mc@`c49l1>o9pPfYfgTjz^X9g1#wq}B3edgzEzAgT zrh^u; zW#%&s#t``-1}3#}ki=9FPdrQ>u2J%((4r&<6hS+RXeDWpNLdUyn1Y}p*pVxPumd_i zW?`X@w?zfZ0tM(sA!)gU5|oUoBfFOO6mT!g`6D5g;E_2f7N0;w9>wU}IBm)SBr z$&QYiXk+_H6{ms4(j$-zZuzND)CA9(DxRr0dTNCT3W)}EM~SVK_YwV5m`PDG+&q^} ztw50yfp_)O;^(JG#7eo_i!z+NJbUwiRy$OyJ_T{oTgPZlT7(HQ zx`6PVBDN`bjd{_)dIcTK!CE3zt%%SW2!8IDu;EO7A`8oi3?Xj7&J*yTo*5HE z_B30!VBvh&6$0-Gb!hO`!Lle~#4;R^=wmE^kcHn}X;#3;w8JGNttEcYRA&khBQ;#q z5E_gKvNVf>SiXaEF2MM)gD=ASr(%0}NTkFfbhsgualI;VC zR|baV z?tEffDB5BCD!A{PT?j;Mp%-zJ$SzH<+s2wb@tCdq1`&mm_7_y% zu=F^=_Mx#I>oYh#*qg%6J!u52W6K}pi!p)0<-=N$u)f9{=W{qSrwV(G_xe}*9Xc6y zC1D$otaB%569Kxsiv0u#en;wfa6 zCJW9hHNRes&Jwn&EU{2%wlq&~;DMy$K?WYkf#_s+5(*SD)oU=-At%eR_i>@+l;!_# zx%QZrdKR-PUquc%2xj(VQ$C7SA{x|Yz|ux`W*OS1OJUoIm4+ZBo|PCJtsx;|7z^|8 zRhSB{jHwC^FE^06J_uV64@8`&Kw;~*=}1xceV)R7AsHX~%>3w-v*9k`h6pxfj}~DE zCZc^wEA2*w4JQlSGbd~xRrzm~jsSFxQxK|jrO;NWq9Y z+3;6^)X>EBX!F3wCoa>i;3qCGz?`AY2hC7mNa9d8I%Nj_G(LNYW||W@9td*x*nTS* z&U)_haXVDzx5S|e$Id_D?bg>N*ad;nwErPGB`{x;ubbV3s=?C!mE<7@@u(y^;)s(+ 
zKB2*%uSwgi6VZpcrY2MCC~+O4jNIpNWYSs4wCGu*6ekz=THLE$VbZxB!YZNLGR;5dj*4P2UQJB1ssDTCO+eE;sYk8E8Wk`6E zA*R`EOd5@%RggT=v+;P;*laV3Z>ue^>`6``Su&v{+kk6hbl~M;tSHhwgOyqi8O1A4 z!GA2gN2+4deJr)YDJpibkcokma0~KWQe;)CmctP;C#2`VV?ZYu2EhPndLja9PfgD1 zP9Vf|bXOYlIE0D0(eziZI7h+D_B}-eI?>k@=jdycLTEu>X2QcAEFowVSU_j%NqZ41 zcW}}M*4@LA2G|aXr5nkKNN@@u9ZutTH#T9hRlOOGdxprzmoP_$)q=`i@edyt2!5t5 zj}~8yJ$K**oT9@UqR3G(u}DL)*TNz;SDD_gmK{x=52rAGG@b_+mcL}T8k_TXSB~+m zA>vfxdmPW1ve`?&>*G*#Y06BV44M+YqNVx}`IPjSEDb_H`++7-JT%x(84)-MMKwdp zkQyeZ78GVu#(&5ZBArZ10t*l9HKM1Dq?u4)MeHn<4{>OGA=_IJGFX_f4R)xJeCMem zUwx&`w#uCvYag`~Xl+i))Urpk9jP{(!nUosX8dgidg0$xw(POqDS-&$zB7y9AG<1=ID|t4<83&u- z3D4*3q%uoH+K0{>v=7QA5IN|CnFysum=2V=f9sQPGh>KwY-n^KN|VFMMB8rN(J045 zw-P`PDSAi~H7PjMZD~pX-a{YvK@0_}xwOA$p|o5;lb6CXA2c?0=dgDZE4PWmwpxqf zfjyyJAl@)L7mLnLK4y?=WD#gP4R>sNeaNS^Ji6B0b_Sc6c+~~b`Zz-;xk>_i6Q8jb zZ!=DoXLW>lCCX!=kS=7znpvt|rW%(G7<|!AJCMvDZ2k7DIKmRg_(jJ4$xQ26>MTw9 zC*Zcj$HoD1v zYhf6Yy#d2;lZ6&xAGYxqol<$;e6*aF#Dg)~FekR{=TuY=S@VVIm%i5r=Qd$}j)kA{ zxR;qkzjZ9EKiCP75>~=uIChe0oSk5lg@lK#{RXC+IZZ@xXwW8J&0nDl6w4D2hb=_j zAB6jn_-T53?Ssupj zd(JSgjtt|^;YFNNF-%`TbB5VY7{>RR>5P|Qe+fQX`iSgO6`9x_UWK!2WXlIMAhNGH zGaQ7~DX&N#_BZ82Z%39~(sH9#LHbsYUJQg^gXK$S7;MDx#?Hg@`w?Vw8vdQG*}Yf) zu!{LOy0c^HpO5>; zZ{bO*UvRnG63hZpgm6BNcvoDkfcHMq~66z&h|E<6u43+;TY7qSF z`a(@$<+u0Pbr)#=HI)9XS8#I2u4|f}%1{5S{6gh8i}-``w)`fqlqYu0Jz4JU{g&S9 zzfaR|5;A9r8Wcaf9z0j>DLti$r+*nRD&k}DkAJ1;mG~?CvvL@=j6cHZhcvxi+x9|D zupxH6BbNRkH7I^|wR#FVD8FU(ME7JvT-V(AZjTGHF~ z>J>{VS8F#z?~SEjd!bO+wf;n-lxzI`p8-rmREMpz!I$(KZj|fs`tJ{s_WV%#t2O=A znqG1Nf&T2?u1A4W``P<*nm(uL1BTc1@&5l7aN>|4eM552-6Gc^?N}PS^t1O`K0gI4 z{Qgo)C`z7O-^-~6|5rG3D6Wq+xTUk}gR%72_cUEqu2&CU&3{=sgCB#! 
z6K3^qYx=gPfA7sIKs15X+tQzd^i= +#include +#include +#endif + +#include "gguf-util.h" +#include "gguf-llama.h" + +#include "ggml.h" +#ifdef GGML_USE_CUBLAS +#include "ggml-cuda.h" +#elif defined(GGML_USE_CLBLAST) +#include "ggml-opencl.h" +#endif + +#ifdef GGML_USE_METAL +#include "ggml-metal.h" +#endif +#ifdef GGML_USE_MPI +#include "ggml-mpi.h" +#endif +#ifdef GGML_USE_K_QUANTS +#ifndef QK_K +#ifdef GGML_QKK_64 +#define QK_K 64 +#else +#define QK_K 256 +#endif +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(_MSC_VER) +#pragma warning(disable: 4244 4267) // possible loss of data +#endif + +#define LLAMA_USE_SCRATCH +#define LLAMA_MAX_SCRATCH_BUFFERS 16 + +// available llama models +enum e_model { + MODEL_UNKNOWN, + MODEL_3B, + MODEL_7B, + MODEL_13B, + MODEL_30B, + MODEL_65B, + MODEL_70B, +}; + +static const size_t kB = 1024; +static const size_t MB = 1024*1024; + +// computed for n_ctx == 2048 +// TODO: dynamically determine these sizes +// needs modifications in ggml + +typedef void (*offload_func_t)(struct ggml_tensor * tensor); + +void llama_nop(struct ggml_tensor * tensor) { // don't offload by default + (void) tensor; +} + +// +// ggml helpers +// + +static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * graph, int n_threads) { + struct ggml_cplan plan = ggml_graph_plan(graph, n_threads); + + if (plan.work_size > 0) { + buf.resize(plan.work_size); + plan.work_data = buf.data(); + } + + ggml_graph_compute(graph, &plan); +} + +// +// memory sizes (calculated for n_batch == 512) +// + +static const std::map & MEM_REQ_SCRATCH0(int n_ctx) +{ + static std::map k_sizes = { + { MODEL_3B, ((size_t) n_ctx / 16ull + 92ull) * MB }, + { MODEL_7B, ((size_t) n_ctx / 16ull + 100ull) * MB }, + { MODEL_13B, ((size_t) n_ctx / 12ull + 120ull) * MB }, + { MODEL_30B, ((size_t) n_ctx / 9ull + 
160ull) * MB }, + { MODEL_65B, ((size_t) n_ctx / 6ull + 256ull) * MB }, // guess + { MODEL_70B, ((size_t) n_ctx / 7ull + 164ull) * MB }, + }; + return k_sizes; +} + +static const std::map & MEM_REQ_SCRATCH1() +{ + static std::map k_sizes = { + { MODEL_3B, 128ull * MB }, + { MODEL_7B, 160ull * MB }, + { MODEL_13B, 192ull * MB }, + { MODEL_30B, 256ull * MB }, + { MODEL_65B, 384ull * MB }, // guess + { MODEL_70B, 304ull * MB }, + }; + return k_sizes; +} + +// used to store the compute graph tensors + non-scratch data +static const std::map & MEM_REQ_EVAL() +{ + static std::map k_sizes = { + { MODEL_3B, 8ull * MB }, + { MODEL_7B, 10ull * MB }, + { MODEL_13B, 12ull * MB }, + { MODEL_30B, 16ull * MB }, + { MODEL_65B, 24ull * MB }, // guess + { MODEL_70B, 24ull * MB }, + }; + return k_sizes; +} + +// amount of VRAM needed per batch size to hold temporary results +// the values for 3b and 65b are not derived from testing but instead chosen conservatively +static const std::map & VRAM_REQ_SCRATCH_BASE() +{ + static std::map k_sizes = { + { MODEL_3B, 512ull * kB }, + { MODEL_7B, 512ull * kB }, + { MODEL_13B, 640ull * kB }, + { MODEL_30B, 768ull * kB }, + { MODEL_65B, 1536ull * kB }, + { MODEL_70B, 1536ull * kB }, // TODO (likely can be reduced) + }; + return k_sizes; +} + +// amount of VRAM needed per batch size and context to hold temporary results +// the values for 3b and 65b are not derived from testing but instead chosen conservatively +static const std::map & VRAM_REQ_SCRATCH_PER_CONTEXT() +{ + static std::map k_sizes = { + { MODEL_3B, 128ull }, + { MODEL_7B, 128ull }, + { MODEL_13B, 160ull }, + { MODEL_30B, 208ull }, + { MODEL_65B, 416ull }, + { MODEL_70B, 416ull }, // TODO (likely can be reduced) + }; + return k_sizes; +} + +// default hparams (LLaMA 7B) +struct llama_hparams { + uint32_t n_vocab = 32000; + uint32_t n_ctx = 512; // this is provided as user input? 
+ uint32_t n_embd = 4096; + uint32_t n_mult = 256; + uint32_t n_head = 32; + uint32_t n_head_kv = 32; + uint32_t n_layer = 32; + uint32_t n_rot = 64; + + // LLaMAv2 + // TODO: load from model data hparams + float f_ffn_mult = 1.0f; + float f_rms_norm_eps = LLAMA_DEFAULT_RMS_EPS; + + float rope_freq_base = 10000.0f; + float rope_freq_scale = 1.0f; + + enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16; + + bool operator!=(const llama_hparams & other) const { + return static_cast(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT + } + + uint32_t n_gqa() const { + return n_head/n_head_kv; + } + + uint32_t n_embd_head() const { + return n_embd/n_head; + } + + uint32_t n_embd_gqa() const { + return n_embd/n_gqa(); + } + + size_t kv_size() const { + size_t result = 2ull; + result *= (size_t) n_embd_gqa(); + result *= (size_t) n_ctx; + result *= (size_t) n_layer; + result *= sizeof(ggml_fp16_t); + return result; + } +}; + +struct llama_layer { + // normalization + struct ggml_tensor * attention_norm; + + // attention + struct ggml_tensor * wq; + struct ggml_tensor * wk; + struct ggml_tensor * wv; + struct ggml_tensor * wo; + + // normalization + struct ggml_tensor * ffn_norm; + + // ff + struct ggml_tensor * w1; + struct ggml_tensor * w2; + struct ggml_tensor * w3; +}; + +struct llama_kv_cache { + struct ggml_tensor * k = NULL; + struct ggml_tensor * v = NULL; + + struct ggml_context * ctx = NULL; + + gguf_ctx_buffer buf; + + int n; // number of tokens currently in the cache + + ~llama_kv_cache() { + if (ctx) { + ggml_free(ctx); + } + +#ifdef GGML_USE_CUBLAS + ggml_cuda_free_data(k); + ggml_cuda_free_data(v); +#endif // GGML_USE_CUBLAS + } +}; + +struct llama_vocab { + using id = int32_t; + using token = std::string; + + struct token_score { + token tok; + float score; + }; + + std::unordered_map token_to_id; + std::vector id_to_token; +}; + +struct llama_model { + e_model type = MODEL_UNKNOWN; + + llama_hparams hparams; + + struct ggml_tensor * tok_embeddings; + + 
struct ggml_tensor * norm; + struct ggml_tensor * output; + + std::vector layers; + int n_gpu_layers; + + // context + struct ggml_context * ctx = NULL; + + // the model memory buffer + gguf_ctx_buffer buf; + + // model memory mapped file + std::unique_ptr mapping; + + // objects representing data potentially being locked in memory + gguf_mlock mlock_buf; + gguf_mlock mlock_mmap; + + // for quantize-stats only + std::vector> tensors_by_name; + + int64_t t_load_us = 0; + int64_t t_start_us = 0; + + llama_vocab vocab; + + ~llama_model() { + if (ctx) { + ggml_free(ctx); + } + +#ifdef GGML_USE_CUBLAS + for (size_t i = 0; i < tensors_by_name.size(); ++i) { + ggml_cuda_free_data(tensors_by_name[i].second); + } + ggml_cuda_free_scratch(); +#elif defined(GGML_USE_CLBLAST) + for (size_t i = 0; i < tensors_by_name.size(); ++i) { + ggml_cl_free_data(tensors_by_name[i].second); + } +#endif + } +}; + +struct llama_context { + llama_context(const llama_model & model) : model(model), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {} +#ifdef GGML_USE_METAL + ~llama_context() { + if (ctx_metal) { + ggml_metal_free(ctx_metal); + } + } +#endif + std::mt19937 rng; + + bool has_evaluated_once = false; + + int64_t t_sample_us = 0; + int64_t t_eval_us = 0; + int64_t t_p_eval_us = 0; + + int32_t n_sample = 0; // number of tokens sampled + int32_t n_eval = 0; // number of eval calls + int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1) + + const llama_model & model; + + bool model_owner = false; + + int64_t t_load_us; + int64_t t_start_us; + + // key + value cache for the self attention + struct llama_kv_cache kv_self; + + size_t mem_per_token = 0; + + // decode output (2-dimensional array: [n_tokens][n_vocab]) + std::vector logits; + bool logits_all = false; + + // input embedding (1-dimensional array: [n_embd]) + std::vector embedding; + + // reusable buffer for `struct ggml_graph_plan.work_data` + std::vector work_buffer; + + // memory 
buffers used to evaluate the model + // TODO: move in llama_state + gguf_ctx_buffer buf_compute; + gguf_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS]; + +#ifdef GGML_USE_METAL + ggml_metal_context * ctx_metal = NULL; +#endif + +#ifdef GGML_USE_MPI + ggml_mpi_context * ctx_mpi = NULL; +#endif + + int buf_last = 0; + size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 }; + + void use_buf(struct ggml_context * ctx, int i) { +#if defined(LLAMA_USE_SCRATCH) + size_t last_size = 0; + + if (i == -1) { + last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, }); + } else { + auto & buf = buf_scratch[i]; + last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.addr, }); + } + + if (buf_last >= 0) { + buf_max_size[buf_last] = std::max(buf_max_size[buf_last], last_size); + } + + buf_last = i; +#else + (void) i; + (void) ctx; +#endif + } + + size_t get_buf_max_mem(int i) const { +#if defined(LLAMA_USE_SCRATCH) + return buf_max_size[i]; +#else + (void) i; + return 0; +#endif + } +}; + +template +static T checked_mul(T a, T b) { + T ret = a * b; + if (a != 0 && ret / a != b) { + throw std::runtime_error(format("overflow multiplying %llu * %llu", + (unsigned long long) a, (unsigned long long) b)); + } + return ret; +} + +static size_t checked_div(size_t a, size_t b) { + if (b == 0 || a % b != 0) { + throw std::runtime_error(format("error dividing %zu / %zu", a, b)); + } + return a / b; +} + +static std::string llama_format_tensor_shape(const std::vector & ne) { + char buf[256]; + snprintf(buf, sizeof(buf), "%5u", ne.at(0)); + for (size_t i = 1; i < ne.size(); i++) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i)); + } + return buf; +} + +static size_t llama_calc_tensor_size(const std::vector & ne, enum ggml_type type) { + size_t size = ggml_type_size(type); + for (uint32_t dim : ne) { + size = checked_mul(size, dim); + } + return size / ggml_blck_size(type); +} + +struct llama_load_tensor { + std::string name; + enum ggml_type type = GGML_TYPE_F32; + 
std::vector ne; + size_t file_off; + size_t size; + struct ggml_tensor * ggml_tensor = NULL; + uint8_t * data; +}; + +struct llama_load_tensors_map { + // tensors is kept in a separate vector to preserve file order + std::vector tensors; + std::unordered_map name_to_idx; +}; + +enum gguf_file_version { + gguf_file_VERSION_GGML, + gguf_file_VERSION_GGMF_V1, // added version field and scores in vocab + gguf_file_VERSION_GGJT_V1, // added padding + gguf_file_VERSION_GGJT_V2, // changed quantization format + gguf_file_VERSION_GGJT_V3, // changed Q4 and Q8 quantization format +}; + +struct gguf_file_loader { + gguf_file file; + gguf_context * gguf_ctx; + gguf_file_version file_version; + llama_hparams hparams; + llama_vocab vocab; +struct ggml_context * ctx_data = NULL; + + gguf_file_loader(const char * fname, llama_load_tensors_map & tensors_map) + : file(fname, "rb") { + fprintf(stderr, "llama.cpp: loading model from %s\n", fname); + + struct gguf_init_params params = { + /*.no_alloc = */ true, + /*.ctx = */ &ctx_data, + }; + + gguf_ctx = gguf_init_from_file(fname, params); + + read_tensor_metadata(tensors_map); + } + + uint32_t read_u32(const char * key) { + int i = gguf_find_key(gguf_ctx, key); + if (i == -1) { + throw std::runtime_error(format("cannot find param with key %s\n", key)); + } + + return gguf_get_val_u32(gguf_ctx, i); + } + + int read_n_vocab() { + int i = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens"); + if (i == -1) { + throw std::runtime_error("cannot find token list in GGUF file\n"); + } + + return gguf_get_arr_n(gguf_ctx, i); + } + + void read_hparams() { + + // TODO make keysconstants in header + // TODO: read all hparams from file + hparams.n_vocab = read_n_vocab(); + hparams.n_embd = read_u32("llama.embedding_length"); + //hparams.n_mult = file.read_u32(); + hparams.n_head = read_u32("llama.attention.head_count"); + hparams.n_layer = read_u32("llama.layer_count"); + //hparams.n_rot = file.read_u32(); + //hparams.ftype = (enum llama_ftype) 
file.read_u32(); + + // LLaMAv2 + hparams.n_head_kv = read_u32("llama.attention.head_count_kv"); + } + + void read_vocab() { + vocab.id_to_token.resize(hparams.n_vocab); + int token_idx = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens"); + if (token_idx == -1) { + throw std::runtime_error("cannot find token list in GGUF file\n"); + } + + int score_idx = gguf_find_key(gguf_ctx, "tokenizer.ggml.scores"); + if (score_idx == -1) { + throw std::runtime_error("cannot find token scores list in GGUF file\n"); + } + + for (uint32_t i = 0; i < hparams.n_vocab; i++) { + + std::string word = gguf_get_arr_str(gguf_ctx, token_idx, i); + + vocab.token_to_id[word] = i; + + auto & tok_score = vocab.id_to_token[i]; + tok_score.tok = std::move(word); + tok_score.score = gguf_get_arr_f32(gguf_ctx, score_idx, i); + } + } + + void read_tensor_metadata(llama_load_tensors_map & tensors_map) { + const int n_tensors = gguf_get_n_tensors(gguf_ctx); + + for (int i = 0; i < n_tensors; ++i) { + llama_load_tensor tensor; + const char * name = gguf_get_tensor_name(gguf_ctx, i); + + struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); + uint32_t n_dims = cur->n_dims; + tensor.type = cur->type; + tensor.ne.resize(n_dims); + memcpy(tensor.ne.data(), &cur->ne[0], sizeof(tensor.ne[0]) * n_dims); + if (n_dims < 1 || n_dims > 2) { + throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name, n_dims)); + } + switch (tensor.type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + break; + default: { + throw std::runtime_error(format("unrecognized tensor type %u\n", tensor.type)); + } + } + + + tensor.file_off = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, i); + + tensor.name = name; + tensor.size = 
llama_calc_tensor_size(tensor.ne, tensor.type); + + tensors_map.tensors.push_back(tensor); + tensors_map.name_to_idx[name] = tensors_map.tensors.size() - 1; + } + } +}; + +struct gguf_file_saver { + gguf_file file; + gguf_file_loader * any_file_loader; + gguf_file_saver(const char * fname, gguf_file_loader * any_file_loader, enum llama_ftype new_ftype) + : file(fname, "wb"), any_file_loader(any_file_loader) { + fprintf(stderr, "llama.cpp: saving model to %s\n", fname); + write_magic(); + write_hparams(new_ftype); + write_vocab(); + } + void write_magic() { + } + void write_hparams(enum llama_ftype new_ftype) { + const llama_hparams & hparams = any_file_loader->hparams; + GGML_UNUSED(hparams); + GGML_UNUSED(new_ftype); + } + void write_vocab() { + uint32_t n_vocab = any_file_loader->hparams.n_vocab; + GGML_UNUSED(n_vocab); + } + void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) { + switch (new_type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + break; + default: GGML_ASSERT(false); + } + + } +}; + +struct llama_model_loader { + std::unique_ptr file_loader; + llama_load_tensors_map tensors_map; + bool use_mmap; + size_t num_ggml_tensors_created = 0; + struct ggml_context * ggml_ctx = NULL; + std::unique_ptr mapping; + + llama_model_loader(const std::string & fname_base, bool use_mmap) { + file_loader = std::unique_ptr(new gguf_file_loader(fname_base.c_str(), tensors_map)); + if (!gguf_mmap::SUPPORTED) { + use_mmap = false; + } + this->use_mmap = use_mmap; + } + + void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const { + *ctx_size_p = *mmapped_size_p = 0; + for (const llama_load_tensor & lt : tensors_map.tensors) { + *ctx_size_p += sizeof(struct ggml_tensor) + 
GGML_OBJECT_SIZE; + *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size + 16; + } + } + + struct ggml_tensor * get_tensor(const std::string & name, const std::vector & ne, ggml_backend backend) { + auto it = tensors_map.name_to_idx.find(name); + if (it == tensors_map.name_to_idx.end()) { + throw std::runtime_error(std::runtime_error(format("llama.cpp: tensor '%s' is missing from model", name.c_str()))); + } + llama_load_tensor & lt = tensors_map.tensors.at(it->second); + if (lt.ne != ne) { + throw std::runtime_error(format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s", + name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str())); + } + + return get_tensor_for(lt, backend); + } + + struct ggml_tensor * get_tensor_for(llama_load_tensor & lt, ggml_backend backend) { + struct ggml_tensor * tensor; + if (backend != GGML_BACKEND_CPU) { + ggml_set_no_alloc(ggml_ctx, true); + } + if (lt.ne.size() == 2) { + tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1)); + } else { + GGML_ASSERT(lt.ne.size() == 1); + tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0)); + } + ggml_set_name(tensor, lt.name.c_str()); + GGML_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor + + if (backend != GGML_BACKEND_CPU) { + ggml_set_no_alloc(ggml_ctx, use_mmap); + } + tensor->backend = backend; + lt.ggml_tensor = tensor; + num_ggml_tensors_created++; + return tensor; + } + + void done_getting_tensors() const { + if (num_ggml_tensors_created != tensors_map.tensors.size()) { + throw std::runtime_error(std::string("llama.cpp: file contained more tensors than expected")); + } + } + + void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, gguf_mlock * lmlock) { + size_t data_size = 0; + size_t prefetch_size = 0; + size_t lock_size = 0; + for (const llama_load_tensor & lt : tensors_map.tensors) { + data_size += lt.size; + if 
(lt.ggml_tensor->backend == GGML_BACKEND_CPU) { + prefetch_size += lt.size; + } + } + + if (use_mmap) { + mapping.reset(new gguf_mmap(&file_loader->file, prefetch_size, ggml_is_numa())); + if (lmlock) { + lmlock->init(mapping->addr); + } + } + + size_t done_size = 0; + for (llama_load_tensor & lt : tensors_map.tensors) { + if (progress_callback) { + progress_callback((float) done_size / data_size, progress_callback_user_data); + } + GGML_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already + lt.data = (uint8_t *) lt.ggml_tensor->data; + + // allocate temp buffer if not using mmap + if (!use_mmap && lt.data == NULL) { + GGML_ASSERT(lt.ggml_tensor->backend != GGML_BACKEND_CPU); + lt.data = (uint8_t*)malloc(ggml_nbytes(lt.ggml_tensor)); + } + + load_data_for(lt); + + switch(lt.ggml_tensor->backend) { + case GGML_BACKEND_CPU: + lt.ggml_tensor->data = lt.data; + if (use_mmap && lmlock) { + lock_size += lt.size; + lmlock->grow_to(lock_size); + } + break; +#if defined(GGML_USE_CUBLAS) + case GGML_BACKEND_GPU: + case GGML_BACKEND_GPU_SPLIT: + ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor); + if (!use_mmap) { + free(lt.data); + } + break; +#elif defined(GGML_USE_CLBLAST) + case GGML_BACKEND_GPU: + ggml_cl_transform_tensor(lt.data, lt.ggml_tensor); + if (!use_mmap) { + free(lt.data); + } + break; +#endif + default: + continue; + } + + done_size += lt.size; + } + } + + void load_data_for(llama_load_tensor & lt) { + if (use_mmap) { + lt.data = (uint8_t *) mapping->addr + lt.file_off; + } else { + gguf_file & file = file_loader->file; + file.seek(lt.file_off, SEEK_SET); + // TODO + //file.read_raw(lt.data, lt.size); + } + + if (0) { + print_checksum(lt); + } + } + + static void print_checksum(llama_load_tensor & lt) { + uint32_t sum = 0; + for (size_t i = 0; i < lt.size; i++) { + uint8_t byte = lt.data[i]; + sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash + } + fprintf(stderr, "%s checksum: %#08x (%s, size %zu)\n", 
lt.name.c_str(), sum, + llama_format_tensor_shape(lt.ne).c_str(), lt.size); + } + +}; + +// +// kv cache +// + +static bool kv_cache_init( + const struct llama_hparams & hparams, + struct llama_kv_cache & cache, + ggml_type wtype, + int n_ctx, + int n_gpu_layers) { + const int n_embd = hparams.n_embd_gqa(); + const int n_layer = hparams.n_layer; + + const int64_t n_mem = n_layer*n_ctx; + const int64_t n_elements = n_embd*n_mem; + + cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB); + cache.n = 0; + + struct ggml_init_params params; + params.mem_size = cache.buf.size; + params.mem_buffer = cache.buf.addr; + params.no_alloc = false; + + cache.ctx = ggml_init(params); + + if (!cache.ctx) { + fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__); + return false; + } + + cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements); + cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements); + ggml_set_name(cache.k, "cache_k"); + ggml_set_name(cache.v, "cache_v"); + + (void) n_gpu_layers; +#ifdef GGML_USE_CUBLAS + if (n_gpu_layers > n_layer + 1) { + ggml_cuda_assign_buffers_no_scratch(cache.v); + } + if (n_gpu_layers > n_layer + 2) { + ggml_cuda_assign_buffers_no_scratch(cache.k); + } +#endif // GGML_USE_CUBLAS + + return true; +} + +struct llama_context_params llama_context_default_params() { + struct llama_context_params result = { + /*.seed =*/ LLAMA_DEFAULT_SEED, + /*.n_ctx =*/ 512, + /*.n_batch =*/ 512, + /*.n_gqa =*/ 1, + /*.rms_norm_eps =*/ LLAMA_DEFAULT_RMS_EPS, + /*.gpu_layers =*/ 0, + /*.main_gpu =*/ 0, + /*.tensor_split =*/ nullptr, + /*.rope_freq_base =*/ 10000.0f, + /*.rope_freq_scale =*/ 1.0f, + /*.progress_callback =*/ nullptr, + /*.progress_callback_user_data =*/ nullptr, + /*.low_vram =*/ false, + /*.f16_kv =*/ true, + /*.logits_all =*/ false, + /*.vocab_only =*/ false, + /*.use_mmap =*/ true, + /*.use_mlock =*/ false, + /*.embedding =*/ false, + }; + + return result; +} + +struct llama_model_quantize_params 
llama_model_quantize_default_params() { + struct llama_model_quantize_params result = { + /*.nthread =*/ 0, + /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1, + /*.allow_requantize =*/ false, + /*.quantize_output_tensor =*/ true, + }; + + return result; +} + +int llama_max_devices() { + return LLAMA_MAX_DEVICES; +} + +bool llama_mmap_supported() { + return gguf_mmap::SUPPORTED; +} + +bool llama_mlock_supported() { + return gguf_mlock::SUPPORTED; +} + +void llama_backend_init(bool numa) { + ggml_time_init(); + + // needed to initialize f16 tables + { + struct ggml_init_params params = { 0, NULL, false }; + struct ggml_context * ctx = ggml_init(params); + ggml_free(ctx); + } + + if (numa) { + ggml_numa_init(); + } + +#ifdef GGML_USE_MPI + ggml_mpi_backend_init(); +#endif +} + +void llama_backend_free() { +#ifdef GGML_USE_MPI + ggml_mpi_backend_free(); +#endif +} + +int64_t llama_time_us() { + return ggml_time_us(); +} + +// +// model loading +// + +static const char *gguf_file_version_name(gguf_file_version version) { + switch (version) { + case gguf_file_VERSION_GGML: return "'ggml' (old version with low tokenizer quality and no mmap support)"; + case gguf_file_VERSION_GGMF_V1: return "ggmf v1 (old version with no mmap support)"; + case gguf_file_VERSION_GGJT_V1: return "ggjt v1 (pre #1405)"; + case gguf_file_VERSION_GGJT_V2: return "ggjt v2 (pre #1508)"; + case gguf_file_VERSION_GGJT_V3: return "ggjt v3 (latest)"; + } + + return "unknown"; +} + +static const char *llama_ftype_name(enum llama_ftype ftype) { + switch (ftype) { + case LLAMA_FTYPE_ALL_F32: return "all F32"; + case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16"; + case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0"; + case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1"; + case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16: + return "mostly Q4_1, some F16"; + case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0"; + case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1"; + case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0"; + // K-quants + 
case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K"; + case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large"; + case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "mostly Q4_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K"; + default: return "unknown, may not work"; + } +} + +static const char *llama_model_type_name(e_model type) { + switch (type) { + case MODEL_3B: return "3B"; + case MODEL_7B: return "7B"; + case MODEL_13B: return "13B"; + case MODEL_30B: return "30B"; + case MODEL_65B: return "65B"; + case MODEL_70B: return "70B"; + default: GGML_ASSERT(false); + } +} + +static void llama_model_load_internal( + const std::string & fname, + llama_model & model, + llama_vocab & vocab, + int n_ctx, + int n_batch, + int n_gqa, + float rms_norm_eps, + int n_gpu_layers, + int main_gpu, + const float * tensor_split, + float rope_freq_base, + float rope_freq_scale, + bool low_vram, + ggml_type memory_type, + bool use_mmap, + bool use_mlock, + bool vocab_only, + llama_progress_callback progress_callback, + void * progress_callback_user_data) { + + model.t_start_us = ggml_time_us(); + + std::unique_ptr ml(new llama_model_loader(fname, use_mmap)); + + vocab = std::move(ml->file_loader->vocab); + model.hparams = ml->file_loader->hparams; + model.n_gpu_layers = n_gpu_layers; + gguf_file_version file_version = ml->file_loader->file_version; + + auto & hparams = model.hparams; + + // TODO: read from file + hparams.f_rms_norm_eps = rms_norm_eps; + + { + switch (hparams.n_layer) { + case 26: model.type = e_model::MODEL_3B; break; + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_13B; break; + case 60: model.type = 
e_model::MODEL_30B; break; + case 80: model.type = e_model::MODEL_65B; break; + default: + { + if (hparams.n_layer < 32) { + model.type = e_model::MODEL_7B; + } + } break; + } + + hparams.n_ctx = n_ctx; + + // LLaMAv2 + hparams.n_head_kv = hparams.n_head / n_gqa; + if (model.type == e_model::MODEL_65B && n_gqa == 8) { + fprintf(stderr, "%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa); + model.type = e_model::MODEL_70B; + hparams.f_ffn_mult = 1.3f; // from the params.json of the 70B model + } + + hparams.rope_freq_base = rope_freq_base; + hparams.rope_freq_scale = rope_freq_scale; + } + + // ref: https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/model.py#L194-L199 + const uint32_t n_ff_raw = 2*(4*hparams.n_embd)/3; + const uint32_t n_ff_mult = hparams.f_ffn_mult*n_ff_raw; + const uint32_t n_ff = ((n_ff_mult + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult; + //const uint32_t n_ff = 28672; + + { + fprintf(stderr, "%s: format = %s\n", __func__, gguf_file_version_name(file_version)); + fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab); + fprintf(stderr, "%s: n_ctx = %u\n", __func__, hparams.n_ctx); + fprintf(stderr, "%s: n_embd = %u\n", __func__, hparams.n_embd); + fprintf(stderr, "%s: n_mult = %u\n", __func__, hparams.n_mult); + fprintf(stderr, "%s: n_head = %u\n", __func__, hparams.n_head); + fprintf(stderr, "%s: n_head_kv = %u\n", __func__, hparams.n_head_kv); + fprintf(stderr, "%s: n_layer = %u\n", __func__, hparams.n_layer); + fprintf(stderr, "%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. 
n_embd_head, n_head_dim + fprintf(stderr, "%s: n_gqa = %u\n", __func__, hparams.n_gqa()); + fprintf(stderr, "%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps); + fprintf(stderr, "%s: n_ff = %u\n", __func__, n_ff); + fprintf(stderr, "%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base); + fprintf(stderr, "%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale); + fprintf(stderr, "%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype)); + fprintf(stderr, "%s: model size = %s\n", __func__, llama_model_type_name(model.type)); + } + + if (file_version < gguf_file_VERSION_GGJT_V2) { + if (hparams.ftype != LLAMA_FTYPE_ALL_F32 && + hparams.ftype != LLAMA_FTYPE_MOSTLY_F16 && + hparams.ftype != LLAMA_FTYPE_MOSTLY_Q8_0) { + throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1405)")); + } + } + + if (file_version < gguf_file_VERSION_GGJT_V3) { + if (hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || + hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_1 || + hparams.ftype == LLAMA_FTYPE_MOSTLY_Q8_0) { + throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1508)")); + } + } + + if (vocab_only) { + return; + } + + auto & ctx = model.ctx; + + size_t ctx_size; + size_t mmapped_size; + ml->calc_sizes(&ctx_size, &mmapped_size); + fprintf(stderr, "%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0); + + // create the ggml context + { + model.buf.resize(ctx_size); + if (use_mlock) { + model.mlock_buf.init (model.buf.addr); + model.mlock_buf.grow_to(model.buf.size); + } + + struct ggml_init_params params = { + /*.mem_size =*/ model.buf.size, + /*.mem_buffer =*/ model.buf.addr, + /*.no_alloc =*/ ml->use_mmap, + }; + + model.ctx = ggml_init(params); + if (!model.ctx) { + throw std::runtime_error(format("ggml_init() failed")); + } + } + + (void) main_gpu; +#if defined(GGML_USE_CUBLAS) + fprintf(stderr, "%s: using CUDA for 
GPU acceleration\n", __func__); + ggml_cuda_set_main_device(main_gpu); +#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU +#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT +#elif defined(GGML_USE_CLBLAST) + fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__); +#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU +#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU +#else +#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU +#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_CPU +#endif + + // prepare memory for the weights + size_t vram_weights = 0; + size_t vram_scratch = 0; + { + const uint32_t n_embd = hparams.n_embd; + const uint32_t n_embd_gqa = hparams.n_embd_gqa(); + const uint32_t n_layer = hparams.n_layer; + const uint32_t n_vocab = hparams.n_vocab; + + ml->ggml_ctx = ctx; + + model.tok_embeddings = ml->get_tensor("tok_embeddings.weight", {n_embd, n_vocab}, GGML_BACKEND_CPU); + + // "output" tensor + { + ggml_backend backend_norm; + ggml_backend backend_output; + if (n_gpu_layers > int(n_layer)) { // NOLINT + // norm is not performance relevant on its own but keeping it in VRAM reduces data copying + // on Windows however this is detrimental unless everything is on the GPU +#ifndef _WIN32 + backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; +#else + backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? 
GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; +#endif // _WIN32 + + backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT; + } else { + backend_norm = GGML_BACKEND_CPU; + backend_output = GGML_BACKEND_CPU; + } + + model.norm = ml->get_tensor("norm.weight", {n_embd}, backend_norm); + model.output = ml->get_tensor("output.weight", {n_embd, n_vocab}, backend_output); + if (backend_norm == GGML_BACKEND_GPU) { + vram_weights += ggml_nbytes(model.norm); + } + if (backend_output == GGML_BACKEND_GPU_SPLIT) { + vram_weights += ggml_nbytes(model.output); + } + } + + const int i_gpu_start = n_layer - n_gpu_layers; + + model.layers.resize(n_layer); + for (uint32_t i = 0; i < n_layer; ++i) { + const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT + const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT + + auto & layer = model.layers[i]; + + std::string layers_i = "layers." + std::to_string(i); + + layer.attention_norm = ml->get_tensor(layers_i + ".attention_norm.weight", {n_embd}, backend); + + layer.wq = ml->get_tensor(layers_i + ".attention.wq.weight", {n_embd, n_embd}, backend_split); + layer.wk = ml->get_tensor(layers_i + ".attention.wk.weight", {n_embd, n_embd_gqa}, backend_split); + layer.wv = ml->get_tensor(layers_i + ".attention.wv.weight", {n_embd, n_embd_gqa}, backend_split); + layer.wo = ml->get_tensor(layers_i + ".attention.wo.weight", {n_embd, n_embd}, backend_split); + + layer.ffn_norm = ml->get_tensor(layers_i + ".ffn_norm.weight", {n_embd}, backend); + + layer.w1 = ml->get_tensor(layers_i + ".feed_forward.w1.weight", {n_embd, n_ff}, backend_split); + layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", { n_ff, n_embd}, backend_split); + layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd, n_ff}, backend_split); + + if (backend == GGML_BACKEND_GPU) { + vram_weights += + ggml_nbytes(layer.attention_norm) + ggml_nbytes(layer.wq) + 
ggml_nbytes(layer.wk) + + ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + + ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3); + } + } + } + + ml->done_getting_tensors(); + + // print memory requirements + { + const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1; + + // this is the total memory required to run the inference + const size_t mem_required = + ctx_size + + mmapped_size - vram_weights + // weights in VRAM not in memory + MEM_REQ_SCRATCH0(hparams.n_ctx).at(model.type) + + MEM_REQ_SCRATCH1().at(model.type) + + MEM_REQ_EVAL().at(model.type); + + // this is the memory required by one llama_state + const size_t mem_required_state = + scale*hparams.kv_size(); + + fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__, + mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0); + + (void) vram_scratch; + (void) n_batch; +#ifdef GGML_USE_CUBLAS + if (low_vram) { + fprintf(stderr, "%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__); + ggml_cuda_set_scratch_size(0); // disable scratch + } else { + const size_t vram_scratch_base = VRAM_REQ_SCRATCH_BASE().at(model.type); + const size_t vram_scratch_per_context = VRAM_REQ_SCRATCH_PER_CONTEXT().at(model.type); + vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context); + ggml_cuda_set_scratch_size(vram_scratch); + if (n_gpu_layers > 0) { + fprintf(stderr, "%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n", + __func__, vram_scratch_base / kB, vram_scratch_per_context, + (vram_scratch + MB - 1) / MB); // round up + } + } +#endif // GGML_USE_CUBLAS + +#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) + const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer)); + + fprintf(stderr, "%s: offloading %d repeating layers to GPU\n", __func__, n_gpu); + if (n_gpu_layers > (int) hparams.n_layer) { + fprintf(stderr, "%s: offloading 
non-repeating layers to GPU\n", __func__); + } + size_t vram_kv_cache = 0; + +#ifdef GGML_USE_CUBLAS + const int max_backend_supported_layers = hparams.n_layer + 3; + const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3; + if (n_gpu_layers > (int) hparams.n_layer + 1) { + if (low_vram) { + fprintf(stderr, "%s: cannot offload v cache to GPU due to low VRAM option\n", __func__); + } else { + fprintf(stderr, "%s: offloading v cache to GPU\n", __func__); + vram_kv_cache += hparams.kv_size() / 2; + } + } + if (n_gpu_layers > (int) hparams.n_layer + 2) { + if (low_vram) { + fprintf(stderr, "%s: cannot offload k cache to GPU due to low VRAM option\n", __func__); + } else { + fprintf(stderr, "%s: offloading k cache to GPU\n", __func__); + vram_kv_cache += hparams.kv_size() / 2; + } + } +#elif defined(GGML_USE_CLBLAST) + const int max_backend_supported_layers = hparams.n_layer + 1; + const int max_offloadable_layers = hparams.n_layer + 1; +#endif // GGML_USE_CUBLAS + + fprintf(stderr, "%s: offloaded %d/%d layers to GPU\n", + __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers); + fprintf(stderr, "%s: total VRAM used: %zu MB\n", + __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up +#else + (void) n_gpu_layers; +#endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) + } + + // populate `tensors_by_name` + for (llama_load_tensor & lt : ml->tensors_map.tensors) { + model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor); + } + + (void) tensor_split; +#if defined(GGML_USE_CUBLAS) + { + ggml_cuda_set_tensor_split(tensor_split); + } +#endif + + ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? 
&model.mlock_mmap : NULL); + + if (progress_callback) { + progress_callback(1.0f, progress_callback_user_data); + } + + model.mapping = std::move(ml->mapping); + + // loading time will be recalculate after the first eval, so + // we take page faults deferred by mmap() into consideration + model.t_load_us = ggml_time_us() - model.t_start_us; +} + +static bool llama_model_load( + const std::string & fname, + llama_model & model, + llama_vocab & vocab, + int n_ctx, + int n_batch, + int n_gqa, + float rms_norm_eps, + int n_gpu_layers, + int main_gpu, + const float * tensor_split, + float rope_freq_base, + float rope_freq_scale, + bool low_vram, + ggml_type memory_type, + bool use_mmap, + bool use_mlock, + bool vocab_only, + llama_progress_callback progress_callback, + void *progress_callback_user_data) { + try { + llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, rms_norm_eps, n_gpu_layers, main_gpu, tensor_split, rope_freq_base, rope_freq_scale, low_vram, memory_type, + use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data); + return true; + } catch (const std::exception & err) { + fprintf(stderr, "error loading model: %s\n", err.what()); + return false; + } +} + +// evaluate the transformer +// +// - lctx: llama context +// - tokens: new batch of tokens to process +// - embd embeddings input +// - n_tokens number of tokens +// - n_past: the context size so far +// - n_threads: number of threads to use +// +static bool llama_eval_internal( + llama_context & lctx, + const llama_token * tokens, + const float * embd, + int n_tokens, + int n_past, + int n_threads, + const char * cgraph_fname) { + + GGML_ASSERT((!tokens && embd) || (tokens && !embd)); + +#ifdef GGML_USE_MPI + ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads); +#endif + + const int64_t t_start_us = ggml_time_us(); + + const int N = n_tokens; + + const auto & model = lctx.model; + const auto & hparams = model.hparams; + + const auto & kv_self = 
lctx.kv_self; + + GGML_ASSERT(!!kv_self.ctx); + + const int64_t n_embd = hparams.n_embd; + const int64_t n_layer = hparams.n_layer; + const int64_t n_ctx = hparams.n_ctx; + const int64_t n_head = hparams.n_head; + const int64_t n_head_kv = hparams.n_head_kv; + const int64_t n_embd_head = hparams.n_embd_head(); + const int64_t n_vocab = hparams.n_vocab; + const int64_t n_embd_gqa = hparams.n_embd_gqa(); + + + GGML_ASSERT(n_embd_head == hparams.n_rot); + + const float freq_base = hparams.rope_freq_base; + const float freq_scale = hparams.rope_freq_scale; + const float rms_norm_eps = hparams.f_rms_norm_eps; + + const int n_gpu_layers = model.n_gpu_layers; + + auto & mem_per_token = lctx.mem_per_token; + auto & buf_compute = lctx.buf_compute; + + struct ggml_init_params params = { + /*.mem_size =*/ buf_compute.size, + /*.mem_buffer =*/ buf_compute.addr, + /*.no_alloc =*/ false, + }; + + struct ggml_context * ctx0 = ggml_init(params); + + ggml_cgraph * gf = ggml_new_graph(ctx0); + + // for big prompts, if BLAS is enabled, it is better to use only one thread + // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance + n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 
1 : n_threads; + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + if (tokens) { + struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens)); + ggml_set_name(inp_tokens, "inp_tokens"); + + inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens); + } else { +#ifdef GGML_USE_MPI + GGML_ASSERT(false && "not implemented"); +#endif + + inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N); + memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL)); + } + + const int i_gpu_start = n_layer - n_gpu_layers; + (void) i_gpu_start; + + // offload functions set the tensor output backend to GPU + // tensors are GPU-accelerated if any input or the output has been offloaded + // + // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal + // in that case ggml_cuda_assign_buffers has no effect + offload_func_t offload_func_nr = llama_nop; // nr = non-repeating + offload_func_t offload_func_kq = llama_nop; + offload_func_t offload_func_v = llama_nop; + +#ifdef GGML_USE_CUBLAS + if (n_gpu_layers > n_layer) { + offload_func_nr = ggml_cuda_assign_buffers; + } + if (n_gpu_layers > n_layer + 1) { + offload_func_v = ggml_cuda_assign_buffers; + } + if (n_gpu_layers > n_layer + 2) { + offload_func_kq = ggml_cuda_assign_buffers; + } +#endif // GGML_USE_CUBLAS + + for (int il = 0; il < n_layer; ++il) { + ggml_format_name(inpL, "layer_inp_%d", il); + + offload_func_t offload_func = llama_nop; + +#ifdef GGML_USE_CUBLAS + if (il >= i_gpu_start) { + offload_func = ggml_cuda_assign_buffers; + } +#endif // GGML_USE_CUBLAS + + struct ggml_tensor * inpSA = inpL; + + lctx.use_buf(ctx0, 0); + + // norm + { + cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps); + offload_func(cur); + ggml_set_name(cur, "rms_norm_0"); + + // cur = cur*attention_norm(broadcasted) + cur = ggml_mul(ctx0, cur, model.layers[il].attention_norm); + offload_func(cur); + ggml_set_name(cur, 
"attention_norm_0"); + } + + // self-attention + { + // compute Q and K and RoPE them + struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur); + offload_func_kq(tmpk); + ggml_set_name(tmpk, "tmpk"); + + struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur); + offload_func_kq(tmpq); + ggml_set_name(tmpq, "tmpq"); + + struct ggml_tensor * Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale); + offload_func_kq(Kcur); + ggml_set_name(Kcur, "Kcur"); + + struct ggml_tensor * Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale); + offload_func_kq(Qcur); + ggml_set_name(Qcur, "Qcur"); + + // store key and value to memory + { + // compute the transposed [N, n_embd] V matrix + + struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur); + offload_func_v(tmpv); + ggml_set_name(tmpv, "tmpv"); + + struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, N)); + offload_func_v(Vcur); + ggml_set_name(Vcur, "Vcur"); + + struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past)); + offload_func_kq(k); + ggml_set_name(k, "k"); + + struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa, + ( n_ctx)*ggml_element_size(kv_self.v), + (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v)); + offload_func_v(v); + ggml_set_name(v, "v"); + + // important: storing RoPE-ed version of K in the KV cache! 
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); + ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); + } + + struct ggml_tensor * Q = + ggml_permute(ctx0, + Qcur, + 0, 2, 1, 3); + offload_func_kq(Q); + ggml_set_name(Q, "Q"); + + struct ggml_tensor * K = + ggml_permute(ctx0, + ggml_reshape_3d(ctx0, + ggml_view_1d(ctx0, kv_self.k, (n_past + N)*n_embd_gqa, il*n_ctx*ggml_element_size(kv_self.k)*n_embd_gqa), + n_embd_head, n_head_kv, n_past + N), + 0, 2, 1, 3); + offload_func_kq(K); + ggml_set_name(K, "K"); + + // K * Q + struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); + offload_func_kq(KQ); + ggml_set_name(KQ, "KQ"); + + // KQ_scaled = KQ / sqrt(n_embd_head) + struct ggml_tensor * KQ_scale = ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)); + ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); + + // KQ_scaled shape [n_past + N, N, n_head, 1] + struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale); + offload_func_kq(KQ_scaled); + ggml_set_name(KQ_scaled, "KQ_scaled"); + + // KQ_masked = mask_past(KQ_scaled) + struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past); + offload_func_kq(KQ_masked); + ggml_set_name(KQ_masked, "KQ_masked"); + + // KQ = soft_max(KQ_masked) + struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked); + offload_func_v(KQ_soft_max); + ggml_set_name(KQ_soft_max, "KQ_soft_max"); + + // split cached V into n_head heads + struct ggml_tensor * V = + ggml_view_3d(ctx0, kv_self.v, + n_past + N, n_embd_head, n_head_kv, + n_ctx*ggml_element_size(kv_self.v), + n_ctx*ggml_element_size(kv_self.v)*n_embd_head, + n_ctx*ggml_element_size(kv_self.v)*n_embd_gqa*il); + offload_func_v(V); + ggml_set_name(V, "V"); + +#if 1 + struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); + offload_func_v(KQV); + ggml_set_name(KQV, "KQV"); +#else + // make V contiguous in memory to speed up the matmul, however we waste time on the copy + // on M1 this is faster for the perplexity 
computation, but ~5% slower for the single-token generation + // is there a better way? + struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd_head, n_head)); + struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max); +#endif + + // KQV_merged = KQV.permute(0, 2, 1, 3) + struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + offload_func_v(KQV_merged); + ggml_set_name(KQV_merged, "KQV_merged"); + + // cur = KQV_merged.contiguous().view(n_embd, N) + cur = ggml_cpy(ctx0, + KQV_merged, + ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); + offload_func_v(cur); + ggml_set_name(cur, "KQV_merged_contiguous"); + + // projection (no bias) + cur = ggml_mul_mat(ctx0, + model.layers[il].wo, + cur); + offload_func(cur); + ggml_set_name(cur, "result_wo"); + } + + lctx.use_buf(ctx0, 1); + + struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA); + offload_func(inpFF); + ggml_set_name(inpFF, "inpFF"); + + // feed-forward network + { + // norm + { + cur = ggml_rms_norm(ctx0, inpFF, rms_norm_eps); + offload_func(cur); + ggml_set_name(cur, "rms_norm_1"); + + // cur = cur*ffn_norm(broadcasted) + cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm); + offload_func(cur); + ggml_set_name(cur, "ffn_norm"); + } + + struct ggml_tensor * tmp = ggml_mul_mat(ctx0, + model.layers[il].w3, + cur); + offload_func(tmp); + ggml_set_name(tmp, "result_w3"); + + cur = ggml_mul_mat(ctx0, + model.layers[il].w1, + cur); + offload_func(cur); + ggml_set_name(cur, "result_w1"); + + // SILU activation + cur = ggml_silu(ctx0, cur); + offload_func(cur); + ggml_set_name(cur, "silu"); + + cur = ggml_mul(ctx0, cur, tmp); + offload_func(cur); + ggml_set_name(cur, "silu_x_result_w3"); + + cur = ggml_mul_mat(ctx0, + model.layers[il].w2, + cur); + offload_func(cur); + ggml_set_name(cur, "result_w2"); + } + + cur = ggml_add(ctx0, cur, inpFF); + offload_func(cur); + ggml_set_name(cur, "inpFF_+_result_w2"); + + // input for next 
layer + inpL = cur; + } + + lctx.use_buf(ctx0, 0); + + // used at the end to optionally extract the embeddings + struct ggml_tensor * embeddings = NULL; + + // norm + { + cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps); + offload_func_nr(cur); + ggml_set_name(cur, "rms_norm_2"); + + // cur = cur*norm(broadcasted) + cur = ggml_mul(ctx0, cur, model.norm); + // offload_func_nr(cur); // TODO CPU + GPU mirrored backend + ggml_set_name(cur, "result_norm"); + + embeddings = cur; + } + + // lm_head + cur = ggml_mul_mat(ctx0, model.output, cur); + ggml_set_name(cur, "result_output"); + + lctx.use_buf(ctx0, -1); + + // logits -> probs + //cur = ggml_soft_max_inplace(ctx0, cur); + + // run the computation + ggml_build_forward_expand(gf, cur); + + // fprintf(stderr, "graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf.n_nodes, gf.n_leafs); + +#if GGML_USE_MPI + ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer); +#endif + +#ifdef GGML_USE_METAL + if (lctx.ctx_metal && N == 1) { + if (!ggml_metal_if_optimized(lctx.ctx_metal)) { + ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf); + } + ggml_metal_set_n_cb (lctx.ctx_metal, n_threads); + ggml_metal_graph_compute(lctx.ctx_metal, gf); + ggml_metal_get_tensor (lctx.ctx_metal, cur); + } else { + // IMPORTANT: + // Since we don't have efficient Matrix x Matrix Metal multiplication yet, we fallback to vanilla + // ggml_graph_compute(). It uses Apple's Accelerate CBLAS API which takes advantage of the ANE or the AMX + // coprocessor. + // + // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch. + // But for now, we have focused only on Matrix x Vector Metal multiplication. 
+ // + // TODO: avoid these syncs via shared memory (ref #1696) + // + if (lctx.ctx_metal) { + // We need to sync the GPU KV cache with the CPU KV cache + ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k); + ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v); + } + + ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads); + } +#else + ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads); +#endif + +#if GGML_USE_MPI + ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer); +#endif + + // update kv token count + lctx.kv_self.n = n_past + N; + + struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1]; + + if (cgraph_fname) { + ggml_graph_export(gf, cgraph_fname); + } + +#ifdef GGML_PERF + // print timing information per ggml operation (for debugging purposes) + // requires GGML_PERF to be defined + ggml_graph_print(gf); +#endif + + // plot the computation graph in dot format (for debugging purposes) + //if (n_past%100 == 0) { + // ggml_graph_dump_dot(gf, NULL, "llama.dot"); + //} + + // extract logits + { + auto & logits_out = lctx.logits; + + if (lctx.logits_all) { + logits_out.resize(n_vocab * N); + memcpy(logits_out.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab*N); + } else { + // return result for just the last token + logits_out.resize(n_vocab); + memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(N-1)), sizeof(float)*n_vocab); + } + } + + // extract embeddings + if (!lctx.embedding.empty()) { + auto & embedding_out = lctx.embedding; + + embedding_out.resize(n_embd); + memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd); + } + + if (mem_per_token == 0) { + mem_per_token = ggml_used_mem(ctx0)/N; + } + +#if 0 + printf("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__, + ggml_used_mem(ctx0)/1024.0/1024.0, + lctx.get_buf_max_mem(0)/1024.0/1024.0, + lctx.get_buf_max_mem(1)/1024.0/1024.0, + 
lctx.work_buffer.size()/1024.0/1024.0, + n_past, N); +#endif + + ggml_free(ctx0); + + // measure the performance only for the single-token evals + if (N == 1) { + lctx.t_eval_us += ggml_time_us() - t_start_us; + lctx.n_eval++; + } + else if (N > 1) { + lctx.t_p_eval_us += ggml_time_us() - t_start_us; + lctx.n_p_eval += N; + } + + return true; +} + +// +// tokenizer +// + +static size_t utf8_len(char src) { + const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 }; + uint8_t highbits = static_cast(src) >> 4; + return lookup[highbits]; +} + +struct llama_sp_symbol { + using index = int; + index prev; + index next; + const char * text; + size_t n; +}; + +static_assert(std::is_trivially_copyable::value, "llama_sp_symbol is not trivially copyable"); + +struct llama_sp_bigram { + struct comparator { + bool operator()(llama_sp_bigram & l, llama_sp_bigram & r) { + return (l.score < r.score) || (l.score == r.score && l.left > r.left); + } + }; + using queue_storage = std::vector; + using queue = std::priority_queue; + llama_sp_symbol::index left; + llama_sp_symbol::index right; + float score; + size_t size; +}; + +// original implementation: +// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4 +struct llama_tokenizer { + llama_tokenizer(const llama_vocab & vocab): vocab_(vocab) {} + + void tokenize(const std::string & text, std::vector & output) { + // split string into utf8 chars + int index = 0; + size_t offs = 0; + while (offs < text.size()) { + llama_sp_symbol sym; + size_t char_len = std::min(text.size() - offs, utf8_len(text[offs])); + sym.text = text.c_str() + offs; + sym.n = char_len; + offs += char_len; + sym.prev = index - 1; + sym.next = offs == text.size() ? -1 : index + 1; + index++; + symbols_.emplace_back(sym); + } + + // seed the work queue with all possible 2-character tokens. 
+        for (size_t i = 1; i < symbols_.size(); ++i) {
+            // every adjacent pair (i-1, i) is a potential first merge
+            try_add_bigram(i - 1, i);
+        }
+
+        // keep substituting the highest frequency pairs for as long as we can.
+        while (!work_queue_.empty()) {
+            auto bigram = work_queue_.top();
+            work_queue_.pop();
+
+            auto & left_sym = symbols_[bigram.left];
+            auto & right_sym = symbols_[bigram.right];
+
+            // if one of the symbols already got merged, skip it.
+            // (the recorded bigram.size no longer matches the live symbol
+            // lengths, which identifies a stale queue entry)
+            if (left_sym.n == 0 || right_sym.n == 0 ||
+                left_sym.n + right_sym.n != bigram.size) {
+                continue;
+            }
+
+            // merge the right sym into the left one
+            left_sym.n += right_sym.n;
+            right_sym.n = 0;
+
+            //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
+
+            // remove the right sym from the chain
+            left_sym.next = right_sym.next;
+            if (right_sym.next >= 0) {
+                symbols_[right_sym.next].prev = bigram.left;
+            }
+
+            // find more substitutions
+            try_add_bigram(left_sym.prev, bigram.left);
+            try_add_bigram(bigram.left, left_sym.next);
+        }
+
+        // walk the chain of surviving (merged) symbols and emit a token id for each
+        for (int i = 0; i != -1; i = symbols_[i].next) {
+            auto & symbol = symbols_[i];
+            auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n));
+
+            if (token == vocab_.token_to_id.end()) {
+                // output any symbols that did not form tokens as bytes.
+ for (int j = 0; j < (int) symbol.n; ++j) { + llama_vocab::id token_id = static_cast(symbol.text[j]) + 3; + output.push_back(token_id); + } + } else { + output.push_back((*token).second); + } + } + } + +private: + void try_add_bigram(int left, int right) { + if (left == -1 || right == -1) { + return; + } + + const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n); + auto token = vocab_.token_to_id.find(text); + + if (token == vocab_.token_to_id.end()) { + return; + } + + if (static_cast((*token).second) >= vocab_.id_to_token.size()) { + return; + } + + const auto &tok_score = vocab_.id_to_token[(*token).second]; + + llama_sp_bigram bigram; + bigram.left = left; + bigram.right = right; + bigram.score = tok_score.score; + bigram.size = text.size(); + work_queue_.push(bigram); + } + + const llama_vocab & vocab_; + std::vector symbols_; + llama_sp_bigram::queue work_queue_; +}; + +static std::vector llama_tokenize(const llama_vocab & vocab, const std::string & text, bool bos) { + llama_tokenizer tokenizer(vocab); + std::vector output; + + if (text.empty()) { + return output; + } + + if (bos) { + output.push_back(llama_token_bos()); + } + + tokenizer.tokenize(text, output); + return output; +} + +// +// grammar - internal +// + +struct llama_grammar { + const std::vector> rules; + std::vector> stacks; +}; + +struct llama_grammar_candidate { + size_t index; + const uint32_t * code_points; +}; + +// NOTE: assumes valid utf8 (but checks for overrun) +// adds a terminating 0 for use as pointer +std::vector decode_utf8(const char * src) { + static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 }; + const char * pos = src; + std::vector code_points; + while (*pos != 0) { + uint8_t first_byte = static_cast(*pos); + uint8_t highbits = first_byte >> 4; + int len = lookup[highbits]; + uint8_t mask = (1 << (8 - len)) - 1; + uint32_t value = first_byte & mask; + const char * end = pos + len; // may overrun! 
+        ++pos;
+        for ( ; pos < end && *pos != 0; ++pos) {
+            // fold 6 payload bits of each continuation byte; the *pos != 0 guard
+            // stops at the terminator on truncated sequences
+            value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
+        }
+        code_points.push_back(value);
+    }
+    code_points.push_back(0);
+    return code_points;
+}
+
+// returns true iff pos points to the end of one of the definitions of a rule
+static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
+    switch (pos->type) {
+        case LLAMA_GRETYPE_END: return true;
+        case LLAMA_GRETYPE_ALT: return true;
+        default:                return false;
+    }
+}
+
+// returns true iff chr satisfies the char range at pos (regular or inverse range)
+// asserts that pos is pointing to a char range element
+// the second member of the returned pair is the position just past the range element(s)
+static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
+        const llama_grammar_element * pos,
+        const uint32_t                chr) {
+
+    bool found            = false;
+    bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
+    GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
+
+    do {
+        if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
+            // inclusive range, e.g. [a-z]
+            found = found || (pos->value <= chr && chr <= pos[1].value);
+            pos += 2;
+        } else {
+            // exact char match, e.g.
[a] or "a" + found = found || pos->value == chr; + pos += 1; + } + } while (pos->type == LLAMA_GRETYPE_CHAR_ALT); + + return std::make_pair(found == is_positive_char, pos); +} + +// transforms a grammar pushdown stack into N possible stacks, all ending +// at a character range (terminal element) +static void llama_grammar_advance_stack( + const std::vector> & rules, + const std::vector & stack, + std::vector> & new_stacks) { + + if (stack.empty()) { + new_stacks.push_back(stack); + return; + } + + const llama_grammar_element * pos = stack.back(); + + switch (pos->type) { + case LLAMA_GRETYPE_RULE_REF: { + const size_t rule_id = static_cast(pos->value); + const llama_grammar_element * subpos = rules[rule_id].data(); + do { + // init new stack without the top (pos) + std::vector new_stack(stack.begin(), stack.end() - 1); + if (!llama_grammar_is_end_of_sequence(pos + 1)) { + // if this rule ref is followed by another element, add that to stack + new_stack.push_back(pos + 1); + } + if (!llama_grammar_is_end_of_sequence(subpos)) { + // if alternate is nonempty, add to stack + new_stack.push_back(subpos); + } + llama_grammar_advance_stack(rules, new_stack, new_stacks); + while (!llama_grammar_is_end_of_sequence(subpos)) { + // scan to end of alternate def + subpos++; + } + if (subpos->type == LLAMA_GRETYPE_ALT) { + // there's another alternate def of this rule to process + subpos++; + } else { + break; + } + } while (true); + break; + } + case LLAMA_GRETYPE_CHAR: + case LLAMA_GRETYPE_CHAR_NOT: + new_stacks.push_back(stack); + break; + default: + // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range + // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on + // those + GGML_ASSERT(false); + } +} + +// takes a set of possible pushdown stacks on a grammar, which are required to +// be positioned at a character range (see `llama_grammar_advance_stack`), and +// produces the N possible stacks if the given char is 
accepted at those +// positions +static std::vector> llama_grammar_accept( + const std::vector> & rules, + const std::vector> & stacks, + const uint32_t chr) { + + std::vector> new_stacks; + + for (const auto & stack : stacks) { + if (stack.empty()) { + continue; + } + + auto match = llama_grammar_match_char(stack.back(), chr); + if (match.first) { + const llama_grammar_element * pos = match.second; + + // update top of stack to next element, if any + std::vector new_stack(stack.begin(), stack.end() - 1); + if (!llama_grammar_is_end_of_sequence(pos)) { + new_stack.push_back(pos); + } + llama_grammar_advance_stack(rules, new_stack, new_stacks); + } + } + + return new_stacks; +} + +static std::vector llama_grammar_reject_candidates( + const std::vector> & rules, + const std::vector> & stacks, + const std::vector & candidates); + +static std::vector llama_grammar_reject_candidates_for_stack( + const std::vector> & rules, + const std::vector & stack, + const std::vector & candidates) { + + std::vector rejects; + + if (stack.empty()) { + // accept nothing; EOS is handled elsewhere + rejects.insert(rejects.end(), candidates.begin(), candidates.end()); + return rejects; + } + + const llama_grammar_element * stack_pos = stack.back(); + + std::vector next_candidates; + for (auto tok : candidates) { + if (llama_grammar_match_char(stack_pos, tok.code_points[0]).first) { + if (tok.code_points[1] != 0) { + next_candidates.push_back({ tok.index, tok.code_points + 1 }); + } + } else { + rejects.push_back(tok); + } + } + + auto stack_pos_after = llama_grammar_match_char(stack_pos, 0).second; + + // update top of stack to next element, if any + std::vector stack_after(stack.begin(), stack.end() - 1); + if (!llama_grammar_is_end_of_sequence(stack_pos_after)) { + stack_after.push_back(stack_pos_after); + } + std::vector> next_stacks; + llama_grammar_advance_stack(rules, stack_after, next_stacks); + + auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, 
next_candidates); + for (auto tok : next_rejects) { + rejects.push_back({ tok.index, tok.code_points - 1 }); + } + + return rejects; +} + +static std::vector llama_grammar_reject_candidates( + const std::vector> & rules, + const std::vector> & stacks, + const std::vector & candidates) { + GGML_ASSERT(!stacks.empty()); // REVIEW + + if (candidates.empty()) { + return std::vector(); + } + + auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates); + + for (size_t i = 1, size = stacks.size(); i < size; ++i) { + rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects); + } + return rejects; +} + +// +// grammar - external +// + +struct llama_grammar * llama_grammar_init( + const llama_grammar_element ** rules, + size_t n_rules, + size_t start_rule_index) { + const llama_grammar_element * pos; + + // copy rule definitions into vectors + std::vector> vec_rules(n_rules); + for (size_t i = 0; i < n_rules; i++) { + for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) { + vec_rules[i].push_back(*pos); + } + vec_rules[i].push_back({LLAMA_GRETYPE_END, 0}); + } + + // loop over alternates of start rule to build initial stacks + std::vector> stacks; + pos = rules[start_rule_index]; + do { + std::vector stack; + if (!llama_grammar_is_end_of_sequence(pos)) { + // if alternate is nonempty, add to stack + stack.push_back(pos); + } + llama_grammar_advance_stack(vec_rules, stack, stacks); + while (!llama_grammar_is_end_of_sequence(pos)) { + // scan to end of alternate def + pos++; + } + if (pos->type == LLAMA_GRETYPE_ALT) { + // there's another alternate def of this rule to process + pos++; + } else { + break; + } + } while (true); + + return new llama_grammar{ std::move(vec_rules), std::move(stacks) }; +} + +void llama_grammar_free(struct llama_grammar * grammar) { + delete grammar; +} + +// +// sampling +// + +void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) { + 
assert(candidates->size > 0); + + const int64_t t_start_sample_us = ggml_time_us(); + + // Sort the logits in descending order + if (!candidates->sorted) { + std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) { + return a.logit > b.logit; + }); + candidates->sorted = true; + } + + float max_l = candidates->data[0].logit; + float cum_sum = 0.0f; + for (size_t i = 0; i < candidates->size; ++i) { + float p = expf(candidates->data[i].logit - max_l); + candidates->data[i].p = p; + cum_sum += p; + } + for (size_t i = 0; i < candidates->size; ++i) { + candidates->data[i].p /= cum_sum; + } + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep) { + const int64_t t_start_sample_us = ggml_time_us(); + + k = std::max(k, (int) min_keep); + k = std::min(k, (int) candidates->size); + + // Sort scores in descending order + if (!candidates->sorted) { + auto comp = [](const llama_token_data & a, const llama_token_data & b) { + return a.logit > b.logit; + }; + if (k == (int) candidates->size) { + std::sort(candidates->data, candidates->data + candidates->size, comp); + } else { + std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp); + } + candidates->sorted = true; + } + candidates->size = k; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) { + if (p >= 1.0f) { + return; + } + + llama_sample_softmax(ctx, candidates); + + const int64_t t_start_sample_us = ggml_time_us(); + + // Compute the cumulative probabilities + float cum_sum = 0.0f; + size_t last_idx = candidates->size; + + for (size_t i = 0; i < candidates->size; ++i) { + cum_sum += candidates->data[i].p; + + // Check if the 
running sum is at least p or if we have kept at least min_keep tokens
+        // we set the last index to i+1 to indicate that the current iterate should be included in the set
+        if (cum_sum >= p && i + 1 >= min_keep) {
+            last_idx = i + 1;
+            break;
+        }
+    }
+
+    // Resize the output vector to keep only the top-p tokens
+    candidates->size = last_idx;
+
+    if (ctx) {
+        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+    }
+}
+
+// Tail-Free Sampling: drop the low-probability "tail" of the distribution by
+// locating the knee of the sorted probability curve via second derivatives.
+// z is the cumulative-weight threshold; min_keep bounds how few tokens survive.
+void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
+    if (z >= 1.0f || candidates->size <= 2) {
+        return;
+    }
+
+    llama_sample_softmax(nullptr, candidates);
+    const int64_t t_start_sample_us = ggml_time_us();
+
+    // Compute the first and second derivatives
+    std::vector<float> first_derivatives(candidates->size - 1);
+    std::vector<float> second_derivatives(candidates->size - 2);
+
+    for (size_t i = 0; i < first_derivatives.size(); ++i) {
+        first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
+    }
+    for (size_t i = 0; i < second_derivatives.size(); ++i) {
+        second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
+    }
+
+    // Calculate absolute value of second derivatives.
+    // NOTE: fabsf, not abs — the values are floats in (-1, 1); unqualified abs
+    // selects the int overload, which truncates them all to 0 and makes the
+    // normalization below silently fall back to uniform weights.
+    for (size_t i = 0; i < second_derivatives.size(); ++i) {
+        second_derivatives[i] = fabsf(second_derivatives[i]);
+    }
+
+    // Normalize the second derivatives
+    {
+        const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
+
+        if (second_derivatives_sum > 1e-6f) {
+            for (float & value : second_derivatives) {
+                value /= second_derivatives_sum;
+            }
+        } else {
+            // degenerate curve (all-equal probabilities): weight every position equally
+            for (float & value : second_derivatives) {
+                value = 1.0f / second_derivatives.size();
+            }
+        }
+    }
+
+    float cum_sum = 0.0f;
+    size_t last_idx = candidates->size;
+    for (size_t i = 0; i < second_derivatives.size(); ++i) {
+        cum_sum += second_derivatives[i];
+
+        // Check if the running sum is greater than z or if we have kept at least min_keep tokens
+        if (cum_sum > z && i >= min_keep) {
+            last_idx = i;
+            break;
+        }
+    }
+
+    // Resize the output vector to keep only the tokens above the tail location
+    candidates->size = last_idx;
+
+    if (ctx) {
+        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+    }
+}
+
+
+void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
+    // Reference implementation:
+    // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
+    if (p >= 1.0f) {
+        return;
+    }
+
+    // Compute the softmax of logits and calculate entropy
+    llama_sample_softmax(nullptr, candidates);
+
+    const int64_t t_start_sample_us = ggml_time_us();
+
+    float entropy = 0.0f;
+    for (size_t i = 0; i < candidates->size; ++i) {
+        entropy += -candidates->data[i].p * logf(candidates->data[i].p);
+    }
+
+    // Compute the absolute difference between negative log probability and entropy for each candidate
+    std::vector<float> shifted_scores;
+    for (size_t i = 0; i < candidates->size; ++i) {
+        float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
+        shifted_scores.push_back(shifted_score);
+    }
+
+    // Sort tokens based on the shifted_scores and their corresponding indices
+    std::vector<size_t> indices(candidates->size);
+    std::iota(indices.begin(), indices.end(), 0);
+
+    std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
+        return shifted_scores[a] < shifted_scores[b];
+    });
+
+    // Compute the cumulative probabilities
+    float cum_sum = 0.0f;
+    size_t last_idx = indices.size();
+
+    for (size_t i = 0; i < indices.size(); ++i) {
+        size_t idx = indices[i];
+        cum_sum += candidates->data[idx].p;
+
+        // Check if the running sum is greater than typical or if we have kept at least min_keep tokens
+        if (cum_sum > p && i >= min_keep - 1) {
+            last_idx = i + 1;
+            break;
+        }
+    }
+
+    // Resize the output vector to keep only the locally typical tokens
+    std::vector<llama_token_data> new_candidates;
+    for (size_t i = 0; i < last_idx; ++i) {
+        size_t idx = indices[i];
new_candidates.push_back(candidates->data[idx]);
+    }
+
+    // Replace the data in candidates with the new_candidates data
+    std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
+    candidates->size = new_candidates.size();
+
+    if (ctx) {
+        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+    }
+}
+
+// Scale every logit by 1/temp: temp < 1 sharpens the distribution, temp > 1
+// flattens it. No guard for temp == 0 here — callers are expected to special-case it.
+void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
+    const int64_t t_start_sample_us = ggml_time_us();
+
+    for (size_t i = 0; i < candidates_p->size; ++i) {
+        candidates_p->data[i].logit /= temp;
+    }
+
+    if (ctx) {
+        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+    }
+}
+
+// Penalize candidates whose token id appears anywhere in the last_tokens window.
+// O(size * last_tokens_size) linear scan per candidate.
+void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty) {
+    if (last_tokens_size == 0 || penalty == 1.0f) {
+        return;
+    }
+
+    const int64_t t_start_sample_us = ggml_time_us();
+
+    for (size_t i = 0; i < candidates->size; ++i) {
+        const auto * token_iter = std::find(last_tokens, last_tokens + last_tokens_size, candidates->data[i].id);
+        if (token_iter == last_tokens + last_tokens_size) {
+            continue;
+        }
+
+        // The academic publication that described this technique only divided, but that would cause tokens with negative logits to become more likely, which is obviously wrong.
+        // This is a common fix for the problem: multiply by the penalty instead of dividing when the logit is negative.
+ if (candidates->data[i].logit <= 0) { + candidates->data[i].logit *= penalty; + } else { + candidates->data[i].logit /= penalty; + } + } + + candidates->sorted = false; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens_p, size_t last_tokens_size, float alpha_frequency, float alpha_presence) { + if (last_tokens_size == 0 || (alpha_frequency == 0.0f && alpha_presence == 0.0f)) { + return; + } + + const int64_t t_start_sample_us = ggml_time_us(); + + // Create a frequency map to count occurrences of each token in last_tokens + std::unordered_map token_count; + for (size_t i = 0; i < last_tokens_size; ++i) { + token_count[last_tokens_p[i]]++; + } + + // Apply frequency and presence penalties to the candidates + for (size_t i = 0; i < candidates->size; ++i) { + auto token_iter = token_count.find(candidates->data[i].id); + if (token_iter == token_count.end()) { + continue; + } + + int count = token_iter->second; + candidates->data[i].logit -= float(count) * alpha_frequency + float(count > 0) * alpha_presence; + } + + candidates->sorted = false; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) { + assert(ctx); + const int64_t t_start_sample_us = ggml_time_us(); + + bool allow_eos = false; + for (const auto & stack : grammar->stacks) { + if (stack.empty()) { + allow_eos = true; + break; + } + } + + const llama_token eos = llama_token_eos(); + + std::vector> candidates_decoded; + std::vector candidates_grammar; + + for (size_t i = 0; i < candidates->size; ++i) { + const llama_token id = candidates->data[i].id; + const char * str = llama_token_to_str(ctx, id); + if (id == eos) { + if (!allow_eos) { + candidates->data[i].logit = 
-INFINITY; + } + } else if (*str == 0) { + candidates->data[i].logit = -INFINITY; + } else { + candidates_decoded.push_back(decode_utf8(str)); + candidates_grammar.push_back({ i, candidates_decoded.back().data() }); + } + } + + const auto rejects = + llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar); + for (auto & reject : rejects) { + candidates->data[reject.index].logit = -INFINITY; + } + + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; +} + +static void llama_log_softmax(float * array, size_t size) { + float max_l = *std::max_element(array, array + size); + float sum = 0.f; + for (size_t i = 0; i < size; ++i) { + float p = expf(array[i] - max_l); + sum += p; + array[i] = p; + } + + for (size_t i = 0; i < size; ++i) { + array[i] = logf(array[i] / sum); + } +} + +void llama_sample_classifier_free_guidance( + struct llama_context * ctx, + llama_token_data_array * candidates, + struct llama_context * guidance_ctx, + float scale) { + int64_t t_start_sample_us = ggml_time_us(); + + assert(ctx); + auto n_vocab = llama_n_vocab(ctx); + assert(n_vocab == (int)candidates->size); + assert(!candidates->sorted); + + std::vector logits_base; + logits_base.reserve(candidates->size); + for (size_t i = 0; i < candidates->size; ++i) { + logits_base.push_back(candidates->data[i].logit); + } + llama_log_softmax(logits_base.data(), candidates->size); + + float* logits_guidance = llama_get_logits(guidance_ctx); + llama_log_softmax(logits_guidance, n_vocab); + + for (int i = 0; i < n_vocab; ++i) { + float logit_guidance = logits_guidance[i]; + float logit_base = logits_base[i]; + candidates->data[i].logit = scale * (logit_base - logit_guidance) + logit_guidance; + } + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) { + assert(ctx); + auto N = 
float(llama_n_vocab(ctx)); + int64_t t_start_sample_us; + t_start_sample_us = ggml_time_us(); + + llama_sample_softmax(nullptr, candidates); + + // Estimate s_hat using the most probable m tokens + float s_hat = 0.0; + float sum_ti_bi = 0.0; + float sum_ti_sq = 0.0; + for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) { + float t_i = logf(float(i + 2) / float(i + 1)); + float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p); + sum_ti_bi += t_i * b_i; + sum_ti_sq += t_i * t_i; + } + s_hat = sum_ti_bi / sum_ti_sq; + + // Compute k from the estimated s_hat and target surprise value + float epsilon_hat = s_hat - 1; + float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat); + + // Sample the next word X using top-k sampling + llama_sample_top_k(nullptr, candidates, int(k), 1); + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } + llama_token X = llama_sample_token(ctx, candidates); + t_start_sample_us = ggml_time_us(); + + // Compute error as the difference between observed surprise and target surprise value + size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) { + return candidate.id == X; + })); + float observed_surprise = -log2f(candidates->data[X_idx].p); + float e = observed_surprise - tau; + + // Update mu using the learning rate and error + *mu = *mu - eta * e; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } + return X; +} + +llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) { + int64_t t_start_sample_us; + t_start_sample_us = ggml_time_us(); + + llama_sample_softmax(ctx, candidates); + + // Truncate the words with surprise values greater than mu + candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + 
candidates->size, [&](const llama_token_data & candidate) { + return -log2f(candidate.p) > *mu; + })); + + if (candidates->size == 0) { + candidates->size = 1; + } + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } + + // Normalize the probabilities of the remaining words + llama_sample_softmax(ctx, candidates); + + // Sample the next word X from the remaining words + llama_token X = llama_sample_token(ctx, candidates); + t_start_sample_us = ggml_time_us(); + + // Compute error as the difference between observed surprise and target surprise value + size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) { + return candidate.id == X; + })); + float observed_surprise = -log2f(candidates->data[X_idx].p); + float e = observed_surprise - tau; + + // Update mu using the learning rate and error + *mu = *mu - eta * e; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } + return X; +} + +llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) { + const int64_t t_start_sample_us = ggml_time_us(); + + // Find max element + auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) { + return a.logit < b.logit; + }); + + llama_token result = max_iter->id; + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + ctx->n_sample++; + } + return result; +} + +llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) { + assert(ctx); + const int64_t t_start_sample_us = ggml_time_us(); + llama_sample_softmax(nullptr, candidates); + + std::vector probs; + probs.reserve(candidates->size); + for (size_t i = 0; i < candidates->size; ++i) { + probs.push_back(candidates->data[i].p); + } + + std::discrete_distribution<> dist(probs.begin(), probs.end()); + 
auto & rng = ctx->rng; + int idx = dist(rng); + + llama_token result = candidates->data[idx].id; + + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + ctx->n_sample++; + return result; +} + +void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) { + const int64_t t_start_sample_us = ggml_time_us(); + + if (token == llama_token_eos()) { + for (const auto & stack : grammar->stacks) { + if (stack.empty()) { + return; + } + } + GGML_ASSERT(false); + } + + const char * str = llama_token_to_str(ctx, token); + // Note terminating 0 in decoded string + auto code_points = decode_utf8(str); + for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) { + grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it); + } + GGML_ASSERT(!grammar->stacks.empty()); + + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; +} + +// +// quantization +// + +static void llama_convert_tensor_internal(const llama_load_tensor & tensor, gguf_buffer & output, const int nelements, const int nthread) { + if (output.size < nelements * sizeof(float)) { + output.resize(nelements * sizeof(float)); + } + float * f32_output = (float *) output.addr; + + ggml_type_traits_t qtype; + if (ggml_is_quantized(tensor.type)) { + qtype = ggml_internal_get_type_traits(tensor.type); + if (qtype.to_float == NULL) { + throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor.type))); + } + } else if (tensor.type != GGML_TYPE_F16) { + throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor.type))); + } + + if (nthread < 2) { + if (tensor.type == GGML_TYPE_F16) { + ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor.data, f32_output, nelements); + } else if (ggml_is_quantized(tensor.type)) { + qtype.to_float(tensor.data, f32_output, nelements); + } else { + GGML_ASSERT(false); // unreachable + } + return; 
+ } + + auto block_size = tensor.type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor.type); + auto block_size_bytes = ggml_type_size(tensor.type); + + GGML_ASSERT(nelements % block_size == 0); + auto nblocks = nelements / block_size; + auto blocks_per_thread = nblocks / nthread; + auto spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count + + std::vector workers; + for (auto tnum = 0, in_buff_offs = 0, out_buff_offs = 0; tnum < nthread; tnum++) { + auto thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread + auto thr_elems = thr_blocks * block_size; // number of elements for this thread + auto thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread + + auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) { + if (typ == GGML_TYPE_F16) { + ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels); + } else { + qtype.to_float(inbuf, outbuf, nels); + } + }; + workers.push_back(std::thread(compute, tensor.type, tensor.data + in_buff_offs, f32_output + out_buff_offs, thr_elems)); + in_buff_offs += thr_block_bytes; + out_buff_offs += thr_elems; + } + for (auto & worker : workers) { + worker.join(); + } + +} + +static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) { + ggml_type quantized_type; + llama_ftype ftype = params->ftype; + int nthread = params->nthread; + + switch (params->ftype) { + case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break; + case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break; + case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break; + case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break; + case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break; + case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break; + 
case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break; + +#ifdef GGML_USE_K_QUANTS + // K-quants + case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break; + case LLAMA_FTYPE_MOSTLY_Q3_K_S: + case LLAMA_FTYPE_MOSTLY_Q3_K_M: + case LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break; + case LLAMA_FTYPE_MOSTLY_Q4_K_S: + case LLAMA_FTYPE_MOSTLY_Q4_K_M: quantized_type = GGML_TYPE_Q4_K; break; + case LLAMA_FTYPE_MOSTLY_Q5_K_S: + case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break; + case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break; +#endif + default: throw std::runtime_error(format("invalid output file type %d\n", ftype)); + } + + if (nthread <= 0) { + nthread = std::thread::hardware_concurrency(); + } + + std::unique_ptr model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false)); + gguf_file_saver file_saver(fname_out.c_str(), model_loader->file_loader.get(), params->ftype); + +#ifdef GGML_USE_K_QUANTS + int n_attention_wv = 0; + int n_feed_forward_w2 = 0; + for (auto& tensor : model_loader->tensors_map.tensors) { + if (tensor.name.find("attention.wv.weight") != std::string::npos) { + ++n_attention_wv; + } + else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) { + ++n_feed_forward_w2; + } + } + + int i_attention_wv = 0; + int i_feed_forward_w2 = 0; +#endif + + size_t total_size_org = 0; + size_t total_size_new = 0; + std::vector hist_all(1 << 4, 0); + + std::vector workers; + std::mutex mutex; + + auto use_more_bits = [] (int i_layer, int num_layers) -> bool { + return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2; + }; + + size_t idx = 0; + for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) { + gguf_buffer read_data; + read_data.resize(tensor.size); + tensor.data = read_data.addr; + model_loader->load_data_for(tensor); + + printf("[%4zu/%4zu] %36s - %16s, type = %6s, ", + ++idx, 
model_loader->tensors_map.tensors.size(), + tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(), + ggml_type_name(tensor.type)); + + // This used to be a regex, but has an extreme cost to compile times. + bool quantize = tensor.name.rfind("weight") == tensor.name.size() - 6; // ends with 'weight'? + + // quantize only 2D tensors + quantize &= (tensor.ne.size() == 2); + quantize &= params->quantize_output_tensor || tensor.name != "output.weight"; + quantize &= quantized_type != tensor.type; + + enum ggml_type new_type; + void * new_data; + size_t new_size; + gguf_buffer work; + + if (!quantize) { + new_type = tensor.type; + new_data = tensor.data; + new_size = tensor.size; + printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0); + } else { + new_type = quantized_type; +#ifdef GGML_USE_K_QUANTS + if (tensor.name == "output.weight") { + int nx = tensor.ne.at(0); + int ny = tensor.ne.at(1); + if (nx % QK_K == 0 && ny % QK_K == 0) { + new_type = GGML_TYPE_Q6_K; + } + } else if (tensor.name.find("attention.wv.weight") != std::string::npos) { + if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; + else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; + else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) && + use_more_bits(i_attention_wv, n_attention_wv)) new_type = GGML_TYPE_Q6_K; + else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) && + (i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8)) new_type = GGML_TYPE_Q6_K; + ++i_attention_wv; + } else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) { + if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; + else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; + else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) && + 
use_more_bits(i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K; + //else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && i_feed_forward_w2 < n_feed_forward_w2/8) new_type = GGML_TYPE_Q6_K; + ++i_feed_forward_w2; + } else if (tensor.name.find("attention.wo.weight") != std::string::npos) { + if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; + else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; + } + bool convert_incompatible_tensor = false; + if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K || + new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) { + int nx = tensor.ne.at(0); + int ny = tensor.ne.at(1); + if (nx % QK_K != 0 || ny % QK_K != 0) { + fprintf(stderr, "\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K); + convert_incompatible_tensor = true; + } + } + if (convert_incompatible_tensor) { + if (tensor.name == "output.weight") { + new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing. + fprintf(stderr, "F16 will be used for this tensor instead.\n"); + } else if (tensor.name == "tok_embeddings.weight") { + new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing. + fprintf(stderr, "Q4_0 will be used for this tensor instead.\n"); + } else { + throw std::runtime_error("Unsupported tensor size encountered\n"); + } + } +#endif + + float * f32_data; + size_t nelements = tensor.ne.at(0) * tensor.ne.at(1); + gguf_buffer f32_conv_buf; + + if (tensor.type == GGML_TYPE_F32) { + f32_data = (float *) tensor.data; + } else if (ggml_is_quantized(tensor.type) && !params->allow_requantize) { + throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor.type))); + } else { + llama_convert_tensor_internal(tensor, f32_conv_buf, nelements, nthread); + f32_data = (float *) f32_conv_buf.addr; + } + + printf("quantizing to %s .. 
", ggml_type_name(new_type)); + fflush(stdout); + + work.resize(nelements * 4); // upper bound on size + new_data = work.addr; + std::vector hist_cur(1 << 4, 0); + + int chunk_size = 32 * 512; + const int nchunk = (nelements + chunk_size - 1)/chunk_size; + const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1; + if (nthread_use < 2) { + new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data()); + } else { + size_t counter = 0; + new_size = 0; + auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size] () { + std::vector local_hist; + size_t local_size = 0; + while (true) { + std::unique_lock lock(mutex); + size_t first = counter; counter += chunk_size; + if (first >= nelements) { + if (!local_hist.empty()) { + for (int j=0; j %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0); + int64_t tot_count = 0; + for (size_t i = 0; i < hist_cur.size(); i++) { + hist_all[i] += hist_cur[i]; + tot_count += hist_cur[i]; + } + + if (tot_count > 0) { + for (size_t i = 0; i < hist_cur.size(); i++) { + printf("%5.3f ", hist_cur[i] / float(nelements)); + } + } + printf("\n"); + } + total_size_org += tensor.size; + total_size_new += new_size; + file_saver.write_tensor(tensor, new_type, new_data, new_size); + } + + printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); + printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); + + { + int64_t sum_all = 0; + for (size_t i = 0; i < hist_all.size(); i++) { + sum_all += hist_all[i]; + } + + if (sum_all > 0) { + printf("%s: hist: ", __func__); + for (size_t i = 0; i < hist_all.size(); i++) { + printf("%5.3f ", hist_all[i] / float(sum_all)); + } + printf("\n"); + } + } +} + + + +// +// interface implementation +// + +struct llama_model * llama_load_model_from_file( + const char * path_model, + struct llama_context_params params) { + ggml_time_init(); + + llama_model 
* model = new llama_model; + + ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32; + + if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gqa, params.rms_norm_eps, params.n_gpu_layers, + params.main_gpu, params.tensor_split, params.rope_freq_base, params.rope_freq_scale,params.low_vram, + memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback, + params.progress_callback_user_data)) { + delete model; + fprintf(stderr, "%s: failed to load model\n", __func__); + return nullptr; + } + + return model; +} + +void llama_free_model(struct llama_model * model) { + delete model; +} + +struct llama_context * llama_new_context_with_model( + struct llama_model * model, + struct llama_context_params params) { + + if (!model) { + return nullptr; + } + + llama_context * ctx = new llama_context(*model); + + if (params.seed == LLAMA_DEFAULT_SEED) { + params.seed = time(NULL); + } + + unsigned cur_percentage = 0; + if (params.progress_callback == NULL) { + params.progress_callback_user_data = &cur_percentage; + params.progress_callback = [](float progress, void * ctx) { + unsigned * cur_percentage_p = (unsigned *) ctx; + unsigned percentage = (unsigned) (100 * progress); + while (percentage > *cur_percentage_p) { + *cur_percentage_p = percentage; + fprintf(stderr, "."); + fflush(stderr); + if (percentage >= 100) { + fprintf(stderr, "\n"); + } + } + }; + } + + ctx->rng = std::mt19937(params.seed); + ctx->logits_all = params.logits_all; + + ggml_type memory_type = params.f16_kv ? 
GGML_TYPE_F16 : GGML_TYPE_F32; + + // reserve memory for context buffers + if (!params.vocab_only) { + if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) { + fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__); + llama_free(ctx); + return nullptr; + } + + { + const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v); + fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0); + } + + const auto & hparams = ctx->model.hparams; + + // resized during inference + if (params.logits_all) { + ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab); + } else { + ctx->logits.reserve(hparams.n_vocab); + } + + if (params.embedding){ + ctx->embedding.resize(hparams.n_embd); + } + + ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead()); + + ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type)); + ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type)); + } + +#ifdef GGML_USE_METAL + if (params.n_gpu_layers > 0) { + // this allocates all Metal resources and memory buffers + ctx->ctx_metal = ggml_metal_init(1); + + void * data_ptr = NULL; + size_t data_size = 0; + + if (params.use_mmap) { + data_ptr = ctx->model.mapping->addr; + data_size = ctx->model.mapping->size; + } else { + data_ptr = ggml_get_mem_buffer(ctx->model.ctx); + data_size = ggml_get_mem_size (ctx->model.ctx); + } + + const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx); + + fprintf(stderr, "%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0); + +#define LLAMA_METAL_CHECK_BUF(result) \ + if (!(result)) { \ + fprintf(stderr, "%s: failed to add buffer\n", __func__); \ + llama_free(ctx); \ + return NULL; \ + } + + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size)); + + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, 
"eval", ctx->buf_compute.addr, ctx->buf_compute.size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.addr, ctx->kv_self.buf.size, 0)); + + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].addr, ctx->buf_scratch[0].size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].addr, ctx->buf_scratch[1].size, 0)); +#undef LLAMA_METAL_CHECK_BUF + } +#endif + +#ifdef GGML_USE_MPI + ctx->ctx_mpi = ggml_mpi_init(); + + if (ggml_mpi_rank(ctx->ctx_mpi) > 0) { + // Enter a blocking eval loop with dummy input, letting rank=0 drive the process + const std::vector tmp(ctx->model.hparams.n_ctx, llama_token_bos()); + while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {}; + llama_backend_free(); + exit(1); + } +#endif + + return ctx; +} + +struct llama_context * llama_init_from_file( + const char * path_model, + struct llama_context_params params) { + + struct llama_model * model = llama_load_model_from_file(path_model, params); + if (!model) { + return nullptr; + } + struct llama_context * ctx = llama_new_context_with_model(model, params); + ctx->model_owner = true; + return ctx; +} + +void llama_free(struct llama_context * ctx) { + if (ctx->model_owner) { + delete &ctx->model; + } + delete ctx; +} + +int llama_model_quantize( + const char * fname_inp, + const char * fname_out, + const llama_model_quantize_params *params) { + try { + llama_model_quantize_internal(fname_inp, fname_out, params); + return 0; + } catch (const std::exception & err) { + fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.what()); + return 1; + } +} + +int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) { + fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora); + + const int64_t t_start_lora_us = ggml_time_us(); + + auto fin = 
std::ifstream(path_lora, std::ios::binary); + if (!fin) { + fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora); + return 1; + } + + // verify magic and version + { + uint32_t magic; + fin.read((char *) &magic, sizeof(magic)); + if (magic != LLAMA_FILE_MAGIC_GGLA) { + fprintf(stderr, "%s: bad file magic\n", __func__); + return 1; + } + uint32_t format_version; + fin.read((char *) &format_version, sizeof(format_version)); + + if (format_version != 1) { + fprintf(stderr, "%s: unsupported file version\n", __func__ ); + return 1; + } + } + + int32_t lora_r; + int32_t lora_alpha; + fin.read((char *) &lora_r, sizeof(lora_r)); + fin.read((char *) &lora_alpha, sizeof(lora_alpha)); + float scaling = (float)lora_alpha / (float)lora_r; + + fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling); + + + // create a temporary ggml context to store the lora tensors + // todo: calculate size from biggest possible tensor + std::vector lora_buf(1024ull * 1024ull * 1024ull); + struct ggml_init_params params; + params.mem_size = lora_buf.size(); + params.mem_buffer = lora_buf.data(); + params.no_alloc = false; + + ggml_context * lora_ctx = ggml_init(params); + std::unordered_map lora_tensors; + + // create a name -> tensor map of the model to accelerate lookups + std::unordered_map model_tensors; + for (const auto & kv: model.tensors_by_name) { + model_tensors.insert(kv); + } + + + // load base model + std::unique_ptr model_loader; + ggml_context * base_ctx = NULL; + gguf_buffer base_buf; + if (path_base_model) { + fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model); + model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true)); + + size_t ctx_size; + size_t mmapped_size; + model_loader->calc_sizes(&ctx_size, &mmapped_size); + base_buf.resize(ctx_size); + + ggml_init_params base_params; + base_params.mem_size = base_buf.size; + base_params.mem_buffer = base_buf.addr; + 
base_params.no_alloc = model_loader->use_mmap; + + base_ctx = ggml_init(base_params); + + model_loader->ggml_ctx = base_ctx; + + // maybe this should in llama_model_loader + if (model_loader->use_mmap) { + model_loader->mapping.reset(new gguf_mmap(&model_loader->file_loader->file, /* prefetch */ 0, ggml_is_numa())); + } + } + + // read tensors and apply + bool warned = false; + int n_tensors = 0; + + std::vector work_buffer; + + while (true) { + int32_t n_dims; + int32_t length; + int32_t ftype; + + fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); + fin.read(reinterpret_cast(&length), sizeof(length)); + fin.read(reinterpret_cast(&ftype), sizeof(ftype)); + if (fin.eof()) { + break; + } + + int32_t ne[2] = { 1, 1 }; + for (int i = 0; i < n_dims; ++i) { + fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); + } + + std::string name; + { + char buf[1024]; + fin.read(buf, length); + name = std::string(buf, length); + } + + // check for lora suffix and get the type of tensor + const std::string lora_suffix = ".lora"; + size_t pos = name.rfind(lora_suffix); + if (pos == std::string::npos) { + fprintf(stderr, "%s: error: '%s' is not a lora tensor\n", __func__, name.c_str()); + return 1; + } + + std::string lora_type = name.substr(pos + lora_suffix.length()); + std::string base_name = name; + base_name.erase(pos); + // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str()); + + if (model_tensors.find(base_name) == model_tensors.end()) { + fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data()); + return 1; + } + + // create ggml tensor + ggml_type wtype; + switch (ftype) { + case 0: wtype = GGML_TYPE_F32; break; + case 1: wtype = GGML_TYPE_F16; break; + default: + { + fprintf(stderr, "%s: invalid tensor data type '%d'\n", + __func__, ftype); + return false; + } + } + ggml_tensor * lora_tensor; + if (n_dims == 2) { + lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]); + } + else 
{ + fprintf(stderr, "%s: unsupported tensor dimension %d\n", __func__, n_dims); + return 1; + } + ggml_set_name(lora_tensor, "lora_tensor"); + + // load tensor data + size_t offset = fin.tellg(); + size_t tensor_data_size = ggml_nbytes(lora_tensor); + offset = (offset + 31) & -32; + fin.seekg(offset); + fin.read((char*)lora_tensor->data, tensor_data_size); + + lora_tensors[name] = lora_tensor; + + // check if we have both A and B tensors and apply + if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() && + lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) { + + ggml_tensor * dest_t = model_tensors[base_name]; + + offload_func_t offload_func = llama_nop; + offload_func_t offload_func_force_inplace = llama_nop; + +#ifdef GGML_USE_CUBLAS + if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) { + if (dest_t->type != GGML_TYPE_F16) { + throw std::runtime_error(format( + "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models", __func__)); + } + offload_func = ggml_cuda_assign_buffers; + offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace; + } +#endif // GGML_USE_CUBLAS + + ggml_tensor * base_t; + if (model_loader) { + // load from base model + if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) { + fprintf(stderr, "%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str()); + return 1; + } + size_t idx = model_loader->tensors_map.name_to_idx[base_name]; + llama_load_tensor & lt = model_loader->tensors_map.tensors[idx]; + base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU); + lt.data = (uint8_t *) lt.ggml_tensor->data; + model_loader->load_data_for(lt); + lt.ggml_tensor->data = lt.data; + } + else { + base_t = dest_t; + } + + if (ggml_is_quantized(base_t->type)) { + if (!warned) { + fprintf(stderr, "%s: warning: using a 
lora adapter with a quantized model may result in poor quality, " + "use a f16 or f32 base model with --lora-base\n", __func__); + warned = true; + } + } + + ggml_tensor * loraA = lora_tensors[base_name + ".loraA"]; + GGML_ASSERT(loraA->type == GGML_TYPE_F32); + ggml_set_name(loraA, "loraA"); + + ggml_tensor * loraB = lora_tensors[base_name + ".loraB"]; + GGML_ASSERT(loraB->type == GGML_TYPE_F32); + ggml_set_name(loraB, "loraB"); + + if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) { + fprintf(stderr, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");" + " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]); + return 1; + } + + // w = w + BA*s + ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB); + offload_func(BA); + ggml_set_name(BA, "BA"); + + if (scaling != 1.0f) { + ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling); + ggml_set_name(scale_tensor, "scale_tensor"); + + BA = ggml_scale_inplace(lora_ctx, BA, scale_tensor); + offload_func(BA); + ggml_set_name(BA, "BA_scaled"); + } + + ggml_tensor * r; + if (base_t == dest_t) { + r = ggml_add_inplace(lora_ctx, dest_t, BA); + offload_func_force_inplace(r); + ggml_set_name(r, "r_add_inplace"); + } + else { + r = ggml_add(lora_ctx, base_t, BA); + offload_func(r); + ggml_set_name(r, "r_add"); + + r = ggml_cpy(lora_ctx, r, dest_t); + offload_func(r); + ggml_set_name(r, "r_cpy"); + } + + struct ggml_cgraph gf = ggml_build_forward(r); + + ggml_graph_compute_helper(work_buffer, &gf, n_threads); + + // we won't need these tensors again, reset the context to save memory + ggml_free(lora_ctx); + lora_ctx = ggml_init(params); + lora_tensors.clear(); + + n_tensors++; + if (n_tensors % 4 == 0) { + fprintf(stderr, "."); + } + } + } + + // TODO: this should be in a destructor, it will leak on failure + ggml_free(lora_ctx); + if (base_ctx) { + ggml_free(base_ctx); + } + + const int64_t t_lora_us = ggml_time_us() - t_start_lora_us; + 
fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0); + + return 0; +} + +int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) { + try { + return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads); + } catch (const std::exception & err) { + fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what()); + return 1; + } +} + +int llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, const char * path_base_model, int n_threads) { + try { + return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads); + } catch (const std::exception & err) { + fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what()); + return 1; + } +} + +int llama_get_kv_cache_token_count(const struct llama_context * ctx) { + return ctx->kv_self.n; +} + +#define LLAMA_MAX_RNG_STATE (64*1024) + +void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) { + if (seed == LLAMA_DEFAULT_SEED) { + seed = time(NULL); + } + ctx->rng.seed(seed); +} + +// Returns the *maximum* size of the state +size_t llama_get_state_size(const struct llama_context * ctx) { + // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state. + // for reference, std::mt19937(1337) serializes to 6701 bytes. 
+ const size_t s_rng_size = sizeof(size_t); + const size_t s_rng = LLAMA_MAX_RNG_STATE; + const size_t s_logits_capacity = sizeof(size_t); + const size_t s_logits_size = sizeof(size_t); + const size_t s_logits = ctx->logits.capacity() * sizeof(float); + const size_t s_embedding_size = sizeof(size_t); + const size_t s_embedding = ctx->embedding.size() * sizeof(float); + const size_t s_kv_size = sizeof(size_t); + const size_t s_kv_ntok = sizeof(int); + const size_t s_kv = ctx->kv_self.buf.size; + + const size_t s_total = ( + + s_rng_size + + s_rng + + s_logits_capacity + + s_logits_size + + s_logits + + s_embedding_size + + s_embedding + + s_kv_size + + s_kv_ntok + + s_kv + ); + + return s_total; +} + +// Copies the state to the specified destination address +size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) { + uint8_t * out = dst; + + // copy rng + { + std::stringstream rng_ss; + rng_ss << ctx->rng; + + const size_t rng_size = rng_ss.str().size(); + char rng_buf[LLAMA_MAX_RNG_STATE]; + + memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE); + memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size()); + + memcpy(out, &rng_size, sizeof(rng_size)); out += sizeof(rng_size); + memcpy(out, &rng_buf[0], LLAMA_MAX_RNG_STATE); out += LLAMA_MAX_RNG_STATE; + } + + // copy logits + { + const size_t logits_cap = ctx->logits.capacity(); + const size_t logits_size = ctx->logits.size(); + + memcpy(out, &logits_cap, sizeof(logits_cap)); out += sizeof(logits_cap); + memcpy(out, &logits_size, sizeof(logits_size)); out += sizeof(logits_size); + + if (logits_size) { + memcpy(out, ctx->logits.data(), logits_size * sizeof(float)); + } + + out += logits_cap * sizeof(float); + } + + // copy embeddings + { + const size_t embedding_size = ctx->embedding.size(); + + memcpy(out, &embedding_size, sizeof(embedding_size)); out += sizeof(embedding_size); + + if (embedding_size) { + memcpy(out, ctx->embedding.data(), embedding_size * sizeof(float)); + out += embedding_size * 
sizeof(float); + } + } + + // copy kv cache + { + const auto & kv_self = ctx->kv_self; + const auto & hparams = ctx->model.hparams; + const int n_layer = hparams.n_layer; + const int n_embd = hparams.n_embd; + const int n_ctx = hparams.n_ctx; + + const size_t kv_size = kv_self.buf.size; + const int kv_ntok = llama_get_kv_cache_token_count(ctx); + + memcpy(out, &kv_size, sizeof(kv_size)); out += sizeof(kv_size); + memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok); + + if (kv_size) { + const size_t elt_size = ggml_element_size(kv_self.k); + + ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true }); + ggml_cgraph gf{}; + + ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer); + kout3d->data = out; + out += ggml_nbytes(kout3d); + + ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer); + vout3d->data = out; + out += ggml_nbytes(vout3d); + + ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k, + n_embd, kv_ntok, n_layer, + elt_size*n_embd, elt_size*n_embd*n_ctx, 0); + + ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v, + kv_ntok, n_embd, n_layer, + elt_size*n_ctx, elt_size*n_ctx*n_embd, 0); + + ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d)); + ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, v3d, vout3d)); + ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1); + + ggml_free(cpy_ctx); + } + } + + const size_t written = out - dst; + const size_t max_size = llama_get_state_size(ctx); + + GGML_ASSERT(written <= max_size); + + return written; +} + +// Sets the state reading from the specified source address +size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { + uint8_t * inp = src; + + // set rng + { + size_t rng_size; + char rng_buf[LLAMA_MAX_RNG_STATE]; + + memcpy(&rng_size, inp, sizeof(rng_size)); inp += sizeof(rng_size); + memcpy(&rng_buf[0], inp, LLAMA_MAX_RNG_STATE); inp += LLAMA_MAX_RNG_STATE; + + 
std::stringstream rng_ss; + rng_ss.str(std::string(&rng_buf[0], rng_size)); + rng_ss >> ctx->rng; + + GGML_ASSERT(rng_ss.fail() == false); + } + + // set logits + { + size_t logits_cap; + size_t logits_size; + + memcpy(&logits_cap, inp, sizeof(logits_cap)); inp += sizeof(logits_cap); + memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size); + + GGML_ASSERT(ctx->logits.capacity() == logits_cap); + + if (logits_size) { + ctx->logits.resize(logits_size); + memcpy(ctx->logits.data(), inp, logits_size * sizeof(float)); + } + + inp += logits_cap * sizeof(float); + } + + // set embeddings + { + size_t embedding_size; + + memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size); + + GGML_ASSERT(ctx->embedding.capacity() == embedding_size); + + if (embedding_size) { + memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float)); + inp += embedding_size * sizeof(float); + } + } + + // set kv cache + { + const auto & kv_self = ctx->kv_self; + const auto & hparams = ctx->model.hparams; + const int n_layer = hparams.n_layer; + const int n_embd = hparams.n_embd; + const int n_ctx = hparams.n_ctx; + + size_t kv_size; + int kv_ntok; + + memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size); + memcpy(&kv_ntok, inp, sizeof(kv_ntok)); inp += sizeof(kv_ntok); + + if (kv_size) { + GGML_ASSERT(kv_self.buf.size == kv_size); + + const size_t elt_size = ggml_element_size(kv_self.k); + + ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true }); + ggml_cgraph gf{}; + + ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer); + kin3d->data = (void *) inp; + inp += ggml_nbytes(kin3d); + + ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer); + vin3d->data = (void *) inp; + inp += ggml_nbytes(vin3d); + + ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k, + n_embd, kv_ntok, n_layer, + elt_size*n_embd, elt_size*n_embd*n_ctx, 0); + + 
ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v, + kv_ntok, n_embd, n_layer, + elt_size*n_ctx, elt_size*n_ctx*n_embd, 0); + + ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d)); + ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, vin3d, v3d)); + ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1); + + ggml_free(cpy_ctx); + } + + ctx->kv_self.n = kv_ntok; + } + + const size_t nread = inp - src; + const size_t max_size = llama_get_state_size(ctx); + + GGML_ASSERT(nread <= max_size); + + return nread; +} + +static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + gguf_file file(path_session, "rb"); + GGML_UNUSED(ctx); + GGML_UNUSED(path_session); + GGML_UNUSED(tokens_out); + GGML_UNUSED(n_token_capacity); + GGML_UNUSED(n_token_count_out); + + +// TODO: implement with GGUF format + return true; +} + +bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + try { + return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out); + } catch (const std::exception & err) { + fprintf(stderr, "error loading session file: %s\n", err.what()); + return false; + } +} + +bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { + gguf_file file(path_session, "wb"); + + // TODO: implement with GGUF format + + return true; +} + +int llama_eval( + struct llama_context * ctx, + const llama_token * tokens, + int n_tokens, + int n_past, + int n_threads) { + if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) { + fprintf(stderr, "%s: failed to eval\n", __func__); + return 1; + } + + // get a more accurate load time, upon first eval + // TODO: fix this + if 
(!ctx->has_evaluated_once) { + ctx->t_load_us = ggml_time_us() - ctx->t_start_us; + ctx->has_evaluated_once = true; + } + + return 0; +} + + +int llama_eval_embd( + struct llama_context * ctx, + const float * embd, + int n_tokens, + int n_past, + int n_threads) { + if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) { + fprintf(stderr, "%s: failed to eval\n", __func__); + return 1; + } + + // get a more accurate load time, upon first eval + // TODO: fix this + if (!ctx->has_evaluated_once) { + ctx->t_load_us = ggml_time_us() - ctx->t_start_us; + ctx->has_evaluated_once = true; + } + + return 0; +} + +int llama_eval_export(struct llama_context * ctx, const char * fname) { + const int n_batch = 1; + const int n_ctx = 512 - n_batch; + + const std::vector tmp(n_batch, llama_token_bos()); + + if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) { + fprintf(stderr, "%s: failed to eval\n", __func__); + return 1; + } + + return 0; +} + +int llama_tokenize_with_model( + const struct llama_model * model, + const char * text, + llama_token * tokens, + int n_max_tokens, + bool add_bos) { + auto res = llama_tokenize(model->vocab, text, add_bos); + + if (n_max_tokens < (int) res.size()) { + fprintf(stderr, "%s: too many tokens\n", __func__); + return -((int) res.size()); + } + + for (size_t i = 0; i < res.size(); i++) { + tokens[i] = res[i]; + } + + return res.size(); +} + +int llama_tokenize( + struct llama_context * ctx, + const char * text, + llama_token * tokens, + int n_max_tokens, + bool add_bos) { + return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos); +} + +int llama_n_vocab_from_model(const struct llama_model * model) { + return model->vocab.id_to_token.size(); +} + +int llama_n_ctx_from_model(const struct llama_model * model) { + return model->hparams.n_ctx; +} + +int llama_n_embd_from_model(const struct llama_model * model) { + return model->hparams.n_embd; +} + +int 
llama_n_vocab(const struct llama_context * ctx) { + return ctx->model.vocab.id_to_token.size(); +} + +int llama_n_ctx(const struct llama_context * ctx) { + return ctx->model.hparams.n_ctx; +} + +int llama_n_embd(const struct llama_context * ctx) { + return ctx->model.hparams.n_embd; +} + +int llama_get_vocab_from_model( + const struct llama_model * model, + const char * * strings, + float * scores, + int capacity) { + int n = std::min(capacity, (int) model->vocab.id_to_token.size()); + for (int i = 0; ivocab.id_to_token[i].tok.c_str(); + scores[i] = model->vocab.id_to_token[i].score; + } + return n; +} + +int llama_get_vocab( + const struct llama_context * ctx, + const char * * strings, + float * scores, + int capacity) { + return llama_get_vocab_from_model(&ctx->model, strings, scores, capacity); +} + +float * llama_get_logits(struct llama_context * ctx) { + return ctx->logits.data(); +} + +float * llama_get_embeddings(struct llama_context * ctx) { + return ctx->embedding.data(); +} + +const char * llama_token_to_str_with_model(const struct llama_model * model, llama_token token) { + if (token >= llama_n_vocab_from_model(model)) { + return nullptr; + } + + return model->vocab.id_to_token[token].tok.c_str(); +} + +const char * llama_token_to_str(const struct llama_context * ctx, llama_token token) { + return llama_token_to_str_with_model(&ctx->model, token); +} + +llama_token llama_token_bos() { + return 1; +} + +llama_token llama_token_eos() { + return 2; +} + +llama_token llama_token_nl() { + return 13; +} + +struct llama_timings llama_get_timings(struct llama_context * ctx) { + struct llama_timings result = { + /*.t_start_ms =*/ 1e-3 * ctx->t_start_us, + /*.t_end_ms =*/ 1.00 * ggml_time_ms(), + /*.t_load_ms =*/ 1e-3 * ctx->t_load_us, + /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us, + /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us, + /*.t_eval_ms =*/ 1e-3 * ctx->t_eval_us, + + /*.n_sample =*/ std::max(1, ctx->n_sample), + /*.n_p_eval =*/ std::max(1, ctx->n_p_eval), + 
/*.n_eval =*/ std::max(1, ctx->n_eval), + }; + + return result; +} + +void llama_print_timings(struct llama_context * ctx) { + const llama_timings timings = llama_get_timings(ctx); + + fprintf(stderr, "\n"); + fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, timings.t_load_ms); + fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", + __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample); + fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n", + __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval); + fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", + __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval); + fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms)); +} + +void llama_reset_timings(struct llama_context * ctx) { + ctx->t_start_us = ggml_time_us(); + ctx->t_sample_us = ctx->n_sample = 0; + ctx->t_eval_us = ctx->n_eval = 0; + ctx->t_p_eval_us = ctx->n_p_eval = 0; +} + +const char * llama_print_system_info(void) { + static std::string s; + + s = ""; + s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | "; + s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | "; + s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | "; + s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | "; + s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | "; + s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | "; + s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | "; + s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | "; + s += "F16C = " + 
std::to_string(ggml_cpu_has_f16c()) + " | "; + s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | "; + s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | "; + s += "BLAS = " + std::to_string(ggml_cpu_has_blas()) + " | "; + s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | "; + s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | "; + + return s.c_str(); +} + +// For internal test use +const std::vector>& llama_internal_get_tensor_map(struct llama_context * ctx) { + return ctx->model.tensors_by_name; +} diff --git a/gguf-llama.h b/gguf-llama.h new file mode 100644 index 000000000..20dcc9f63 --- /dev/null +++ b/gguf-llama.h @@ -0,0 +1,468 @@ +#ifndef LLAMA_H +#define LLAMA_H + +#include "ggml.h" +#ifdef GGML_USE_CUBLAS +#include "ggml-cuda.h" +#define LLAMA_MAX_DEVICES GGML_CUDA_MAX_DEVICES +#else +#define LLAMA_MAX_DEVICES 1 +#endif // GGML_USE_CUBLAS +#include +#include +#include + +#ifdef LLAMA_SHARED +# if defined(_WIN32) && !defined(__MINGW32__) +# ifdef LLAMA_BUILD +# define LLAMA_API __declspec(dllexport) +# else +# define LLAMA_API __declspec(dllimport) +# endif +# else +# define LLAMA_API __attribute__ ((visibility ("default"))) +# endif +#else +# define LLAMA_API +#endif + +#ifdef __GNUC__ +# define DEPRECATED(func, hint) func __attribute__((deprecated(hint))) +#elif defined(_MSC_VER) +# define DEPRECATED(func, hint) __declspec(deprecated(hint)) func +#else +# define DEPRECATED(func, hint) func +#endif + +#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt' +#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla' +#define LLAMA_FILE_MAGIC_GGMF 0x67676d66u // 'ggmf' +#define LLAMA_FILE_MAGIC_GGML 0x67676d6cu // 'ggml' +#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn' + +#define LLAMA_FILE_VERSION 3 +#define LLAMA_FILE_MAGIC LLAMA_FILE_MAGIC_GGJT +#define LLAMA_FILE_MAGIC_UNVERSIONED LLAMA_FILE_MAGIC_GGML +#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN +#define LLAMA_SESSION_VERSION 1 + +#define LLAMA_DEFAULT_SEED 
0xFFFFFFFF + +#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL) +// Defined when llama.cpp is compiled with support for offloading model layers to GPU. +#define LLAMA_SUPPORTS_GPU_OFFLOAD +#endif + +#ifndef LLAMA_DEFAULT_RMS_EPS +#define LLAMA_DEFAULT_RMS_EPS 5e-6f +#endif + +#ifdef __cplusplus +extern "C" { +#endif + + // + // C interface + // + // TODO: show sample usage + // + + struct llama_model; + struct llama_context; + + typedef int llama_token; + + typedef struct llama_token_data { + llama_token id; // token id + float logit; // log-odds of the token + float p; // probability of the token + } llama_token_data; + + typedef struct llama_token_data_array { + llama_token_data * data; + size_t size; + bool sorted; + } llama_token_data_array; + + typedef void (*llama_progress_callback)(float progress, void *ctx); + + struct llama_context_params { + uint32_t seed; // RNG seed, -1 for random + int32_t n_ctx; // text context + int32_t n_batch; // prompt processing batch size + int32_t n_gqa; // grouped-query attention (TEMP - will be moved to model hparams) + float rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams) + int32_t n_gpu_layers; // number of layers to store in VRAM + int32_t main_gpu; // the GPU that is used for scratch and small tensors + + const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES) + + // ref: https://github.com/ggerganov/llama.cpp/pull/2054 + float rope_freq_base; // RoPE base frequency + float rope_freq_scale; // RoPE frequency scaling factor + + // called with a progress value between 0 and 1, pass NULL to disable + llama_progress_callback progress_callback; + // context pointer passed to the progress callback + void * progress_callback_user_data; + + // Keep the booleans together to avoid misalignment during copy-by-value. 
+ bool low_vram; // if true, reduce VRAM usage at the cost of performance + bool f16_kv; // use fp16 for KV cache + bool logits_all; // the llama_eval() call computes all logits, not just the last one + bool vocab_only; // only load the vocabulary, no weights + bool use_mmap; // use mmap if possible + bool use_mlock; // force system to keep model in RAM + bool embedding; // embedding mode only + }; + // model file types + enum llama_ftype { + LLAMA_FTYPE_ALL_F32 = 0, + LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 + // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed + // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed + LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors + }; + + // model quantization parameters + typedef struct llama_model_quantize_params { + int nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() + enum llama_ftype ftype; // quantize to this llama_ftype + bool allow_requantize; // allow quantizing non-f32/f16 tensors + bool quantize_output_tensor; // quantize output.weight + } llama_model_quantize_params; + + // grammar types + struct llama_grammar; + + // grammar element type + enum llama_gretype { + // end 
of rule definition + LLAMA_GRETYPE_END = 0, + + // start of alternate definition for rule + LLAMA_GRETYPE_ALT = 1, + + // non-terminal element: reference to rule + LLAMA_GRETYPE_RULE_REF = 2, + + // terminal element: character (code point) + LLAMA_GRETYPE_CHAR = 3, + + // inverse char(s) ([^a], [^a-b] [^abc]) + LLAMA_GRETYPE_CHAR_NOT = 4, + + // modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to + // be an inclusive range ([a-z]) + LLAMA_GRETYPE_CHAR_RNG_UPPER = 5, + + // modifies a preceding LLAMA_GRETYPE_CHAR or + // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA]) + LLAMA_GRETYPE_CHAR_ALT = 6, + }; + + typedef struct llama_grammar_element { + enum llama_gretype type; + uint32_t value; // Unicode code point or rule ID + } llama_grammar_element; + + // performance timing information + struct llama_timings { + double t_start_ms; + double t_end_ms; + double t_load_ms; + double t_sample_ms; + double t_p_eval_ms; + double t_eval_ms; + + int32_t n_sample; + int32_t n_p_eval; + int32_t n_eval; + }; + + LLAMA_API int llama_max_devices(); + + LLAMA_API struct llama_context_params llama_context_default_params(); + LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(); + + LLAMA_API bool llama_mmap_supported(); + LLAMA_API bool llama_mlock_supported(); + + // TODO: not great API - very likely to change + // Initialize the llama + ggml backend + // If numa is true, use NUMA optimizations + // Call once at the start of the program + LLAMA_API void llama_backend_init(bool numa); + // Call once at the end of the program - currently only used for MPI + LLAMA_API void llama_backend_free(); + + LLAMA_API int64_t llama_time_us(); + + LLAMA_API struct llama_model * llama_load_model_from_file( + const char * path_model, + struct llama_context_params params); + + LLAMA_API void llama_free_model(struct llama_model * model); + + LLAMA_API struct llama_context * llama_new_context_with_model( + struct llama_model 
* model, + struct llama_context_params params); + + // Various functions for loading a ggml llama model. + // Allocate (almost) all memory needed for the model. + // Return NULL on failure + LLAMA_API DEPRECATED(struct llama_context * llama_init_from_file( + const char * path_model, + struct llama_context_params params), + "please use llama_load_model_from_file combined with llama_new_context_with_model instead"); + + // Frees all allocated memory + LLAMA_API void llama_free(struct llama_context * ctx); + + // Returns 0 on success + LLAMA_API int llama_model_quantize( + const char * fname_inp, + const char * fname_out, + const llama_model_quantize_params * params); + + // Apply a LoRA adapter to a loaded model + // path_base_model is the path to a higher quality model to use as a base for + // the layers modified by the adapter. Can be NULL to use the current loaded model. + // The model needs to be reloaded before applying a new adapter, otherwise the adapter + // will be applied on top of the previous one + // Returns 0 on success + LLAMA_API DEPRECATED(int llama_apply_lora_from_file( + struct llama_context * ctx, + const char * path_lora, + const char * path_base_model, + int n_threads), + "please use llama_model_apply_lora_from_file instead"); + + LLAMA_API int llama_model_apply_lora_from_file( + const struct llama_model * model, + const char * path_lora, + const char * path_base_model, + int n_threads); + + // Returns the number of tokens in the KV cache + LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx); + + // Sets the current rng seed. + LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed); + + // Returns the maximum size in bytes of the state (rng, logits, embedding + // and kv_cache) - will often be smaller after compacting tokens + LLAMA_API size_t llama_get_state_size(const struct llama_context * ctx); + + // Copies the state to the specified destination address. 
+ // Destination needs to have allocated enough memory. + // Returns the number of bytes copied + LLAMA_API size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst); + + // Set the state reading from the specified address + // Returns the number of bytes read + LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src); + + // Save/load session file + LLAMA_API bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out); + LLAMA_API bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count); + + // Run the llama inference to obtain the logits and probabilities for the next token. + // tokens + n_tokens is the provided batch of new tokens to process + // n_past is the number of tokens to use from previous eval calls + // Returns 0 on success + LLAMA_API int llama_eval( + struct llama_context * ctx, + const llama_token * tokens, + int n_tokens, + int n_past, + int n_threads); + + // Same as llama_eval, but use float matrix input directly. + LLAMA_API int llama_eval_embd( + struct llama_context * ctx, + const float * embd, + int n_tokens, + int n_past, + int n_threads); + + // Export a static computation graph for context of 511 and batch size of 1 + // NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these + // parameters here to keep things simple + // IMPORTANT: do not use for anything else other than debugging and testing! + LLAMA_API int llama_eval_export(struct llama_context * ctx, const char * fname); + + // Convert the provided text into tokens. + // The tokens pointer must be large enough to hold the resulting tokens. 
+ // Returns the number of tokens on success, no more than n_max_tokens + // Returns a negative number on failure - the number of tokens that would have been returned + // TODO: not sure if correct + LLAMA_API int llama_tokenize( + struct llama_context * ctx, + const char * text, + llama_token * tokens, + int n_max_tokens, + bool add_bos); + + LLAMA_API int llama_tokenize_with_model( + const struct llama_model * model, + const char * text, + llama_token * tokens, + int n_max_tokens, + bool add_bos); + + LLAMA_API int llama_n_vocab(const struct llama_context * ctx); + LLAMA_API int llama_n_ctx (const struct llama_context * ctx); + LLAMA_API int llama_n_embd (const struct llama_context * ctx); + + LLAMA_API int llama_n_vocab_from_model(const struct llama_model * model); + LLAMA_API int llama_n_ctx_from_model (const struct llama_model * model); + LLAMA_API int llama_n_embd_from_model (const struct llama_model * model); + + // Get the vocabulary as output parameters. + // Returns number of results. + LLAMA_API int llama_get_vocab( + const struct llama_context * ctx, + const char * * strings, + float * scores, + int capacity); + + LLAMA_API int llama_get_vocab_from_model( + const struct llama_model * model, + const char * * strings, + float * scores, + int capacity); + + // Token logits obtained from the last call to llama_eval() + // The logits for the last token are stored in the last row + // Can be mutated in order to change the probabilities of the next token + // Rows: n_tokens + // Cols: n_vocab + LLAMA_API float * llama_get_logits(struct llama_context * ctx); + + // Get the embeddings for the input + // shape: [n_embd] (1-dimensional) + LLAMA_API float * llama_get_embeddings(struct llama_context * ctx); + + // Token Id -> String. 
Uses the vocabulary in the provided context
+    LLAMA_API const char * llama_token_to_str(
+            const struct llama_context * ctx,
+            llama_token token);
+
+    LLAMA_API const char * llama_token_to_str_with_model(
+            const struct llama_model * model,
+            llama_token token);
+
+    // Special tokens
+    LLAMA_API llama_token llama_token_bos();  // beginning-of-sentence
+    LLAMA_API llama_token llama_token_eos();  // end-of-sentence
+    LLAMA_API llama_token llama_token_nl();   // next-line
+
+    // Grammar
+    //
+    LLAMA_API struct llama_grammar * llama_grammar_init(
+            const llama_grammar_element ** rules,
+            size_t n_rules,
+            size_t start_rule_index);
+
+    LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
+
+    // Sampling functions
+
+    /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
+    LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);
+
+    /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
+    LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);
+
+    /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
+    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, the logits must be directly extracted from the original generation context without being sorted.
+    /// @param guidance_ctx A separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
+    /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
+    LLAMA_API void llama_sample_classifier_free_guidance(
+            struct llama_context * ctx,
+            llama_token_data_array * candidates,
+            struct llama_context * guidance_ctx,
+            float scale);
+
+    /// @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on logits.
+    LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);
+
+    /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
+    LLAMA_API void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep);
+
+    /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
+    LLAMA_API void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
+
+    /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
+    LLAMA_API void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep);
+
+    /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
+    LLAMA_API void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
+    LLAMA_API void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates, float temp);
+
+    /// @details Apply constraints from grammar
+    LLAMA_API void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar);
+
+    /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
+ /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. + /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. + /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. + /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm. + /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. + LLAMA_API llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu); + + /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. + /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. + /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. 
+    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
+    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
+    LLAMA_API llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu);
+
+    /// @details Selects the token with the highest probability.
+    LLAMA_API llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates);
+
+    /// @details Randomly selects a token from the candidates based on their probabilities.
+    LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);
+
+    /// @details Accepts the sampled token into the grammar
+    LLAMA_API void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token);
+
+    // Performance information
+    LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
+    LLAMA_API void llama_print_timings(struct llama_context * ctx);
+    LLAMA_API void llama_reset_timings(struct llama_context * ctx);
+
+    // Print system information
+    LLAMA_API const char * llama_print_system_info(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
+#ifdef LLAMA_API_INTERNAL
+
+#include <vector>
+#include <string>
+struct ggml_tensor;
+
+const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);
+
+#endif
+
+#endif // LLAMA_H