From 3ab338e4905cbfba5b393d279d9cfd3ed41d1611 Mon Sep 17 00:00:00 2001 From: T-Sunm Date: Sun, 23 Nov 2025 16:03:57 +0700 Subject: [PATCH] Fix: Gop code openwebui pipelines vao repo chinh --- assets/Screenshot 2025-11-23 023944.png | Bin 0 -> 14250 bytes assets/Screenshot 2025-11-23 024001.png | Bin 0 -> 15718 bytes assets/Screenshot 2025-11-23 024015.png | Bin 0 -> 121000 bytes assets/Screenshot 2025-11-23 033554.png | Bin 0 -> 148794 bytes .../length.bin | Bin 40000 -> 40000 bytes .../storage/chromadb/chroma.sqlite3 | Bin 5672960 -> 5672960 bytes openwebui/pipelines/.dockerignore | 11 + openwebui/pipelines/.github/FUNDING.yml | 1 + .../.github/workflows/build-docker-image.yaml | 120 +++ .../.github/workflows/docker-build.yaml | 60 ++ .../workflows/merge-docker-images.yaml | 71 ++ openwebui/pipelines/.gitignore | 12 + openwebui/pipelines/.webui_secret_key | 1 + openwebui/pipelines/CONTRIBUTING.md | 50 ++ openwebui/pipelines/Dockerfile | 77 ++ openwebui/pipelines/LICENSE | 21 + openwebui/pipelines/README.md | 194 +++++ .../blueprints/function_calling_blueprint.py | 189 +++++ openwebui/pipelines/config.py | 24 + openwebui/pipelines/dev-docker.sh | 9 + openwebui/pipelines/dev.sh | 2 + openwebui/pipelines/docker-compose.yaml | 19 + openwebui/pipelines/docs/CODE_OF_CONDUCT.md | 75 ++ openwebui/pipelines/docs/SECURITY.md | 32 + openwebui/pipelines/docs/images/header.png | Bin 0 -> 28696 bytes openwebui/pipelines/docs/images/workflow.png | Bin 0 -> 57523 bytes .../filters/conversation_turn_limit_filter.py | 64 ++ .../filters/datadog_filter_pipeline.py | 121 +++ .../filters/detoxify_filter_pipeline.py | 83 ++ .../dynamic_ollama_vision_filter_pipeline.py | 91 ++ .../function_calling_filter_pipeline.py | 100 +++ .../google_translation_filter_pipeline.py | 185 ++++ .../examples/filters/home_assistant_filter.py | 116 +++ .../filters/langfuse_filter_pipeline.py | 333 ++++++++ .../filters/langfuse_v3_filter_pipeline.py | 406 +++++++++ 
.../filters/libretranslate_filter_pipeline.py | 141 ++++ .../filters/llm_translate_filter_pipeline.py | 157 ++++ ...mguard_prompt_injection_filter_pipeline.py | 81 ++ .../filters/mem0_memory_filter_pipeline.py | 140 ++++ .../examples/filters/opik_filter_pipeline.py | 274 ++++++ .../filters/presidio_filter_pipeline.py | 81 ++ .../filters/rate_limit_filter_pipeline.py | 127 +++ .../examples/pipelines/events_pipeline.py | 83 ++ .../integrations/applescript_pipeline.py | 89 ++ .../pipelines/integrations/dify_pipeline.py | 84 ++ .../integrations/flowise_pipeline.py | 428 ++++++++++ .../integrations/langgraph_pipeline/README.md | 28 + .../langgraph_pipeline/langgraph_example.py | 166 ++++ .../langgraph_stream_pipeline.py | 63 ++ .../langgraph_pipeline/requirements.txt | 40 + .../pipelines/integrations/n8n_pipeline.py | 79 ++ .../integrations/python_code_pipeline.py | 50 ++ .../integrations/wikipedia_pipeline.py | 218 +++++ .../providers/anthropic_manifold_pipeline.py | 293 +++++++ .../providers/aws_bedrock_claude_pipeline.py | 285 +++++++ .../aws_bedrock_deepseek_pipeline.py | 187 +++++ .../azure_dalle_manifold_pipeline.py | 89 ++ .../providers/azure_deepseek_r1_pipeline.py | 99 +++ .../providers/azure_jais_core42_pipeline.py | 215 +++++ .../azure_openai_manifold_pipeline.py | 99 +++ .../providers/azure_openai_pipeline.py | 90 ++ .../providers/cloudflare_ai_pipeline.py | 83 ++ .../providers/cohere_manifold_pipeline.py | 163 ++++ .../providers/deepseek_manifold_pipeline.py | 150 ++++ .../providers/google_manifold_pipeline.py | 210 +++++ .../google_vertexai_manifold_pipeline.py | 232 +++++ .../providers/groq_manifold_pipeline.py | 122 +++ .../providers/litellm_manifold_pipeline.py | 135 +++ .../litellm_subprocess_manifold_pipeline.py | 211 +++++ .../pipelines/providers/llama_cpp_pipeline.py | 61 ++ .../providers/mlx_manifold_pipeline.py | 211 +++++ .../pipelines/providers/mlx_pipeline.py | 115 +++ .../providers/ollama_manifold_pipeline.py | 99 +++ 
.../pipelines/providers/ollama_pipeline.py | 55 ++ .../openai_dalle_manifold_pipeline.py | 86 ++ .../providers/openai_manifold_pipeline.py | 130 +++ .../pipelines/providers/openai_pipeline.py | 81 ++ .../providers/perplexity_manifold_pipeline.py | 167 ++++ .../pipelines/rag/haystack_pipeline.py | 108 +++ .../rag/llamaindex_ollama_github_pipeline.py | 94 +++ .../rag/llamaindex_ollama_pipeline.py | 74 ++ .../pipelines/rag/llamaindex_pipeline.py | 49 ++ .../examples/pipelines/rag/r2r_pipeline.py | 45 + .../pipelines/rag/text_to_sql_pipeline.py | 111 +++ .../scaffolds/example_pipeline_scaffold.py | 67 ++ .../scaffolds/filter_pipeline_scaffold.py | 68 ++ .../scaffolds/function_calling_scaffold.py | 33 + .../scaffolds/manifold_pipeline_scaffold.py | 59 ++ openwebui/pipelines/main.py | 789 ++++++++++++++++++ openwebui/pipelines/pipelines/.gitignore | 0 openwebui/pipelines/requirements-minimum.txt | 15 + openwebui/pipelines/requirements.txt | 67 ++ openwebui/pipelines/schemas.py | 22 + openwebui/pipelines/start.bat | 5 + openwebui/pipelines/start.sh | 157 ++++ openwebui/pipelines/utils/pipelines/auth.py | 77 ++ openwebui/pipelines/utils/pipelines/main.py | 153 ++++ openwebui/pipelines/utils/pipelines/misc.py | 35 + src/config/settings.py | 1 + 99 files changed, 10493 insertions(+) create mode 100644 assets/Screenshot 2025-11-23 023944.png create mode 100644 assets/Screenshot 2025-11-23 024001.png create mode 100644 assets/Screenshot 2025-11-23 024015.png create mode 100644 assets/Screenshot 2025-11-23 033554.png create mode 100644 openwebui/pipelines/.dockerignore create mode 100644 openwebui/pipelines/.github/FUNDING.yml create mode 100644 openwebui/pipelines/.github/workflows/build-docker-image.yaml create mode 100644 openwebui/pipelines/.github/workflows/docker-build.yaml create mode 100644 openwebui/pipelines/.github/workflows/merge-docker-images.yaml create mode 100644 openwebui/pipelines/.gitignore create mode 100644 openwebui/pipelines/.webui_secret_key create mode 
100644 openwebui/pipelines/CONTRIBUTING.md create mode 100644 openwebui/pipelines/Dockerfile create mode 100644 openwebui/pipelines/LICENSE create mode 100644 openwebui/pipelines/README.md create mode 100644 openwebui/pipelines/blueprints/function_calling_blueprint.py create mode 100644 openwebui/pipelines/config.py create mode 100644 openwebui/pipelines/dev-docker.sh create mode 100644 openwebui/pipelines/dev.sh create mode 100644 openwebui/pipelines/docker-compose.yaml create mode 100644 openwebui/pipelines/docs/CODE_OF_CONDUCT.md create mode 100644 openwebui/pipelines/docs/SECURITY.md create mode 100644 openwebui/pipelines/docs/images/header.png create mode 100644 openwebui/pipelines/docs/images/workflow.png create mode 100644 openwebui/pipelines/examples/filters/conversation_turn_limit_filter.py create mode 100644 openwebui/pipelines/examples/filters/datadog_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/detoxify_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/dynamic_ollama_vision_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/function_calling_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/google_translation_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/home_assistant_filter.py create mode 100644 openwebui/pipelines/examples/filters/langfuse_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/langfuse_v3_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/libretranslate_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/llm_translate_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/llmguard_prompt_injection_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/mem0_memory_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/opik_filter_pipeline.py create mode 100644 
openwebui/pipelines/examples/filters/presidio_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/filters/rate_limit_filter_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/events_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/integrations/applescript_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/integrations/dify_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/integrations/flowise_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/README.md create mode 100644 openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/langgraph_example.py create mode 100644 openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/langgraph_stream_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/requirements.txt create mode 100644 openwebui/pipelines/examples/pipelines/integrations/n8n_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/integrations/python_code_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/integrations/wikipedia_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/anthropic_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/aws_bedrock_claude_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/aws_bedrock_deepseek_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/azure_dalle_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/azure_deepseek_r1_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/azure_jais_core42_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/azure_openai_manifold_pipeline.py create mode 100644 
openwebui/pipelines/examples/pipelines/providers/azure_openai_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/cloudflare_ai_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/cohere_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/deepseek_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/google_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/google_vertexai_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/groq_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/litellm_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/litellm_subprocess_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/llama_cpp_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/mlx_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/mlx_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/ollama_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/ollama_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/openai_dalle_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/openai_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/openai_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/providers/perplexity_manifold_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/rag/haystack_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/rag/llamaindex_ollama_github_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/rag/llamaindex_ollama_pipeline.py create mode 100644 
openwebui/pipelines/examples/pipelines/rag/llamaindex_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/rag/r2r_pipeline.py create mode 100644 openwebui/pipelines/examples/pipelines/rag/text_to_sql_pipeline.py create mode 100644 openwebui/pipelines/examples/scaffolds/example_pipeline_scaffold.py create mode 100644 openwebui/pipelines/examples/scaffolds/filter_pipeline_scaffold.py create mode 100644 openwebui/pipelines/examples/scaffolds/function_calling_scaffold.py create mode 100644 openwebui/pipelines/examples/scaffolds/manifold_pipeline_scaffold.py create mode 100644 openwebui/pipelines/main.py create mode 100644 openwebui/pipelines/pipelines/.gitignore create mode 100644 openwebui/pipelines/requirements-minimum.txt create mode 100644 openwebui/pipelines/requirements.txt create mode 100644 openwebui/pipelines/schemas.py create mode 100644 openwebui/pipelines/start.bat create mode 100644 openwebui/pipelines/start.sh create mode 100644 openwebui/pipelines/utils/pipelines/auth.py create mode 100644 openwebui/pipelines/utils/pipelines/main.py create mode 100644 openwebui/pipelines/utils/pipelines/misc.py diff --git a/assets/Screenshot 2025-11-23 023944.png b/assets/Screenshot 2025-11-23 023944.png new file mode 100644 index 0000000000000000000000000000000000000000..f7ef14d993ea9c5fa266acfad62bab1232d1ac89 GIT binary patch literal 14250 zcmeIZbySpJ^e+q|prn9Eg93umFm$VgNGaVRCEXo^q_pJFC@qb^(A_ywLo*t5;B&8tpO2f)ewZwEf;kpCn8HSZrm zm-J{}s4U}FEf}E`k_MjrJ8(dxi?aKNdXWtp?i=y;TUAxnJcV?#$AJx+f#Bat@lU6w zre0)5X5yfasFj>F|B@@jC`pjJ5vb z@`-?ez+KNf5Hiva%MXODFRx4fkLwmc5E42yrK24&K=p*4p8k=}UR>Q@<}!mHYsj`4 z-p#DL{PN!}et-YQc5-lP7!>!PNP*0l=O+27Gs6(WT4bB!OL;Ttc=mohoUBxXSh5?n6XlXPbWJ;VN^>Z+0@ zI(-4a5R3BOxC*tq9%=ebb2i8FL=uhHtFjjcWkSxEH?G7y+1j0!Uxr_6fF5Q=FX{ITjj`ovXO#Avoh@rNo1D z?3NV9|L>G$nu)@W9zn)#Ia~cqrwx^-2#Jq>Plyn-aiq``wGTuQ6Fe=MmwKnQ<|6Fe zP=ZRSGuki4UPnEPpKR`!m7VLKK5Mjn;@bC| |siCIIYh277z`FtUgA|pWmWQ8Mh<_ 
zTlnKBQd+upXX=k`zuSo36w}c8tyX(I+=)9Z9LAt&cG>)Te!?c$a>;9&2VfArQ~-GGw?-`&th~jeYq(KnE^?5 zFM_nIO%+LEFC-Awmr)X?KV8TGd^uJN6q>?PbDf*YG{V%0Bbr(FF6cY1#&KLWEvq>k zu@)pBt|c@8t3A59_b*y2@3;bZOj;ai^4F$y(-g&DB_u8jojn}_%8tLDu)0acuWXN1 zyk!b1-hSK0;g_7bl~TQX?sI5N(ja;0?$_`fGq=ws+?xiTZV6Ai7&a@Q;cs1wCd@M{aX(X#VD@N zZW5B?Mom5}@aH8qtF^h{khyw5W6l~qP7F2)Qb4miAinhRJRH)~ zl-9wJVn22-&Rlq;b+i+CqZg_i?^K>B#_$Ju&2JSvFyo;_Ulq}cp7 z8h`(IFq&Q<-Pk~{xzQ)narEihxc1BT9)0L7mu_5JSDTrN4K8-6p>SLybt^%`X|lE{ z66u2{d1Ab4xv+UJ6V!KF1IH2w*FBZSQMGd+f!hvCaPw{~Z_Un4{o0k9;j#bix-X8V z{j>4v<|R|&Fxr3TDw{~mc`D#ib z{8=2}3i_Fug~%LnhTC}Dm1pN&-+1Mi)$p6;bbl#WK}pGc<%_)6#hBO7?M=H~>CY5w zr=Qk??6k_MBQ-J8Vm|@L5!&O^IAFyJMqb`{2MWFVHO-nhZFEB%!=h*rm?*e^Y;^~d zr1q0!!59`L!i-`8?{b>6)F-E$;q?s!`0Dd*?EqVA{~^bMn{lHEI>DCeHry%= z0Io1T+;hxpaFWmaEFlf>Ux`*SR{?KjQd4dJkV)AD3h0*;UT z2`=a_Go6o*Oi3_-V!~Pk3Xqe52M}jsBbfG-D@{Nh_7_(sy)F8P-~g<~!ot+tz`zYnO_>i7S(xojeQI!Hr_oUPtK$67no0WEP9kj~hbCSgo;l;4U6HBE*6!t55Evj=?f9d87IQZ&ZmV{(R`+hOyH<4P|tj`p`zu4f(n{5q~% zDjorLOviG=$9+o!7-$S5&7S4BXU8~-KqD72p>wAXM=I5@uFY$AFKfr=A+4mrA!@F-Ge%cQfkENj zJ3OYziL}~=hR-GzdH!VeMS@&m6;RQT{$GuNBvj4k`^Ot-=f|ux^lzU%??nrQ685iz zl!k)bnhY8cm>!N?rQ~yxJZIRQ%V^-2%K1)k9L8b`(ufy)i`B?NyW)Ba$9Jm8`S`;V zPaP_@8dAEdkYd24VzTwx7WL&@D-gT=Mk;n9jSC9yiA6E4BmwR4b1Nlbg9+iR38H+V zN%24Otwv1z^$Db|YXyt%)gp=a5?0)H-bK5&XEl>{n-U2)mDDBucOEiCM#hJ(hlxOM zwzerTGHG(m=Og(Xe9`E&lCAWa#ZhUQKe+b z8yTTyYU%}#vI|478iY+-*Bf?ax>u`T#!KbFlDRw{9|o6r>4Q@m7O8H?A@nh_W30 z!A4o9Cnp0LV$nFZ^Z}A2gww=`%a`p8=`Z9Jx3)^=`n#metIL-JZVrz@gZ=_>EsG%U z?LMD~t9_S!N}|1=L)}S}X++~ta?YZE(dt$vKVid|{nlDz1B_$Jh+pl&;6uh^-Z55*$I8~f; z_Dr%RwH`*am|g0`dP^~e!y43+|M|!R6MMl!?IxIp3PA>IBI@y|arS~7`s7WF4e=So zXo`!bFf3&5FIrn4n>Akz0Qq^E?tHr4H-k~h$V=&Z22Xs0<)ooFk(FH~mushr#u2#? 
zPyU}bPBa8kf+c_~vy~$$f2M2tG04L2Tr+NM-exp?!@$+515tQClwrvc`@e`;&@At| zl_T40?}bB$za7JHj=@+b0|858T#-yTgIk~k_G1m7i6rE#WD3N1>E~d0x_REdDnREp zp0%rynBu6%6}#a#u32NuR4GOMGBhTbz3^la+}&V{QJ?6~p8a&~Gpl#A$HX%l@P(7t zmKKp{R!n&3fQ%fBdbktB!e?fIxd67$Gua?0v~v*@pP0+cVRo@!7kt9?vhdCCIs`0c z!FJZE3FR-MzU8>)L>@@9mAxU(>x`%RM2D$NmD_>($?*8OE{8WvcSh9}s!s2e<-#%e zrWrdaZsG?4bMny1=-}#{}=0TWj4lZiVY_A znWg7)$@IrKu zziPfOSjzdC1&GiJ>Yox3bTjMdlPZ!YdU}jcM&<2`_mXKct$MoeNS=PZZ(hM;e^JH6?q@6)upSBnlG^$8J~6zeZ_a!N}3WVGtiz_?a>8{n2lsmyEpQI09KzpVEXQ z@g3J!=h_Qg2l16?lV86ZdOMNhMX^v`<8@Tn*KNMFE}xoA+q>X8DS>jkbWJjp8iVUd zh#yRR&RTYPna6UXwo#{)?ay#Lx|Fxe%@>cPZz8!gw)`4!$6$S&`4DtDy1Ag>SOOu5 zQH6%UDJeW9d%0Dam?VPJ#KcaUSM5DJMJQMR-XT%XJ>$-5QDzxV=*u_alDV1^hN`I| ziKiY>fAC_+Q!@f%AnKgTFC>NrbZV@TgO=EDQ}pLtZ5(nZ9{UvIo>F}(sL}H066J4@-1D~_b z&k%31pVI*16qJJ}CnSrV*D4~OG7kLi6u#MtP+DA$v)UrcbFDgskNt}$_ca3_(0KSh!W>>mQ9ieNKW`&8dhuD-YXN84C&<44 ziD+WZ`@eA6`dvc|yzYfkHsSt_a5Y6m%;a0TWR>fzo&2I0=CutMpBNNfFehj}l3xyl z`A2pGB7>A~l$8mKRIa2dW#*$720L4kh#Tg>tl75K?VBw8&8;dn3YE*N(b%pWqW_Mr zRhFoGWM%v9e3OpM>p(87Cx3UWb%&V8j#^6@C-kn({~+K25l(UO8v@B8IT<}B4i2UN zFsQw$scA@fy*ZvPiO6JEclUoldcMxKsDq7^@$MH1(6+wzpBwunGwQL9Y@2`Ex5&f4 zawR!FG9)VNxWmv#*qShjbsx8wzvDvTSEbh_94?!op`ij^7Qz2{60AGus);0x2XtQ| zio@ui75uZ#6Cvs8)X}_n<&5?No{g36{L?xAO#s{>IZE}r87U9(@k{*$$kqJ+gVTB) zz$DF8&3xzE)yM7QQzkF}r7AL6%#@UynS|L=^43hE{3U-Cny&Eo%Mrh}33 z|8nZ&|0h`T_mF(~|B!zAKV&%ODOsi^!tR`1#U-I$;-n?|<9fcyKv@1dcYUYy0Y1Lj z$_?Kh+dqZ;%wrZ^%~htZaBDWH<2vxO!JjxGAfvB}{UC+hh!t<}7NzzM<2OL(q}j{% zVI|wpvQ9@3h07pkMc{=*qqiN4Ix8~?jWy@qR(CUy(59c0Aq_wbTm#KOrdWY5p{#DatrNnp2THFNuH<9!;d$H{%*&_71Ss$6 z}3Q!WK{K8MAUk3ldqj-%QKtc~b$uAgDsBw;7s#%uoFt>jx5L>B2 zEkEawm`Hk{OE1!8cQbS%$Y3nJD>-d<3H^~<)}eGB8g#3u6nZXU6)5mDOZPvXzaFqK zkz$Qo?&7*DvKyV;R(D8_a9s5)To@k+DLNe2dauL%oSm8nYkh;)f)`@)<|)lo(jz(5 zTTvM%E<|Z^kl=Yx_k6o#r^DTz1k*`YuGZu_1YgfN00gM9sx5bo_C)_2HxU|aIxYD! 
zVuKq0*ZG)AXTi-_+O$Kw50|-{6gR!wi(v`2_u5{=vur+gO1BK2#I#J8ENlf*&_sx| z1x~RKCtY)2E?n-~m?`PTD&SE*JE7q%so*J{TvK+t6XKy**LsUSP<(TpwRCi=xrf>=_U55GN?21`+PGK42_X1j3r10!C#?l5&&mzh)f3}@tt%VGVQmyN@%K=&9 zxW!d)*5uE&tL#So+KIwRvhaRmtS8;EAUre~AhCJBHkLPBF#X4lHH-bGoN6kXdHC!T z^V7os$^YulR$jzD=*=km8P@gq`0iFH5W~%BouCJLR zmCfc8^kjzG6^E{3*3b%2dD{mw-rH{J>iX#nph1 zyP9!`iZTDUc#q=paQ{?YwTao{BHqIJ8TwQNF20g%o-B#(KkpE$`(?vnFnX>)linoU{HFLt@J7qBIha z?)7LkiI>6^wIjn>U1rpu45W*eO|M;trm&|4;41AMMgr*#tLsaqOIe7y7?WDUQy*sQ zU`*)O=If0Gu6$m`CK9pHhM`=4 zW3sXX0fedtimC3GYHShD&7!dSzvHpd5(&os?5-HhZ(R|&_(nIYvc8BnGMJc}h)kQt zH^Ik05eM{XweqF;dSIYjixDM3k#EQ|IR+V*^@})T#y*v{JxFdVE<1zfd2&<-44X~5 z4m8|zsxUQ(!++5HjZFKws#)iP#nkydCnM5HBicZ^7XdC$v$!5GmGn#ZNwkLj=Dxv) z>Rv6Vxb2c6(p%>AhS+Bhh=+22zvp7YpnP?B$KeU!UhI#m*|7vwm5v;R$}q1Obf}@%4#pGdF`D0rlSM|{+1LSH&h4ruK9T6E zDxR(`uBW@ZgLXgeRg8YriiEip=2V#ONCuKbw>*5BVA`FCF<_jzcQt|cYlY(0PcH20 zV$~ld2-ADAWLk)Y= zVa$~dYN#L1nKDzkJb7|*vWAwH^K>fz$Io3|2T(#mGfFD6TnTi{erFYl1cnVmX+pax zAqG>xr7a?NqDBh&mJi2wR|8>v5GL(UqUOKqI-$FI1@TVV@jt*CcJs#jOLR^A7bJjagNRzrL|enq@0v_L&zR8#89$CckWzdzd?x)KVBmpu_>VnL^4U0sF2WztLxO(NRQzEADFh#>@H6vQUd+{1iGo&f)-qICuIX+8Kr^_uhXo$|-m zX#O%K_vIhw9hX725y$GqtSu?|AYvMlIgD4R#=mfxjV`TG3I)(!wrH(h1zlad6Z9cd zvm;iOej&Cl#RiSQ+_Ap1TJeGdK-!{*n!RPZElCAY+CMiY*{b>z!6iutiRr#*%^n=_ zWyX1=>qDdzrsN!4B-U`Up+uck^vVe9=S!?v{mrZTWJWBxm%}d>_O4(##bHT+;skRxspdo)WFgy#y_?BvY zd}}ZGbsr)=JS^FLipXwpHXtXLo5lX%wmTJ(t+4D`OMVZj{KucRMFkO+GNt%(EtMin zO12U@$nkaPj>=m?7Ri@q-|G%c^eReJwtR$xjyX9-Pg}mrb3-VQCB2OSAkDtSi{GRv zEe#$_h8@K9%Qu@ZkydHcA z&pl;~ILzovAgmDwm9>H6Ii;tI^mP*J!vvalMfoO{gY(m{Ip~Pq_oWNzF$_Z%FY>-b z-lvovV?AFe&HX4w1;TVxE!=WUQ5(T1te97}4h>G6rfCJy`~{;^6mB}@)3($id5oVq z7f;^@EEWH%Fh~i&ktwX6KMAcn^M-5`g|ch=%4IpEk4y~*mSErb>{Dk3fKD_Id^6+Y zk}*BxScF^|EJ~BB_#uu;R@z*O#2C!ZOUWI{Gv4sT;UXTs^zBdZ;l|L#8E_2>&~7WF zs?R_3yzidM39P($LEDWPdFnEEau~oy-J+rWNBf*N+zC2P`#&$$c~?a)L2lBG(Uwn{ zj33axaZB01jD8^BKk@KOnL+me?5>M=h#7|d+Q#+0M{J&EVUd9#{nz{fk}J-^KzwHD 
zepToVG2`C+{zqJWtujkwvxP7u_@|+M@jV~$+7R8B$31cj5#m)#E|t61C17|Q0N?5MGuX(_>N-eLSynvwqA@<8ZAd`D%gEk*bJ~YG>oZgge`ke1F(upF$IFVY4mn!b42<e=T4Q3`STCc zz_+M19jUI_)_zLwUJL-E#3-|&?hN?YwG~;u?D+EZe2mYz=(^{FSe?ul6&Ao z@uQIzTW@cx!*bT`1DbZLzK^+N@~O2H5;f<7*Ob}wr>yhLN<+M*AgeTzrPhUWNw%H! zA=70P0gpPUQm_=HDxko-%y&Nau-&!h!V3kOp6+cgx3vP_LbL1G|M*<{AMSTpvWr=B57I4mP%OPERkj2=K00b;TpRu3y+A`OU&wh+jTm4% zN}9OtyNHvz{h_gtapjnUsK#GK?SM%gquX0zh1=?L@* z0(UoujFY(3eX9;mO!+s!@W2Q3QSN=(6FS^u%QP1L5m`%S?|94A4;{8LpB*jB$JNc* zXFZK%%S=|OlAlC4NLZAN)sXo+=Y`IXJ!DO0E%{>iQ^o4*b*@SjGBvx&W$7W3xwk-a(wmOxv z^-VObLV!6Aw($nfH_m#_Km5TWe?2ILQQAuXtN=NVdDe0d;p?jFyfQEH!ePb%bSBk? z{w(sHaDVp@%c$(z(CU!r`-eV zqFi-q1Bm6>z$g7PisjE`+6*nO);WxZ>BHS+*{5i~JINA>u!GfhXS?U6$X9=+{MiM` zRje3$SIJ(7QG*Y2@O~(VoOJbe z5~EWN)4c4gTOZNy*#zp$U!lmK|;AduKg5g8E)b;$z}Q7liUp+j}1R%VhWD-4i)wovAV&b zF>{Eld2;q?u<>z558cw`QGc(kb1#v_dq@pWNfTZgaK@{zsLm7(&L_lt`_y$*{YP-QnRHp_6Sy_V{jxY!! zR95&#j(6S?MkuV-wHZVjs{;xz&h7n?&&BnVD=gjRgkC=k3Ds>8-tdWh!jb>0J%W4t z?Tq5b?fHUM*lQr+dDJw?J3K*kQYAA1?Hs`2gR8iFKGb=~P}4j7xtjp%Q_vz*_{E5d z)$Q?j@tYBj!{Th!z6{W*#swCEJ%#lXd{>dUb@*ySp}h;lbwv9Go+Nl-{yHy(0U zJvmreRe^Xim6Ryt&b86*6Si2!{BBFhto)qft!?c$LW!Zl44#N`!+s}Mc!vO($S`7O zDmfL7%jHO}f6vq6Z0%Q$R<(TI-+#>QSw<+$%J8vn*5b?;GvtwHAn#r657`zQwrOr~ zz6N_t$l?<-|9?xONIU6ZS!n5#!PscbbILx*g}bAp7;t@R2aw znRxGV_pDy)nJqT~Kk*IV&{MMoZe1R!(7sqw_(nOGr_Au!pb4aaTBu@;t4h{1>WIUI z!E*(#o@7J+*VhwqKEB%_ZVHa41wV#_ib^tbFI9il8GE+mT%_2317?&1d4lZUNFCly z&ANgZl~b=|%!B@#3IoJlQOI?%yD5e*t(3rmPTMsZ7cxhtNk(`8c=BO6QppqwC%bC% zw7VGCn`~sQj|{n$%9W5G0It)gG+&=2LX20di(Cc_ThqGo&^6|+5o{U0ip8T%!fl35|SkgB;EslG2I?s@Dek(kC{6q-E>+3+&SUPhu22(j|W$@gZ zIMGzfMv+K3G{@qoVs~hQ(=1&5`U3)YqkuiC0o$Gg{lrz)vP2N>q34i=cF@JOf+6|| zC!@Pwe0{8BTa(~~b#}Aha54(FB4mMkJAk?(xZMQuN=P2u*0t#Es(VpX zS5~)Q>W4xtB6AZPXn>@Yw^9So+^?T-?dJ>>pjX>p|8Y}()K}LuFE+tSVe@!t=#M1w zc$+n!fbwC>p18aL-)rXG3Ho^{rn7UL=vy*qT9REGld|=Uul)8T+PLr`d_KKms`-G@4v}QJcUlGyz%vIr=Cel1c_v) zIW}y&B6a!-iE@WK4GLiy2cA3j-Tl9WDMIIVMsB~co6$E09|mAq%Fm2iI(&%ULOARP 
zt;NhK>9rJ9NaOEL+UXw{_8_9|@R&#`(?nCgYm4d#KF=5pr=@ijaN+sjOrT8njzLW3 zoN8!xs@edU2J@NW+JqpOrXSYx3c9~|xNkaFo3;+z(av8M+4;D(wwAW;sUld`==u;L zPoH?0J}9TnAXgY`u^(gjLYFry+6tehRa~Ux@_irC-kZlC&Zq=as(;L^r3H=nW>;nm zqV`sr?sk5%8(W7GvOcD7S`@zPXWCIeeKJRN`L+5f!>#Wl|1z;-o3mNtQ~Ku8ulv45 zHFm(q?PqY<9ripoWYqV+*AHUl$N{Jt(j3(5jnqG+1~7diDn>`Dv$_9O!-uV};o`v)^qFOD-u_!dZ3l$2ad?jY7>* z`>z=w03ZI=;exW>>UqX`*#YKoYm;5GjVsuhz=bfVhXk* z6dLq2$A(tNubu8o4I}hQpPyGduve9fqf9v0L{w2@-*I%u0I&&dxZvQRX9juB73W!Y zFxiX-^tRgS!^9#1}`nFN{Aqe@bo? zyK<;u`=&>jvsLKTMwOlEV07K%n0Cj4l)xf0tY*k7u2vFWtuGbVFF4AR2iG-y7tOml z3meTC7D^{Yqx_H4(-Op+9Dt#^EFvPq_p^TxUMd8d%<~Mus0e2|jd`|K+LbL<*=kCT zQjH&)-KGE*nIT@2Hw&pvr&DOp%|XwRX#IrIXwGcXKcVtO)i*gU%x!A<%+Z(y!X=Dhqno^vo(ON{{HhZZ%d0jg=T zC=Q)3^1*HYL8gtLQen!0%eB!`){FM|{zL!k`{H5wzVPLv4PJOYFA*L-vB@4tCJ6Zb zNEnCEZj>&>!NcDRBS7-YE6jzc)lHh_HMUtLtVv#h)%&3ka~@>3q<9q%peoh~x)!zj z3)AK()}*dX5^9fKo1FKxD_F3i{pGWxYOYpX-fc!SHmBPM-;57 z(>j0(`J0lciB#ROD;2--my~p^n$vmBcw4oCt6nE`of*Jo!1|PJ_w^ngBiE;=RDydi z#vspF3ZvYyKTsgU2Ue2M-l8?=T?}9OCjJQ)Wd`Hn-N|+EU4_u2-q3SCzxxOJlNyrX z3;t?txQMRInEe|{z8<){K9OvL@c8;xZkp>3KCAqf)dZg{-^U*)aIbI^!gLNvq)rO3 zxT1bYMQBWg%B&VPRa4ow3x@?0$xPe20=bCFaz^u~9)}T7y&fvoIMk)wUkeNxH=qF@ zz@u3puLAQzIed76NnXC8OyCQeI-b>t1~s z(me5HF-eSLTzZPXHSV|5N7>#xnAakwsS`aCNinwF*LOupGQnG6&1oH8>0OwlUM8cW z(DG8WjR(tmA-|8(vaaRATN$yA=?+x702<8oaH?l(yuUht<+9!2-N=`IFJ}w-J!mKOG&}wS`nbmK*<%g;!U75-2<+}Y%Ey?13iO)ynjUe2W9@VH;iTCG3 zp|XM%@R@;AuRpc*`C6x@b(X}#oA#kOsw)+xOdTm5A+?eXx9Oul0~Owk1ui?EegD$+ zdU$4++q1fUQO*G3)6EJC;&O}rJTcKR=pO4oo?e|F+jGA2CC|il<&@g?o%dF&mIHj~ z1ufjZpKrE1oA^>1LZ7WD6#O{xNU_N%NqVzPY#Xs`S12I%{i`Mov`C%SUsQPZCIz;x zjFHp!<~z*%t^Hvny_*%WEY!7q_rZqzUT=HK&SUvS**mS~ELX1y&n5ouTmMbJNTGG@ zx+6Svzv{kdKkP!yAu3PbjDa1tbu2M#j77AW2;k09Tj^lMUD+}lxPc$KoyELM0pVVF z7RC0wAA+G*>&rGN<>zx@ZG$i>a@?2By-vf1o?jp7UwUP}*?>Z1@1q>H!%Iy78TC)p zrk+K-O4(}rQ^4@ne>7}bBFZY!II$xny2%&I9*fnsKOD8W?&lB=1z~H4Gm0m-NSRYP)3FSr;5o+oD#r|$~fsd2ok>ZIxUq{Vv6YOd=1{b6&_56l+P z-}gYagAT{rdc!>h+$*aA{R5+C%UD7lqVSs;I`MPFuj|Xjt0bUwz^j-2HaQF4mc0d% 
z(rQI6CpnoiHoGVevFx4@V*wJ5^pA>9U02nkcl5^QLB_ueK(Nmwaq)A%?Bt#}=g~ji zL5dBuu_s4sx0>lvJU`aCpcFk!Rkj&=6h@%<%){aKxhycs@DRno7eO~pPQt9B4elvG|SrAqDzITNM@w&hGs z*)3bI@XhM2IxmXAO)EUt`F6n12&fWX?~ZtW0xqbJG!=}ik|P+9Oxf23i^*@SXLv`_ zze?Z#<=)f1I9HdH+1!#d)9m6@5M7jmmg`!h1U!#O?Y&RmL*L8YtJS@cJ@_|{2&{aV8*6z77JUcNm*uy{vBO>Jde^>B^IzyVjDmJ`_> z_t!xgrt``>uEf_WDYh0?SIJj zYi#teE|FHwGmnsN^Yzx53E#1;OjW)*Kme54=Kx-vtJsm^|>#G zign3pxRM>$YLwO=^cj-Bk^$t9Fw_r!9;W+zfOH2{;}oPp%&Hb7$}b#M0Pc2xR~MQ) z!#|YfrIJ8f1ooZe_y2@BTNQ=!T5l9WVv37b+T7^bxQo^x*Zd5-^LjlyxSF1GdWBXa z<~8U~1-8VtFt&bZtG-kJl=OS-FNlE0AMB|KR3SUl{mMJnJLV(vBP8vLj)n{s7N=77 zXP?gchNX+hle(CyWMu`Y-s_c5FAzx<-=pPB9YiRc&N(kO6;h4*(KFdCx7lx=I_Vw2 zGA(y!ErWL0`?a)McfLWC74S;_u=q;US57NfOJzC94zUEt@6nsd!nPS21e)VTU1*s# z50I@JsNfzFDTt@sot;sm-#$cF@{*H9oF7$_V~Wwyf$CheL_&0qKG9Z(k@$XEKlqFN zP;Q0xy8|KnzUgfvI(k2nEX-J|FBGfJ+Y&_p3HP$S|7NaZkVIkAa2(GodXFSg%qsVN zWw^tKa@x-~v8B%HZJxLV{g`%c2nEL`lCNM_%0}*{vRzC0Jb@#hoq9Gjt$NJ>-qA zOpx^HnVn}6ixekwGDCp_6YC2_8%eKn$62R6o0E}7_s9)6HE2%HMGez&Xy)Q9A>??_ zg!dwGWQV!4$;Fs&gEA2FBiX-ZPihs*4Ej$i`vzYu{I6}=#QuL8z@l#&M%l(K#D1|_) zyiGed>kTs-MWn#PD)n9~_%9L{eX?`c1%yWob#^Tu@c+TlDdK5CX*&E!*B_0vlbrO4mUVo@-Uq-?9kU+VdI<|dl zlDIQXskoA{QN6xP#56klDERc$do;J39P4RJqg2u%j%X!ds+g5*r^deh>PM>!W8Ux` zLS(y8{jSS_`APK)jKZqVxmC&R!ljMc+tM#((CM@+uD2ICkC>Ne=O(b`XH<;0$DK7Z zN8fHU#M;aUd2Uq^hCd&OBs?mzu)e4Mt!i3nGG%^vC&1DADIuzQ4=YFUe&cyrWe2(^ zljK_Ws)%RWMVc%4<72z?t$rt8lmTj6AJp=BZrM;mfKQ2pfZELiaMXONSnPRwYWA1K zj{m7dziu(lAp>@o!Sbb=tA_{aB_)h~e6Q{pF_)3O1o?wgP(R-b>{-3l*o!vqt_^9RyR+9c*B% zMD~ArKlSB~^TVf7JD37bHi+Sa-h9cJ>HU^4bpf5g{miGRlcK#4Y?`E6=wPvx-eRFh zQfFnFag9xMnQzxk_z@yHHKN@~aVN|EgU1sLE|%1jWn0m_UO17&(qhtbIF}}b-U5yy zMipbktW(<`pN}L9dbuOLr_Ml$+&Gm%&5XvX%n-F+z)`g0rs+|NP+1@&!h*{I<9+!A z=4P3uE81+4i5Ks>$&lFg2j;Wqbj4XF6JiKc7Nc`;H;Qrv%xvlqL^R*ZSkQCm)7U7F*2? 
zUCAf9o{iD5Mx9)~gdO0P7*KIJVM4$QQM2%G2NZD5x2k>&BccI=;IyT?20h{ilG;pt zTOc1Msnk+uprdq3(m8?$Nw`**F;^&c5+ImR zOI+XN=L~-_=6mo1p-t0SustOZ`1CwA&1#SnAR4X%U2p3lGf@AMe@CT3^ zImQz45x5Lc)T$b&rs31Mif|moL-!keyH`#+A`|eem8*6ym{X$RC$nDn(aZ%NJ-q?( zgbQZX>ZT)?z+*WjC=dl^^k!#BwPsVnav?SJ#i@l5rV*-FUPZSsSM%tktxa2ddd4~b zBKmPJs9w1#^pmIQ1%=gp!RnbJkG+G2NlKV4OdO2m5=y5R2))j>1a_i!3kMVz4@)Y?WHcsI~@!MeRg z>?OB5tT~6n!j)qvE(i}oMumnyo`Q;ODXhQhvsnuD`$Tq@TIj&eT1>tw4T}j5lMkz) z&pPJy(RYE`boi&n={hGc3127INog-+Vkra3?)NrEPk>ARbwSw?_vzLpo82QZ+VL@k z#YMz9>ka>K??q+tka#$<>=X$KHE(xNG4$#OU)$%#Rw1k3 z0=E+@*=sjTzegk7af^)uCui!0n7+)HF>Y~)4?+h=UI@12TIaGJuShA6veu50%=!IM zqOM?Mg!}sPHL#r-0JWrKE5;$Oog7fxdf8dfqJ6#?{bI(V%{7Wd9^zOB^i35yOQSzC zsAJrHIbdz79+a1{-X$jT9gfGbB<72ECJOhc8Nh5VMx$I@K{{(H%I)*=7l;F+Cs*Y}|U9`FWR;Sn_RM|ITH$jt%F| z>Hz%E;2u3JCO!2d+sA;S|0=0*#2VN3{Ge3zX)k~F+zpxOT)MN_1JmD{8TX@)7K`@7 zBLbceb+5~w4%yYhOZ9}wmd^tb3C1yTZ2^0N>`MMm$q9m*o(f)Zzdn(V7`zMnT8{IVjvuE?gBGT~@BMMMQjSt39ZiM31oW56}#o zx_5^zQRfFATqz!bi)Mnd z(g(ZqUYbL`84@fp3{i}_>|1jWCTZtH-vpSK@OBd=e4 z)@jb|eL9xbMzUULx5T}-=x;YY=mU8fft|EYQTCW_BI=8*$0FSBCg0Gl2iwjRR@VB0 zyb%dqNc@}nPp77ZiAtWxd#+;qBj;b;3j+p{krv~AXei!tBe}Ho3pRfKaMrmXhKSnc zVU1ZPlzX$vlgNY2-`j^2SL_;+z)&EDo^6{Hd%Xt?EB1=N-ObIfsTI4}4?NHYx_HF( z^oVtjmpBkgV&S9ajnR?m(kjT2-9_mKIfp|oJpZ(|^bUCP)8XUYMQ!tgEbha*a$MrH zTSWNfy%23Jx;LQ`UAW;H-1lGaP6}8`ypVL=aTC!s5`v}nFix1btkM!gI!J5+jC->G zWH(0`tlgq}UV|lTNSKhJAk?L0ywdmK9SR#EQT{(y%Zvu2zGK3}<;;vexX?Rbdn$Yem?xbC(o5u+AVW;x69y3AiAWsP%->Np+av$rH_iv*KW5uVQeM) zS(VQl8oh|({nVbh9CF+6oGg(pXLo@SE;r69R+qIkO+)WC{vAI=>Sj%$Y zh;#hf@L0fDZ7H^9+#|G=GchNs4-v-70!&)(4Y`{Mlr=h9_lKi6qnH}q<=|Ime$iu{ za+Kem6Y|C$o^#29ujFEg+_^!$v`0acbwKY32YVTDz#ce)zm>~IY$karS%>~r@x3JQ z-MLxtysy!DH4eU6r04R+7j=FxAUo~zf*GB%1!%Ax2I#f1FD@aOmhT5H%s=|4U}@uB zJZ|6tv3(aBoono1w65z$#?Ew$WjLm<>x{cWPI!_XGDH-e`O3p`@DHk+;v?~kU2(! zgOKBvi^LpAX&m;YR{h0SwP*~Ki;z2_^oh*siB8T|AU;pO(N*mtJWKPuP|-4U>`T9? 
z#WF|qb%Us>K8D)wvnv)m(X^FNH2q*od72l|dr#ZKDQ}jP!3WH{VW?uyqF*}ilR|%V zlkQb#Kl$8EI7Vx@OGB0q$d1BkmV)FfNkz$tb=>(=94dzVWzv92g%-l0O{mc}p)ksB zTR-DuEG%P1SUKkMcD%ViLMVtTjtN(2aHErEMCQxKcLII{iDG?SzQ4g&!$@_hs+xAl2iJeiL!o|vVs-NudDiU>uC z?C8p*EBp$MwO>8Gu1ad~ij+P%o2l0?S8YBzOeMn*yv5b8j*2*oASLt4PEEamx$ z4+`DRXH}H)C4QrkK0Mxj!YcoyzzQC=_k zwl<>mt+@8x=i(6wiLOO@b2)|G=kfD~dFy|8E{qJkWbPXwm%dZn`TO1Nci-$qtOJfz z^WqnFsN{Y%2pn=!jjSy8mfQyoZHRJkxNx&`Lx`TNTo~pamlHssg1PW5_G>LV``kU% zTH@fMY<5LeK10xTqb6QML|5O!7IhJW=_Nx!BuQ7h#NDHNPzXjlCi{DX#(|w8#`{dn zJJg+mCr0~mQ&@2w(&XWNIW6U&i&3GQFs90HE33~{E7f-$bqD|(n>lU_M_H&pG-&n) zgWY2rZ51J+>U`Q$UNk-M1W_%mW(0rTnxh?pGtFULmA?GcdkL^(!A> zg2TJS-HeZ0@VKY|d51!`+N>J&L%tawSHqqwu^hnNp!+@1CXtHX?5@DH8v@v#tA%WxZ0CZ;^vD|2iAN{UyL@u{H86Ik?e~E?y(ni#1Ee1JKKa2L8o!q~NU&w+ zBEe&N*Gb^UBL9JDwt?3AESP{&({UXat?2j(IIe*P?iuFGM<*3?ewMRVz?sEpaq=p= zdT74bDa^a^FuB*>=uJhczw6C#7tdw~!>%QQr*)5EsuEN04Jtf+mYP?WkQfr_`i=W* zb(i@zNp~y}MfvofP-Lc}1P}{(B7eQ84_U`5T&=G@n%{CR4f~4H{uwzozvaUY9c#|r z5{0>pGb$*uTcjtl^WS04Ct#MpGUM^I8JY4NVYIGFET zg|_4UUL5t5$t6F=N-&I20T)NpEejrM8|^+kOuB1930-%pVO!;->R?z}I0%F&8Tqs4 z@SAiCa@*dj$oW8dK`?#8w?(}In`oUwE-|BT_A(BsJ>`5bEiGsy?8+%!VtKL8 z?4JB^64mZ|A)Bob5SK&$ohF4Q;3B19VS)qXWA#2yI+DL@vk(-i7O15>l+yzOk}?>Y zWL3QPJ^P5hS;Sy3ydl(BFweCgFl^C(*L%iVF4*dF&%GtQ@P&QqhJfdIa0jU2H0oOv zZ~;z*p-sM_=iH43PbrXL0!fqeN=2p+8~^>^IJ*zEC?6{csH63OA`5Aug9T z*@+C+WV!U~3#KXd7Gy_=6w1a9={4UGgX#S{%h^~^R~M|Mt5L}Y%8|w2zpQnQFOlqh z4{UcbV;^!X;=ENfKvVR+WYt3-2VE^--a#nI81-PZKUghE5DNv9cex@auE69S5^WX- z>#0^K{Zcln8z$snS9fs}^ z-ZAQ!U=ar!8hdlKVx3Ig=h7p#*nhOKeWuFz*#&l1v%I41Ts$+eyqv_g2;1l!<(zqH zyBG6QuQV}CFn2&A23*^M{Y*s@5#)GEi!uEptkXF8XR&AxE7EfmU*BbC;u}j!lWUSw zQP~QD`$HkL>-vfeOvFQ&ogZ5f8JIS^FR9io8pd+P-R+0LH@=%G78CYFe8#>fvu2>C znbzkxl&yT-=lc!w^PqAP*=L2sDa|LdZWNtVONW6zJ9`o3AYfSZTSzoWlCoe2ejxYYo6Lm6hUyO^x-l}RlC`O)>#35-& z4}8$L`cJw}omREklX&OKeK+^JMpPuNO%K@;p6>%fVynwFURk@T67Twol;1QWfN~dw zb^S=Hca?;9`Z%cAy~_ZPQm1@6@VtCk^ifZw!xsSi5h1-P-QOV2U6Dja;BcLyuM_1& zquI~jF-IkdUQyBnx$Ua14s*#kBm*ZT2tHlmAntvgn-CH{9%*w`3Zc_dUMv)_(x 
zxOT7f^cue3VO;KuR&iqg#MyO{q+1qshJO)^Y?=e0_tG7Ypxzk`5fM7Fq8BrqQ_Wx3uww7Y5Ic<#iXrp9xbAGYLFjg9U^z|;vnTjLW6O$D0Tl7WC3A59 ztNczsAVK!FK45iLt`M9N^=C0bXKD5k6>v1Wiqh< zT2k_Vxvcn#w+#H2vsxM@E6$7a0WJ)|;j#EN!)vl`T}{ZznZW-q&Nu3cVV? zys!v6s;*;eiUwbn7Y#jFjAz?U*i$^oL`&!TBj2&ds-Zp2iAsA%nEo{w@P9o=0&7do zv1ve&qBSf-9v&ZN2#-trSo14|8Q~Ngp{-V{*;D?^l-V?P1$&dSXWW1p+|{OehCb2nNTE%)s%km85U| zp-*i2gb_2#UhvBtm7p}svJtg|V#eF@AB+yZ8!MFGG6(__spD7iU+fR(FEJhsCInXh z&M;1xZ{icCm@8nQdh9ucxBNi}%NlC^mi*X1vwNfI+PNb7EsP*LUrH#77at`nyYi7Q zl74p5z*tw^mTS{S)i+@mk7oF+8`X%!5i)cT8AL&oWc~c4A=&i)SwW{a+~cUuuYB8y zX3TZX;_Jy@dYX`HKeMaegswuSRhCE+Nipk-9?ilX4pe5H4`>s=!!Vy>C*u+B_2z{M zOR>2JxijQX81WVq>MIf#0~of-v)3M4c|cwq1Y;L$9SrwD@J{207hu}b`4f#h?kUq1 zhx0$xDPqX+2Gg;K!rVBox!fd%`#3ht<*t3p47dw)_iOSd_jL5{Hq_uCGerB-h0Bb- zdc3EQPSt057UNIm$=dW%JgtyP-kb2Q;*mQcLK1`JmA{O)n~Mgn)*!Mat>^?U<%rG! z1I(xMX?Pd^1QYlDV478NlNW^K^^r%UVPj3K*?J+bRq!GzHB;b8*c+d4phodqW z<@drGW$l(-WxQOA*L!0lv|ce17GhQP+QJW|O42AM2Ph16xrNg(#1Wrds)mxMaHZgP z={1ELlP7JXoXCy(+Rs6%t6O<5&Lfl|3ss}~8<%eEWoiMRHj-@X`o0fC4664Rk!Y7s zGzwGRM{J!O+U;02Y@1d#rS7M*;z=>MxvN?&%rS8!OBdE3s8yzrb5C7?X%=3c`QZ16 ztFI3Lt%LBK*@)jGBqIh1tkO;!pL!!Y0g++HE;8#btp%#dS$m zf>$YC6la{B-ZGP%|NPOaxrK$C#)0k^;a6NO%fc57g#}z%U0hlk%R_r1!hFT-EY_xw zfWY>pRK7o?ug}c9VQ-+-?d@QQ)dH0X)+_X=#rzMv1B~WL5iaY!VufVC+ut7AXV87? 
zsQjn%r@o%?U;lTT$_yqjS}Ojg{CouVZM;-G{RsXmm=&${n_j2v4gCYDYb3K;?Nj~( zB)`CyzG8rrN%J*k3O-^}Luji8ZLfXo+S*!7!O4F}<nibyLd$!3I3syZ+PWDobe4!1=bV&zeB8V zfaL#u@Kt30&j|GYY2vZ|qdQel9{Aj3Z*9RSs8bkn_RF4k4c;tl{pO)_Xb6p*Z<-4C z-+D&FSPLR%rkLq`8ei7*-BkEF_W>GsEs5Q6B1ipReAvWbPG=?y4Ym|1aEoWmLx6O!CW@N~HbVf4xFOLby}KRO_WDQ__Hz60!`+3TInMIvR|^*v_z54nR~PEBQ=*G6`fUJ zo*nX&u6KbowKNPfM;a&PBKt_G8x2n!Bo-<(ZT(kn1%T0S6v&KWJ7h&=UPqTOC}uB5 z<}5D4RP)ITou zt`IlwYak|WnDF6hup2a{DVOlz2qdoBa3&}CK$X&|07M@@u0#=P`wiA}8$_dF^hYrF z2h7ky$}g%w%q)G4Ke8|#CeHCpW+=KE?DkFz9gwV`XK3G39aTlX3Av@B&!LKHQ~S;$ zNF+uCX2@#he<$qtDh^v|hqvehG57Q%)4``_$>ifk4qED>;is^$#8MS*ZMCkbMk&LCfrrs%%~2rxz{j&W9_ z|Lgu&0f5QtGg7T>OC^mOP3O3wB96O{g%vpU>2mC@g9MWw1gc$hD^_S78XY+dgwrX5a&|G-Hg?E8;icc%ArLNu3pt`Gw)v=X&rpCd z@p(Vx=OVwrrE^I2#i~Y|XKP6iRa@?;?aVB)Ro ze#8S55N7mn+)G(h;sdxt^i5^9Zh%KZT ze%1B&IQj1wkN^xwNcmK<(eDjvW#;b8yV48%Frksa(A__Dy5=R?d_*}N8;})K#s%su z-c9!PE}E!hy#rjj_02qVnkdrz%yFMG+hBm@KYOsqg_JEoQ;A|{ZfE(je*iN8Zry3m z)2Rieq|gHCkYc!@c2)FCJ$+yv$S?ts*vSak@4Vrf|3$pqg>!qdW1ip@8KbhQE&vJ*kCp9_KQ0_nWy|mz+|)XC)!W=00y%jgSL^l z?W4la@{QLVHzp=dn_UNCpUrA5TU@t%6t8$$%Ubar5kk8B_wqC{+&{U^Um5}LLNDZ$< zwz>VlbP6%z3yV%EfW!1J*m>wsH7jG@Aj!s@!fMN;G1Ow3>3r03E12a%@cVuF#>Uy0 z{w`EoLBOy(3g{;)3wU*r13OdTr`x_QH|tnRmFTk1!G4P7hw@GQuEkH-bk1pPl|H2uv!aN@{JszY{*A*jjvV4Y!GjsTaD)B4zYX9@GLBV zGyoDb#Ag^rH>4oBC4jq@c&-W9yJiA7rYQIxjb)Mbh39WyTC8 z)Q4nm*ilX72^Z?U)fA!|>-)WKPEgplf?G;rITrwX{X6pX1xu%psf$R4!MTKxp9Fpc zT0nW+U_mqv=>U+KWu`Wdhx!F{dCbXg^K7anAzcb=>cFqfQW zIp^v(rtX;+p5sX+M^e>*2I%3rWB7UmOSH!P5Q5}mczWi1PxlKPsgAk|IGh`z-VVs@ ze1zJKcCN_s*5FP1A|c7&(@v&1>#6wLYE9nCZbUY)<8g~$TZ`LNcsT^TR{2>}|NQgx z4dzw1$Tn98@AEElTQw7YOu8#7I3g*4<&I8^O0?3kZl<-89+k+{`Z>Ng`v|3dv=R{I zS7XC{a8Oc>j-UK1%1IW*OM~Bo0!9?zSKedrT^RAvYx0!lul%F(6|$0q%Umw!+sQ5K zAl$y|Q}1Cow|hTRRf*3M*QW!~oKg>)oWXinAUxtn*{-yxZ5$2lE2s3?xBdTv1>uRh#6_Ao*Q_(A;i$9wK=Mes4d^06(pT)Fo*fT}cCHTqdBTg1zY z5g|bbH9=NRP>JhWkcOH&vKl&un5m-%vICdpY3$J3ucYI}_J#|@j5;}K5Q<-L#Slud zqgBOc!!g6_ zNef|;x1v?efxtlHn!9|)l#!L&YU14z=k>Z?V^Ob9m!b%F1Xl%^1FmA=Bk0 
zS0wHfFtN3sPQ4zLr$qNhSt1&;p59l>LZCL0M`|rXQ07HXCzABzitu$ezovKc|F)Ye zjqx6GX(9()&dDZ4c!?=TA^icnUYWUAc2WN+*h=zk56sk2A{1YG*aCx@Ia3%l z73n^Dn4_^@4B+FL$Dt(ct-I}db6~{Sdxz-}zR79qYUn9~ZP`_bVi!i(|a|y7yKCz=MQnvENk^PoV`$d=M-Oc;etQY>psqZ0&wKnTZaY*8e z$q^UJFu=nF?Zjprg>I;>)@E$GWUNX98`h%9cD&i<0Gc4ET#uQJC5-i`s2Z4uRxWU| z`;7pY@6V5c;#W??W=-)xX?!Ie>vijuwGFSOFSd0&tvYb)&yQ5-%kP3SmuTGp{P@l^ zw>gP6u-E(ug|14Cxa-}pWO1HtRVqq| z`=gp`0c>ExkVaH9RrU*mLq4m(V^`K|&q^32Z!CsW$i3C{-`7RJ-OO6BWizDlMj^4} zg#~*E+3k>|P;)uL&7Pe#y1JCsAAYjt0lpi>-=?^-!a83dZfd>tDv_D@d+PN~ySWrF zapMJ`&U>l0Ucxx&u*>>h96e;nfYA#q%<}i4_)F2@8h>vr-K$}7?f8+B9}6SI zl6qKB+P1={p7XY8oT%+Xu>4Qe$Ci9&``Xt{4OO)%X{$pwgH7_k7PTBtJPr1TJ(LtPA@^*CFL{T(@$t9$)J| z-Z26lUaE*+4%na+dQe3`E9J8n--*t-EKd39F#zy}^kF?6S|-4Eyl@8aH$nTYT)&Z^Md9fg@#&zI}mx(FypAW=<%e(L5{SUxTFL%VLB%VYV7{`7|Ht#CGRbsbn zknn>cfKjMaPmlF$E*qiV>;F`>DV)=ywnw`jM`!*icENCL`L4zf;5+KQZx)mL6%$eI z644oh5tQ-UhRxM&mfmAzOlWKf#+Q_EK!3T*4;@3#{x;y&fCNVZLL)XlC*XQXTz(JX z&~zdF3Y*`bB3Ts)G}HZ#@CK>8>E>%tMX_(3E3{6Ndq zQWw_&KH@6OS<2Ml&7P5|d`kL4txV2BZ;h27ixP+_Xc(#*Au8PCrNVb09}h|`Ncj9r zJv~QHHGwLCB)~$mmquYedu~F}=wT$`*Qkvj`cS5RwNYn6+CpH%xEVw7)eRFKKJ8{7U{$2(&^w z@ni@5IFH2cn9bPVlScx{-9E|ka6@GeHX3XbhnuKaw^5T1GV?T`3v|T@>9*Wi`U~U> znRVj>lty1@G*06E@aBcC)o?(o$NWx0Xa_$Dnkw3~hvEv#6GKRW=0VJY4hFT9r z4;_O?j{K~?vgB8?wqHQ=JKdzdYG7UBGXT!1&d#sl&E;+$l=C0p z-b~q;Q;CxQlyg6z0>6pa8_p)uDny86s3!+>21?*0JUOQFl_prt+sFMdI)QMrwz4Z3Jv@y5?>3XXwUvM*69QYi)NZMfPz9?vgI-BmG z%^$F{WuEOe&Z@|lm7Ij z)*a_3`W)f<>4=RZpx?Hn3(zW-x3SmWoN*reEB49IoBKLx&tKb1@yo+CNNE%a(D3tK z3?daw6>B>5m7DWgu)>&=_wfQVqX6$yl@n@xJ=*@NxHbOhfwRNt0zDo~RM?@V26SHKdA(6V4GXz=)c*U`Xz9sA_Ac6f4p#|+LQ^~Ox7cCT|8%e+9{NMpK+*TH=9Y~ix8s9 z7jMRy1p0$x_U9J@y$8=d?EPaE%t;x$FH`oVL>yY!+3`;IZEKk}plK`@MK-<}15RGi zlxiWQyBOn3wK+VF;s@WVL!WHn{9+V_R2-r+mirV zvA_3zLznnC@JKwSxBc<18YEb|8pXn+J7KP+j6Ko{m%;d|FrOix3DY0tlmW|`+V1Ic zEzzIH^peN+YKI(6lE(qV*C-2Cnia-WWO+TLGd)YX|M58MRqf`dlQXA|<=Lv&ny@`! 
z5-@1zJz3+#gsaW9?ZwHvr2s~+ZVOlb-@bD)k-fKPz=Q@@K2GjC;&{oXb1rr58$Ms& zT3_=`Sk$`3=3k1qPlOLGR?Dj#PS!}B$pdL3zfa;xxBs5uk?`W6C`ch{RKNng0jqv% z)hoiY-7GXI=Fd{5T`A?5L|QKiRu^uhha(ff5}B@*Jn8PL04b#S{efeZ%gXx0ss^7U z;02Fqxt&ipHAYf}LuCC^4$C8y82F2Ml;4XH<4D4PK&8Ye;4E6za~RDrW;pI#|2&gDku~IcMtmSL zH_)@8)#RV*kkja+(&wm*AXJK=s7;j&1N#l~D3K6O>FUbLHzRtmZ0I|z3(c)~T1CKK zfEr5Qy}Ht=tiKiGeRan^FZPvQ%&T23ubLAt!(TkQ%BS6-#G=Sg+k|HQX83%tgRgh1Hg#GZE8A%{V}93Xts{N=^jEV`C1NO^yL8x zZ*(QJ(N6k&_e-2kJ^%RZIkjD`fMmAQiJmpz0e@{~gJ&$tW=<2)6`YBiLz-~#)TWh6>uZsA=2C?fd zCI8Ho@T9%w3*&0>(D`g-KT55@ b7laZn!3j8K)L*avjRt%dkrpl!)b;;g*dI2c literal 0 HcmV?d00001 diff --git a/assets/Screenshot 2025-11-23 024015.png b/assets/Screenshot 2025-11-23 024015.png new file mode 100644 index 0000000000000000000000000000000000000000..bfdbff2c10f13b4488e83fe48042d54356841221 GIT binary patch literal 121000 zcmeFYXH=6*7d8x{pn@PG(m_E*rS}@Z1}IHHDbl2d-a-!~AW9JeA_CHtD$;vTfFQj{ z@1YYQ37sU=eDR#~to8o?e!XkGvlisO=bp0nls(s6dnWRgw%WC;>{lr$D6YLwf2K!4 zLA6UkaRGUmnjB+xF*uz3LkZGTdqPn%z_~$wxd?o$^_YU9JeKy<@)G&|io3coh=PJ1 z`S*{q)9t4X1qCVh#k0q+{mij*{-$rn&PlsTPao^PPl=GT&9#3>KT+_cmr`J;;fFz+ zVQ+z_rq+q$#+%+$bN!K*xV6{GqguJ!*UB?_4PM@J)P7C>^MT`n$CCYBwV|_<L7dE6B7 zPf%z3aohaaKOraj)pe17^XaKRh+z3Q?L)qs?NR@x{mQs{knv9u_1tV>tqB5#6{PLc zh={Lwo~u2!B+Tho5$M?gRN$YiahNa(cf&uzyc0|l@6HG5*O~t*rptS=CqZC}hcOP5 zAo1Ck8b6WpuT)3+5B*r;D!SPEEHD}Vmv;XOvX@^Uc;t!S@n?*y=#XoNKlyld?M1HY zzp|Tx2Eg+o7I5c`yS#d9uYbN;Ou4i-Q8RDxSDEv@n=MgYu(iF_UK?B6jnw@IBnS(V)Xy5{S{iQlh|C9VXDlc=%?q_L-l%3IKi zYvZ|Y<;QjYDe5m8)KvrI*^<`|5E;Xhr2omHKVACvQFLbZt?TKg@wcwIO6xqj6T`ig zISkT+CqHSg3?p?Q-*if?t#Zr4+&UZ^^hxkmvp25>m5EeIh(7x;qk@* z58-g`buIVchZY50%^6mP4Y+Jk{BHMm^Jh(L;q~1&pPS>hh&@-DM7cn?75c^!4<`Nu*BoLm7@kNhAsN_oF#_gLWWHZdqt#sw-j9 zE@|GfZPvWchEBkIz(Gi+f;{N;%dMV+Jj?$#DnJ{;!6Ly^xKgOrY{V zO@@Dv^1YHi0=+N42U_w;zfHySO9t0oxj5&5Maw5W>FW%Qa@Y+Nhu*uwFTPWN3PT}z z&_qf7+tt>Lp-RDbNZb(;?eK5627leNC|~R{{w{vV4S62|2;CLAouy5>m-4(9ZZ&WG zlaqLn%KQLL0&H`0n!q#KFf-E)7AkelmYqqNJH}Y0fu*O1(sOJ(n*5tbvUv&<9zF_R z(83ECJ~rZLP+F@DIG^R<=wDs=z 
zNGH)Sxy6tS%+?BhR$@@c#K}OL(3r!B3q`ZS1QLbCt}>Cp@@&Kf-x;mZV=y=7vsog@ zo{(C3_WJ%#B%Cg=NDMN?a_MSgaXcHArN1v0vLu*rG_!B*w?WB+|6SMDIPp%^tHf}L zrJihHJGEryY_x@IuEz80WH@Wz{A*Vh})fpN0=)WB_LE44B@Eu+q zQhj})E->w<2cOZ7hONBP*-aeDAhgddad#0Kt6K{)ah$`NcAjz0)WT9p4DvDzdD=IIYTd52VWzw@v9CMs(^(X-_S|KR6hr}5N* zy?nhOdqn`Uurfsz2rN(|@Xw6Y9n1MVD}C2re?kP)f)^5<<|x%INBBAFMx}uqpd(vG zk>;AT<-7)2A0=4yv{?m)*B8KZJ77xfwCm}Iy0M9|q>rFx=m{XXvBEAcT%dFzMcN&fn5gD^|Y^K#xt$qJ3t*)jRrv!7}g zOp0%?;Bma!yCaGd)k3h%iNzR`1!B)`v{6&4$+XgM2`*5Lzs}gJ>9IyObszysiVdQa zTV2U}a!r`Wr~}~IwqIbzx1O{+b64o;^Yn>#QQVVh zmGiDPJT3R2?7r+>qjJ`lF`Sz2?!M;7os}0eX4~(hpN?OZ$TS^}JSPB+-o{!Ew4=PP zk~0sA^>ILC5+&A}wuDip@il->QQ)yk0}Tz+Y`si;m~R_U?Lz8)zc4s5esthXOT=o5TCZtl)mGBkY2`K=DrTO_OiA(k64HpxUPuSy%8R|ebH1~eX5xKH~Q-74)5=r*7DL6t0z zzL&yt79G31@Bg$dIZBcT5E}9We;nX{tRV?+3WX`J?Q(6WNHVy#&M7VE=U-}VZM|qf zS=XiIJv|J@a@rjXPOT*=5X+W3`#(aO-=)&0?@U!jUQ7xfzel!n>4-Zp@8Dvj7`w*g`_lte_H6CHl zf^Rx(ZhxBUP5h7E6jCQW^b+0BHTU*_@)H_lo6afTz(l5ZFP1t zLfu@ILtq@plAW2i57ryj*ko?q4-FHyt#*p7qhVvS@q4SbJu@4cW2$WhxU^x>{Ib-%c~+?ul0c6D z)+q{|<_&{)GDBncTLbPDlzDrQYaY$EE4g!&P@s27*t}`wi-D)DZmD_6CA~sBEm=>+ zZQYwnwX6On&*RcH-wi3N?gc&*33#qJ`2B}I@mKI#qKUP<_bzarAr*^A4^5qP7yqoy z1#;t{$S*ch=MP8&r((u1Z?Xnkw%OKzqb%AXxcsu2w@o(4rmt$T#y27)<@l~Vody2#XhVoY(c2?=U`@JIrIh?Ex5Ud^@@ zFe5qQB>;Vlx)HA1Z+k0+h4&hrioO}L z-O3s$>F6G*2dBs8a)rN@(m-HENPRTcz#A6uYoVkg#$IKM)=#TEyVKc6HPW%00M=;I z`9>mBM711B&FP)wL zwuZgEsHR_vI0D8ONN$@O2ZPyfbkX0qp@{@Tr&}#sP|5A*uX+Fc^7^#kSm6}-Dn+VoWb>_kp7Hea~q}?h4X&zjw5cBAARAGq9i$5NrETccl!nd z_fU@r!S&boyNktQ!^e<&W16>IJOa_=FXGDrF{=vjX7=-Qcf~s~nOwX5zn49lszp(0 zL+KVY0aXOcPX3VyrS-|gztJ|8azJ4iGJwf%hshi&lChjj!SZ7M?1~#KFu}oN?RMcs zB5?spFWqUPu58%uE+qO{<%ZFYkL1bla5*;CZjGZ*)@}fu$E~BtL+6w)deCgYw7eV{ zdlVd33F+vp&Yv_MEV1=l11crsubgacwl0?tT9R&Oh7R>bzgSzcy}d8%;Wl_zxKeJ8 zhcH|5X^#dXn#BN_S8V1hCXzXzvv3^C$dUh)2_2UV%*|A|gh(W|Z1uP2=f*Euetq^M zVZA0X;|q%6zo96e>w%DK!v8TgXqJnJFmH}vJ*VvUJ+vKOke;O*1+Pg_qZ0aPkX(dz z28$LR%d;!OOM!|7N9}j>lysBb>=fVl&9TiQ#AI-%AFd#2#18r|I>L>1q>rm0 
z$^LE!xv1SzqTf_!VAGWV9slH-JlaPC0RzHFmi)2;J-0mVEpwG47K{B5Ylm`&3yg#K zEW^yWK=(54p#0b$`fwKTCuFP|4SkMBz)dFVYgStXmR5ytNfzL*Q6#skp&7X6r9eJE zZ)+xMr8iB*hv1)FAxR-L@{DO`C31tO+$0)_<53&Z`#8KyENlB$n9@OpBnl@pTt{H# zZVsXag)>;KeB!OX<$JHGI5#&Zl<27%i6A`vOM4*IrzUrmH;s8GPioayn29xp^=vWXLj@yWg3RS~oVxKS+Ct+t%)L#*5aO zrN=*df7~^Ncm3{_0fFMAg*$)uMa^Q(`b4PQ|J$FY%@W?Rwe>tB=W`MoI!ZbO91lRA?Mx_FWUyxy&#ZsCc{523MIpWt6k{MY$!ew zFf8Gene&R5JNosL{~u!NrB=P?Dj5|%n(0>}9<6eHp2IGOXIk93FmYjRIc{IUznykt zSAuO6{U%GRb`53PU9P>=eQ6*6aJI(p)w<_{E6ksGTBx31UJ&`hDe*CGWVx(vRwJ`q zA*C)`Nz!@Y2Y-$1VPJx5$x0IRy2*Rb1hbE+yWNxRCL-5XLo+}5MIp!JDPl7~$ajn*ocu|59eT#gf`qrM|T&PY$KpMB=1Fw~xeON>#4sFby5< zL^0Hyv;F&HMn#exr=onA(&M#Ls2TL7LR0nCu|m|_mcr{K_MRk>qt7PDjeqRcYcjpI z+M87GnR|17;~rEiNyPJ%y!>WT?fjSyA}|@P4q{=|41Dhrg21D_*F{D{X@O(^ za$4#Ijh?Gljg5`hY#8G>41RxN*2v|Dub?0N<>k$_1p<-N~KIzU_P6<0&tH)lyB9}$s~U$P8UI?>m6V4T@W2B6tH_Ac zDhw2s3qPekm;S-;|5t3pSH}M<8o{Ddd}KtOG?(`F zt@sNa4StE3VMq*Czo-gtECLvF9B{K za~1^_3HS5ZwbvI?!X>_2bf#@{n`q`wQ%$`Ri+%$%Wb{ehpf!Am!$8Fsdu{BczCI64 ziLWVRk}{v9Xg2Uzj1t|O`H-b=wt}7>f6kpY<2QkE)=7D}|7qX~zTyl$6BYSFJ{3BZ zG2Oc4%P;D-;iDJA-cKWX028C@Dw$WD4vDuu*?+xo+WWl-&=;_v)Zy$j(3gX2@+_4L z{f7dL^gznn^f^aUW++phSw&Z_m9mD#9b2j;1um-g-2>4hMTO5C`tR0`oGB4#qu!zTe))!Y-4Q+<<(CQKCqO=wMyt zZwHnaU= zam|f2j1kzRC(v^F=iw(t^p^m$8PU-l>d>uTz&ZUtv)c6R-g{!1jvj3E^$B`H>M3mP zU2v_KSLl&KK}Qz-7UdJ8l}q{=w5poQ$|qikn2x?{;n9_75msP)Eyp(DYFR!woI?z) zWdZV}j=5$b1930Xcl1o#ZICe9EbCI2g67}+M%6i-+t4*wT}-P35e*BlRterc9#0z` zy%!Vp0AksZbysaFlQd>@@U^+GnMMJ*!V_(H%1L=h zZMMky5~&(a@%*Isy~!Q^PGKHHX&FLD^{KPwa!cG>Yuxm;4nm`TG#%(xelIY(`3gt0 zks6AJkl~rSy*dJh3C*{&J57{GSs|vZuz+NO-ZgUF5|JIXP7~}LHK|Y>4NQpqSZ>0= zk))+h$2T^~ZI~wh*I85_0F4AMHJ@F$e7OwD4melZ4+B#r7k!#r%-EfLbPGy#eIUzM z;llN5-cOnnipn~+lfz!$b=z9mOY36W>*FV$KxrsI1eVz+A?|C$x^EC68eSjD1l$GK1-T zRJxjeZE^WNw7e+urxi0{bsFZ^9S0Sr;Z@jG-*mOT0x8XPKOQN^i3NC!b%gx86kP*( zWw^{|ZJ47C+PF!AApfJGu4I#UW|4<@hw1iL8k4oVZln(QscLDzO-pl3`F^dGRZC;9 zf6yNaUPqfR)b0y3<2h6xBo2#db_y>qXsLu32xce;Q-*FKPhJlMidBbVV>A5&;ZH5m 
zJPKj#)qD{bZ-2QAOs3>eo$0mio7gBiQsdt$Gz%3gk2wO~XF801^>(xd?J(qf%SrJo zCMMUqqPJSL@YVeybsvXVVHe&rwY9_Q2||!%B~gq^cJ<4+2c2%GDG-(R#~h_19>u4H zpki=Dy-8~D9m{nkLpraG`;QbzCCfTPTL*Q0xf)4zUQ=o2SGCW2*0dDlth9a0u;(Fj z2(!G+Gw8D0wbQzT8CS@pOSw*y)iT2jvzj)?37ix{g?9UaKZEkEZZKt1`6@4aq{>s9}0|H#r5F zfDC`p=pLz&BU|Z?)2wzG=*ZO7Z z4{W_=0iF!~uC9Y4-~@~zsMl$-x{^@4tBEY8!VW)DW3U2Q+f`VyzL0>ax&l58aCelb z`tc}-euAh5XZzkbjovBsq+T*NikCQ;EHzNkh9^qEEVwYXTW3qze_IwJE86HhY2TY(SRaJ^9PX4^B_0}^pRrX$d%)R=#nfvdI2KXO-;Ry5SHs)sS z@9bnN@VD!4gdHW%BcAm%-8)jaM$KbIbadIe8N-1hTvA-9*+tnU z_hdEhZ;5c3nlsN?S?E+8)ViqL?(psM3_0!wR07(o#Je!lp2n6j$?a5Rj_8%OWmSFI z#%_JMxeW^qvt(8M6=$EKrg!RS?cEf_Ck{OuH){5Yiu=+$1dKNGD1=qd^fhC(3p#4N z3t`u0YB@LzOT{3HSb3ptBIJD?PT0>Fy?917vu9EXLo>$%?(dfE>bm6Ju6p~&*!ky$!g9o(%lc_ zZn^aN>|I&g3yYdbROtHIZ3dKW#-`&nUE?S@@SEjL4aKVdQJLq10~gy>Xe zpYuBsF8~c`?i6jt5Xpp5si$)_8=YJb!IuremaKx?^k6$WYY+BO4$jW0%7PqjK<`v( z+paZ@^>Ke?tfczESbl@(9ekss1n?U+a1=jyFNg#ItpJX%~g7GV`jo79x+_nQ9G+UnZDn&1XM? z@ocprVd)a5p(ALQuZaGnUn>0#zKpN(r70;c{vn3YIG7?c_P==ZB+maB=BlPFGlO4P z6;+8uNAS_yJTm|aKem1*HG26~{oQZ305tNp(q5$JR{_Hq&sEnSo`qu0T^FYZY#c?B78_}@j7}v~WEo?r z{FufY1t^?u2@lGJ?6Y1jzLS*(VvEt4ms-=@q89}A(NeJypIl7eoPV-vr^yGi?5#pA4^H6Qc?qGdTWCjq4>2fTQ% zvs5spFJ&sUY+ieDB5ndPt-aY`X#X`q`|VqH@D0Le`p^Ei2yxRe)<2DLXRr&tUOCp* z`Y9HyfQ$-s)50{QviJPGhLvvm@`Md7^Dn8j#qq}%r5!XH!H+Bz0)nXw7iL5F(uB}D z%W7{Ky(~b})mIksGzPX&&-y!x9av69p^YM+zte2xi6cM*7ZNZA%i~e`av7fhlgdX5 zGH;6e(_MSR1QND$aiI(&B`()P2xlXrz%DyzSs{TzsXK#O22-33f0$u9e zx=3&KQ}e9DoAQB0Gb7YVkApBtqEK#cFB3keK%Ag7Mvn2p4SM~KvyK|eNppx@wGN(~ zvxbjIbcjZB8t4Q*7k863De#wW{udVCBg3Vhsn;+>R7GWDylfUy^Lqd^Y>=1LK56)_>FS##8DylcBbTRz{ zg!1C()!Qxp8M8oUpiJ`msFi7Wlc2L4~WItT{?rA+e3#G}~T1a(6SQ83*X1NwF$@uGcD@F;ZsRKZlL`2l* z5V1%<5GG-SQ`zRzQt#NRFzITVQpyKh4EA~2w5msEsYWBT*vP3EQah3=fjYq|qo2kim+_2RGUD}uPWQ0P~Vj|$cX zWwww}r2|65H&i>;>+B4@FSTsVBTi&qvrOoi*?>yW88)0jdn)n@S=kY9%R>MIab3*K zSM242pAYHdo+1#@mQc^oVu=QhA&L}X|U8Y0tT@k~ts8a#`DUR=a z+x)A-rK6)ftN|=L{j6Ky05OI2E{ugU`qiefu<0icx*0^qY=@BfyI!vikTk_3_js9E 
z&wZ^t?hV?>G{CMQeVNqab+juY6#@7Yr`AZVC#R6voOC>#yv+HcX6HnRau&D9Z#z|; zJXhcjT&}t3OP4#A{v7k`e3^`n@13=BJ!Tkg3K;mYfraxB?Uo0P%9K~O2q1(tbimBN z>CwchTeX14swTS{rSREfgM^bS5XrB9{&a2(!<7uho&Dxg1U{f&zI+5Moo&}jTRzwr zNmBwv?#UphM(>_nqGX};rE$m9&rcG1J+$=`QhY7Oe(3*&M!#-iG}}e#RAKDT=ixyw zw6wfwyGl*#+crk>lVv@42Nm}wnuYrk?cJ?G~gmCJ}^Z5F!SoUDww>KGe z2;bGZ1Y+{L6l`#ovDRKH&F_VCRD zs*aSS!3EMU_}4*o>-iyNY%}UaPZopqItMUuFxTN?re8PDMfD-`-J7Fk3nAAYSt$0y ztq~xn>Sx$wo`!s#FKuC-al^_7&<8%!WnLgqomOoq)NCnq z-V^pXo)F?xf(lUj9;GJweSF%f16svTR|PFR=JpDl9xHu$1iD3^>W2Ht*_EDfzF^;D z?7D}DYWOYx4}$p`ZI%A1y`w|icgs}R0v=skD^m#^ye}pe2LR)Db~|}M=yYEzR%%^c zUCm_i-=~MlaWZD}0huu>YZaE7b9Op&jMd*HTN*87E%PSK$1qHm4DHf`)<=}ztw4DY zK>V7@aRv8lcy7g6tErUhV&Z+KNlN0O@xVnj5qF2l{pp}tLc0nz{CZCi!)d%tiyM)( z&5g>H7PuL`h6f`#0A1fM={|#VwFYVNvP$I8^v0j|RW?3?sUmdhc<$;rwFU(N8SAaM zt*523Mk2(XH8q4Z&HLvz)VXdh)x>=8rkHFAD;{X42#CH~tE{;-c2buM;pa7HRkz&Mkz^Sukf`%DC9u>N#UEk-%M0&tf)x#+F2ma+Llc4kGZ){DZhZS^nGvl z`kM~t1E@pwFrSEb3vH+7Q)^m86Fk21eiI`@OorNMh4C}V`q&7=@S){O0FhG4amsCG za(`UqzF-uu7=dZrA3}g?Gn0I*4MlP357>nUbH9lnX6ZKPU4E4 zr>nZpUl0@YgQ4;Cil|k$QR*G~6}QKfuVw2~x8X61m_r41q%x30KqhZ$4t@%8XTn&G zmmihE6t>)+TJJvs-j7S6Pxv(t!}x{lc{{$UKb0G%KU@6r5^sO8E+M!PH2?B8nKk#+ zO6D=N_#DL|!anxvjReJVUaG4&z7V&JkuyK5>QZmM38Xvhpm4KZrzQmYutcxUah1lWEbi^)XBS2r z%1>IvCZ6rOx_&^MgvJf_Hthwb8w&fw$BW{`q#Wah#*W?!ie5Mtl+l|r&-NK7TB=T9 z4O1zpnK{@NGfz{}k*Dku`}6fl+vfm~F`fN-1v&M53z?+~+9%!~XQLm1EWAkUQRZ9M zI(RD%n=%25sh&lL`-1oayGvb2fPZOw%CsX{qX`6*OrSIWqNe^->C!-9U=>2q(3)m z_4Pq)Q$b*}5zp~^=-SUCB}m@=0H`^mS)b?tumNs5@OxF0kFV5$MS>2q&C*{GW@##| zcVnEr5OFfrC8^^Hn(9*YMRhsh?`5QR6FOrAcgf5q$2-j`$Hzlzcv9vN;G32tI>8jLp{ ziVarS>wE19(c#AtG@JI#^7V2Wx^!0>)a?uk8`&yxlqlt6s;4;RUCx*z-FH*+HB4@& zTRQ?D!|RwK=gIlQi}Z*miZd<35&+miU*KA5vi4hfe;tvMdA=*nfcH|Ur;!RIB`yt5 z)Vi+&9vDw5GYsXiQD&31CP%;*!j$19Os6{c@|_}eFu zp+S4Q8s8k>LLD;Nj9lgR{R{VwMsCVQlp2blbB!_~#|{}G$G-DHoy?uq5hk^+E*XN) z$K(71K8$RA(bc??_A^B4@vW` zc^Xb{tDFFTTkWWbpR`^R)U)}>qbiuI<$dR`GmjSi3vrHze1D4!z^CqbdNJgzvCq*U^!#b@+v=Z2Vn9s~ 
zvNy88pqQO>Jb{Raj_#vh`0G3U3w}0lHu9t1=;_@ssc~Lh`W;neKlHu>#sQ{7`QoOZ zJ39-K9gEg0U2)v||9CaX|A6;y{O^{7`i21BJ>Xw48S88QkJ}GdmvY}F!unDeyE|-B z{Xa#hi{UJ%qSdXSO2CZ2mX7!6)%DDL5O)5$hSz~zjA`$r)G{Z3(S&=*N8=0~t+)Pe zYa29bWqno|51?V1n1@AdYoks@qAvM=oQ;IgU47#DRO6xTs+|>5V~US^Y8)kt8$vDb z4DO%n?pGlmsF}o~+^-kn&Hg&Bz3V+&clv>dl2<_A^36wu@NmG)Tk^~T)3ic z_FLlGjE5nz{29qOBv8qPW;5B|;dj4a#Q}H&W!) zusc4lONL^~=g}aK_KG;;iQmV!kRfjlTv(o@p2uNpx!(Pb?!TxwlPN6K$EJ}{d982S zy+?85c+rGby6N$JM3SFdlD`$>>X>#B2is5Wdf=_y7|Ad*`Emu0O2?yaYs-eMQVVU7N zp^t}Dr|<2{kWFt<$0zu;YY1vLRue^S#8rN{gdKdZLF_bPQT3z&e`tSvfa&0mB%5sn zy9AHDJ#j3cmAE3a&k|L@+N%-8?mAk0ShQ=tL}~r|`@8IbbwsrH3xnk+FOoi5vUb#x z#Mvdcoh6B#v98^iu&D{}g}1dfVsuE^@`3e{?>`n<4oVB1Gu2=p7t>+X{T8CP^1kIW zw}WQ7Yk)CBl7AD3&Km7D`e#{kW)Qxf`LZ_5s!P!E;R?l6=YqpjL!sENZm*5tT~zj8 z6|AGZ^UcmPx8(DAU|0GTIIg9TdWi#4Q+L7slsatN_mcA=Ksq|I4ws_E#*_KYG_JU@ zdQ-8pLxQre)oOXEN142%gRXj86Tjz2Z&H#=3?ArUDlL-qNtQTI-Xj7y!Yp$-n=K`a znH{4e72Y}fV;&+!?4x(PGvaeAEp}U`sNW7MIA(aknE%*`;6ytl$yFVi08K@81U^00 zh#u#jJ}jw~=YYgESr!?Ib*?kp%ny!p;nO>%laNOo{yl!%p8>WL*jGrA?*+TkWl zZfP|sW@b=f0Z;mJSh(9QP|AU{@SpR`N(eeD^ksMLiv|^`wQ;B(2IohM31q=!GU;nb zTOgcF$w1;n=ru%xb#!Up{;5kn5qG=V6_&M*NVLL z{FOqFMc=}5ku3{Vbs=yAnyEpP@2vd=-`vw3k1@SOj^-3!X?95lpR@=W$y0VO>HC3B z14d6iH8g3gxzv)<6Eik4N^^?1tS#~_xAH5mE|q@F?f}QuWqwOavw+EP@lQ5q^|^G` z&xUm{^?F_iHWe>cO>r`jB)PDDq;CPeCx(U1XKO?Ux$;kbOjXS_G^HSgml z5XtXNp;-hT-#DhOVA;>yZXxU9N;>M|i$Hwd(ql*xhk5C9`I)P3-A2XJ=Ib;R*B)(u zS!}vIJoOVJ8*?&tO*$ZG<>!QEp-E$dd&7c{g=GCq1*VEQXrIJIUdF$E;P8`eh9$TS zypvg&*`E(2XkWf|DzJ#tkEG_Zu$@CM`ca+$1#Js^ePf)BdcJoy3>}xFsvt!?e7CmS z9UF9HB;*nc1`qF-J--|x6tI5~J^6k+RkxOkPspBV(V_+(5xOj;5y&0;ZUQ}A<(0G(t2`+6+84+iS<7rHt{ zZcOri$X8bCh%5!ly6TfJq?GDRO~}aeyZl*7_ovUFm3{c3Ww9KueD0=+SDc%Yy>WJ* zq4x5fvga}xX*YW#u`YzNbBg)?veJ{Be&I;F63i3~zVB-VVDBCgdU82BVkJUD8EbLM9JVo5)?{J#d(s2=#k87az1Kz80t z&fbb=sPJF~+crA|--$YxBcnOXvf6(^StnQqi zUL;pz1Z}+HSE6F>5-{??M>6g6+||84^ow%@@C0JJ7=h+mol1pA`F!43t|%_Sk@{5g zBT zrlfv3Y_I+F4;dtBI|uM1iv|A1=Vky~3HjW@TNFwu+4;!+LaclbZs^4jc3`J%Ci%_; 
zytm|)caj;J>8LHA5y?_wwK$K9XKy-YvrshbJy*LVTkh(LhZi~!FWNzH4cmKU6fp`%UYOVL1iL~3%m?v|fayCNwzE!*OC(?n- zI8O#Hr&MROjyKT{j2j(GpQIl#b(_0`>vPHTM;_vGmEF zzK`f(GD>sy-A9SeNz$3#vC^ah8sZLwkH+KWdxXQe`aZ39M359Bqz+jV%?jC0Y#vzG z)(PcaA7E5?^fg>f$aYy5{9*lBSc!L{K0YSZJ=S&YOxIG9%bS@n<%3lnd|3va*3j(V&RC|>@IPo}Nv<2- zRUqG#Ti%n|81}j-z(re|E*Ez1koC%Tvu4tja;!EWY*j^&sj;~)LKmUY{TC?9+o^ja11Mr`cXGLU6X4>A<^t%sR_#TH;92-}}q#ht1$uKOd!rH@KhE z`*esO9!Xjbc9cycir9TvwsjF}6v8jD@m`72n$2=iJ_kG-72!J8oc# z`mlJO-Uu3Vn{jE{#Mi(RXr%1`4Tl}h(h+_A7*dyfR$w4on;bcp0d#EhIN2`yj(&Ex zZ*`F$cs*#+T&d9z{vrvOxMq6VJ=qm8e&-;FKXDEyzF3hkhf(^z9h&Yi;(O(UjMdIi zX~*-mo`NpNrAwE`vGacA(?3DVOEu?m2;4fGgx|IbU%*G%_itD}y6_uT(n+B4z`@?? zS#gRYthroh{{Eb=90KFeY`XFWU7wMx_btfK^`iskqa#w~u*L<;GBxgE7cVi?@QLLZ z+EU@GDr)&u+nl^RTWatKC1%raqYw!^+y34_;vxo6a@r8ME!bx{23VBc zEa&k?crK$J6_EvuI=q4cYq?gZE~s*{kt*9Lk{CpDgSb*K(^~PhAQMbKAgv8fUl;to zQF!+*IU_usGsBqe?k z@Y_yGPXjc>dp~ug%r`p9Wyw6Ph_6C|)mh!PBj<HnJ&@F;WF=MQZB8bNz!I3 z*Gwy8vTy(3c0;xakcjLq^sg*vmZ{C@<(ml^!$Q2I3c_W|x82`2pH3m$JuY3+QF!5W z%oZN&dI$G2+syOR^UQj@zuYK2ItrI5rwWv`Y9sxwa62iiqA5G?*#cGXxRD-Rp^+|k z0}F=NYUVJPLC;?%#R**cIrK4~M18%N1(TMK?Tj$DR8HPHCwxxhmaoKCxb@D=q`Sj^Qo^Ly@_Dd^Q-FMaLc6W1(a#GL+^`k*~1tw;l*#5piS zkdn_Sy^qTxVsMV$xLh#Gr<72fKl9n$j~^I&f5(+2xL@$3p5{t8tu6z-8vfjoq;m%L z99MRd&{vxR*M#0LS;NAO==u3~P;;FRcJi^04zDL7V0Ral&wi#_q-ravAA>ZGb{>i( z?Unv+Nw|`O`?R_fGKR-j>{|34xIgCIp05NUJQV=SRf7VJW|<~hnGSi35;Txjr=I%* z+^DuGWQ=V^^7A3Q`7{#`qP2)hCD%2u+?V#m$K}ppsxbDvVqu7x;$$KnIEnw;lkuZ& z<~1~?jaiXC(N>en+ggdnEtmpdxxjZ!@H%}O5duIx{<~<@{VFbuj_ae?j zwsGUld-p0`2HV#~(zGoP@g`f#{ccKDMu3r=FOK|2LfO~TRt*UCxtu7LUN~;Ra|YQS z7k3HUxR+#@4ouO%NnePejWw$MGxk!Mc=$s65#*}dL7YGt<7hyhHCFP;fNO7h#6I}v^N$RO)P z0W;O;#Khx+m=HyL{(+>*H!t)KHGz37cvjbBfNW7A0?t^;dy{{eG)If zV7{znz51WZxdK|}Gly{S`PSTq`K6X91DBNK;qQ+pgz@on?_H(3)I!lGLnyK6wpy@n zL+IJcgDHST9CnsGz`*u*^&4MTkJSSxQR8Z z39-u7^U&j=g*`cKBkgi%?6S9R4X7SW*z=;9%dALxivN5(3JY9ied)$6z13li2Ou}h z7r!Fp8#QegD}7sr82qgEwD)Z&*va=9pfbg4bHWK-il9$S zYJ{@+A*M8+fuO z4$J~WXTL9BJ$~5eFcRx)fqP6f|9+4QIA?pM%^LG6Ee-rt%%JOh_Y?>fg*}i 
z3vA{Y2O0(G<8*IqhPetuIQom(kalp*%&>tykggu?F2eR^m$7UA`o1#Zi$gidP9pX2 zyN=63KG)yDYPc>hG5{|Wb!mR!QkT^W5dx<=4FS6y@h&Ofn$O8>jp1^~wI!zsPt$A4 z&)?o@n$ zp_Jc?z&WIN0`mRL@ehrtW*(W6qsk4D$AEsaxU<~NydcrMD%w%Td-n3{EpxxBY{lnf zWt-zPo)_#a_|5UY5(%5g)unI%%_5370*X8R z(1dT{HTkW07cQqhBa{u4VnDZ#2dfBIeO+_MeI0+)`kw zg<*|X#+fL$*5--`G>@=cG4dOtg>Z7JvgZDu)80=S;fTtah{9O2w2kFTQ_!?0{1M}w z`ZQD0&zjG+PuGaQC0=g}1h5md2)Ud)b2*98-^KrE!h#m4tFg!PDzq*Q$))O)vv(wY zA{KrsJl!Grd`N8F^&!;F5iUb9+hj-KH=L^#`WWRycX;@UO=r?hBy2BN0B<=Z(ebR> zSI0H0!boV+vt~7<;%AoVcA#|Hx?#K1@1UZU%fP!1#u1jn4&LiM&pi*`K0V#)pp*F+ z)!#uA(7Z&g0s6AVyD#}cvIT}{8HuXh3&=%#9e6h49#~|&o;LA$*xOG}q&>@g){TOe z^og5T@3;20^z+l#w{o)kK^?Q;#WwL-(cg@UbB?Mv%uN{&=_BE8Ob`%EqHTS;X?49v?SW>>J z)z|x>=g*Kq;D$`G90TNVtDM5vbjmnYrgEPeYMd+MzQe~gOkbEy^<|41hwo`E22^n0 zze02h+mRX=BAQw{xQ%X*l&uh@hMVu_Jd$325@Um`C|XX8u^A^@dJ3gIyA^!h4n$Kl zBj2U{YkEfr*G@t)tsi$VUh3EL*4qAU!Q2W_vlM#N7um1Yi=TL0-;Axct3=9P(*FJp zZvqM&IZn+VE#IRS>#odSBl2=ng zXFv)Y5{Ab^94P_YDb;DNs9cjWm)YDR$F)+U8xFRJM$JB3?B_|*-T1n2Ua2}zB@|BB zzO`CBe4y6v6C8TFYlButTv0Yg%JMJm_6z zD7_G7&uk(t0*9N=cfOW5ml702=b37XzkK$!_&Qm@Yk|fbbghHi^Bl#lF6(=Im?d4~ z1in~V)%}+L?D5ge?g5*y6r*z5%sr95GRFmP-=RD564No^a7pV9hNhO)Kv`kq6MuGE zlcoNaHe2NU9r@_TuQN%YqVLodRB&7C)KXnz6snj1M*x5|kQO?4VPQLN#zg^%G3{kk zJG(Mr0g^&eoqk#M-$_~bopTO+a%6KR{t;QW2T!+_L?1bu`ms84E*19pN~Z1j>l(I9 zp>zlrU}2G&Dl!5zsMY-(ua6Fb0kAU*@YQ$an617MV=GJnP-9vK|g z{sy2=T20F61n3ULTH%%vXFAId9c0noz-MfR@*I1)XjV;~KJqqK9<}fmiME!A))tW* zt12&2^vz13j18i3T10K625@ z$oBWG{jb#wmL0x)3W7URcXfwn-o?u*Kx%B1Ny8peHX4WSsASBBOM@DMWr5$+?)Bi$ z$J>F%trr!?VL~5%7q+$S_mSADeO>i>Au-akPZR1eKH?u6(Djv%g8IbrdGOTDWZy$~ z%a^7$;7x0NEu601W)2kDD!e{aZCY~xpC;ulrk#q1X zQ%~X3+htNep20dMFK6^|zk&Pllq%#a! 
zX!m;zes6@jEl5GF7a4?uUOQD-1I~US+a~>w-y5@i*U3AD`X6;mcCyz8Hhq=@tRZtu zN%dm_;&k+349p&D=;4C8tbQs@Lc$78vg6o%qlqfvj~{ao2`?a+K$A0nlVe4&r0MKX zjWF9BOrCbfV|Gt*(JGD&V3sJ zaDJL%C9v1_c2P}by@PlD_NEZO-TA#s5)*=alR>XHAKLxrkl@u3fMbzbT2q#!3p@ID zX}XC29SgvGbZ-ZDWtrhRxwo)Iw=4QKEw_Po?cVtCMQ&B5R%^JB}>K$XRq3;aTb7h@k* z6k1_NB%NWsN8-AvrIMN&IGU^R9#k_cs%ripUeD9q4?nau!gTh#}LJ<80lw zCw%r+#7+%DqXjqG_w~+KfvpR*k1<-UpY$c#)}Ye+aKiP)K>xZ+E$J(VJ(IJW6mY}d zDYpiSlWq-9mEkrg+ZPz4#q!!}8w5-rZ0 zO`DsyF!Ixw@pKi$y}F)F+2*_9PGer@#M<;9?@Gd1mt{6HpC5c3^>Z4p4WqKm2A-XX zANn5Lek5hmiMR4Z^4#)N>F2R)((JKr83_CFA#V#lwio90u%eu*{yswW&5f@Is}64R zAGFHWwvs!-o4q#H{zRlITyZg_4KR&X;VGpyZ20^#liqZd0~&TZ+@RU^Hon8qT~Da&@sh#G!@Anp{<5V|uN=0K zCaXDnZ@(rkrv+UOK*JOIjKRg^!o}A>x^v2-JY%`|)+8a)5mUgzIn$|wbdA04=*(e> zM)4b%7H#^J+meNT>^?4Ny5x61C6`xWmg>X+D&a;_vWA55gbfK4RcZHwnpmu`(GTWY z-J+ZGzs+2fYzKX+(`R{~KWb(<#@mAImL+h0R*Fk%!x`SkgP-}RxN>7@VZ66Er*QQVF8c&>zwoEas*Veby^SKCub%(=(8~@ z@u=HzTI&#XT18v=w%|%U=}8^U(P6k84dc56Reub*m!N|J+6&3Rfym~hZuVW&`g}>l z+kRJuZLEl#HoYSuX2b7$8^^;|M0ZYQRcU6^(I4U{#3-M(5(`ywnG4M6*j(8YUa)dZ z>ogL~c$eRE=iAwwiIwLXx=GoHKs`#ETZ#2!xCNJQA>IAVFoDXin~@@MymIqAqYgrq zHJl;c^@*i=@~>+=`Kvv|pI{ugN=MV!nxD(xh+7rbGc|Be3HHg={vus>+6r^tXl7}3 zx(UCD-KFoe$Zhz;kzi*B6+dlP?mx5Fe;cCXx9DFPek~MA)jVqwI?>Ef>-h-;DfZ`U zrVLj}72GgAR{L@her>BMghXySzLeVcL3Us_63^z$#hvfdf3&KdWgxef+(c=I?PhyH zbo9>m=)SamZl~L;k{~4g>}HP08_49u|I~ zfr@Hyc$zJTE9nVKojFk9HpML4yn;JE&_FsC}=bW5TLqp0qivgUt zyT+&H@4CFH*7~9T>GMO*mHmAcoc4S;QEAA8DPnYRa0O>fwqe;ck_`amz??c3vbb=q z7g}Jk9iwu}>3YraM@^1*EPUnci+70o4eoN~HfnMWEi2=4bB+A7(*EoIG4Of$D@@q^ zm4}^ww?KdHHm-2PX7_h7PCmq|3Fa^&KKf^~D`^QK|VffD7-%GO<<#BHYY1oum5ExW!{+==-- zieAJd8t8T)JcA5y){=j-b>hhjpDu=Cp9QB$2JqJwbx6mxe4H8;#pDe2D}44;JFnNu zbxWu?*$oh9qmrv0&bnC@ctk2+4gjk1w6{p7OZ@rJC@Ti~`n?5_lEqLP%BO#>`zNDa z)!jDftBp{uG^unvPT>dG4>D!%uuIwd8@w}Cn5szcIkK^NiGR39Z!?0}MZx7#{FaFMSELS?+CoEyy3AdIM( zOGfhUf&zs|0M>!l-4P>O+nUe^A8ZCH|Nu3IDJ{1gU;VSZ7$dgU4Z5$K}UthE3f}m#YUH@l}f{(|qgn z`{6;h?ov$_*kSyhAkf9bnK6%Bd~7WbNKLh=vh`C=*)d)~6ng 
zu0)gpp6OO-)m!U7>vJg9H`ri()Zx@RIyeFQBv>{Sz=w}eV5PY5*IJQRM*V^wgjVjl zmS{1-fxY(|C~mtu{{F>H*LmE- znov^r?3@Srm$f3!pSBdonOnNIn4LS2$c8g*WTynD{&cd4xYfPkw6qx_h#K9PJwXQj z;9Zeu7ieDxUqqK#skZg`i}7LV!e&zLKl7dkMW9`XT%fGu7ombPLB?kOKQ4|x#hf>C zdxzHNN80|8oZ-&SdaGg~e~5YRTfZ2^(}eL4bHZRIoP;Y#x7lkvC%js+)7~dvIVBlf z6f0&j9M#|?b{4vc%LzG>ZH%jc$MOMgmx9{Z-cWGsBjwA_4fjBv50)vWoR@#=L|jZ< zn|a-^bYSt6yyLf(_qYEqyIuP=%Af(gSgKVjkF)i1#XIc4Tyo-sNKpoJb?kpr?C^v8}D~k>9D74^d z``-8`#XqdTEnQy=&Fjxe_gUz`yE)$Rc3k876TpAggBkV+-UM!C$MMuIhJD;Rz^w6H zDA0H!wQsb%%N*y+i(~hR*QPe6c_+9tp{U$5z-b~YfucC#-*IaG)AlzR{5vN1BQsAP z#Y2Y1Wsj||Js(Mmp}vFGv^8QU3N|~1&NBrfaovY0dH>8|R=peBHZ39N=$yU_Bp3T2 znO?_{&K7yK{v1~aO|V02|2Z$f>@fG**o$4D%m83Er#7rxbr29{sJsNGs1&F$8d#us zhI3V!ewQ2}io7S@5GA{ijiNGN-Lx={#&}EQeLt18iszSo-5Ql-C1X2ZSPD@)uUU2G z%1P!kh`3&-bk6!d>hb-E$fTJxN(#l$4Rg0TF3iG)^NJs2^i*f*fm^=69dMqAsyz@R zF+Zfqec&~-{-(qBQB1bS8k5_;CEBa6FMeB!J$$ugL~ZkCVmhkVxJ*w}Zw0=Uq1lmX zee|npRPVZ%OHAs7Ym_&_VsSSh?O;sm3E-4KktgcL(`T{29Sk>Czb$#{M|f^l6VqCq z&4b>!sCW*Fsa$nH#%G)M^D#a30vpPvvnK?>E#gzfXzvt9kH;wOA%{m~@pjOC-HlHk z@KohMRPmn0{#x29eQdU0I`X~FlZI_*R=|PgaaIRpHSt*o@;oW2^LQpsua0H2aSR(Z z?s!2YwUNGYp<^itJ{G=9HebjY1Cx-86JB{UAZ0YTn$>wKAzUe;ikBtq?ReWDEh~Uz zRSd+dF9k@CCw)IyD3}-E;xe<0mJVA*TVH3pQI#lEh!)@25Q()CY z;Q!UC>;awD{*efJij> zU?z1}AJbk@=2J0%8&vF?e>MdNL@A7ytb4D}CT|`6(-D-;i%UP)Mje)z*oHReHZC^b zSsZMvJxQqyzYNI9V{zfaR-zvU)Z$36`%L^|IoDAwD=h(!o&ko@ z;v-ihYT)Y%&Cy!-Vmk!Z_z~~?dPbLu3v7F`%3myfprw?&-dpl0>Q5!?R~ppvDg4r} z+E<_W&Vi^eri}f2HmX5A_I?+*Q*HvA6WdX_9+#Kjg7xouJz3cb3O$1~Al%lb>Jo=SJHr*XD_rq zEV+!~z=*}^`P+x}Jd%C$30x&W;jQ_arF@*5|6&Z}mSqkA|7Ag-(fBl{RyQeb;z-VZum5MHeIN3r zr}3iju4G>*B5&^;cn@jEK&bty7@kCXm5zk?WwY^HM{O(lxB(o6SR@9sde$(nxxC96 zBb{P^s`UBKq9jIXrhLkTelX~c9-Y~)EP9AmC^TpB?q$kxrt+6!4$J08e-5ym`2;6= zC6}D*9Jwm$$u#a>?UJnX8gMhoeSO+~$9rKsl?7|_W-QuF8J3K`n|&z*V03|n?Wqm@ zY8lomkiAp8)c=GP|JJ95*rtJoHuND^_uAX5uvz6vfjA;wuxu;dB;J| zz)tHqgDTen;j0<*Y0k+aMI{ethSHX}=xJ`F12MNzref$6v3E5qJ2sYS@d+fsYLg%U zgP3p`(4b|RJ81pb260PXj#n|M@jqom_eOob5&nj+Bk-suz@ij>ke#Ug_OEs;`$pv) 
z5in4A!i8>MEFIgw0fv-y$!nGX!DO{4%EeEn1}GMnP};h!!1?*kAayqq0p6Vsk<5YD ze(6(Z0rft*s=qgVR-5tOh#h9KpVGGEw_9FJWkns#3q(+-@+)1rs#kQ*=Sz&xxbA{) zO9LYVX#~1@O;$Ka2GX0-{7d6NwkLYBl;S~_vJ2$zw;kKRbMLBHx`28=J@!AYxW0CYF z3!=vo&(5}LSB8=D6Q7q9KBZBw@ZTsIvjtmPs|BQ_@KBlG_@O&+ZTRi2+2ZdUbnDHJ zTo4#xUvEpa;Ys(j2WFBZUXy$AeaWpX(M?pR>~6Z-E0vwb+_H$P@3DaXvgnHn|CSR3;0IC6R>e(fyXScVs1+L26Pa5o0&P=&-cLFAEEOI|MVd-@ z|H(G^eENyGfl&043zqQ!!VTumRNkbTG9~g2#UZ7K4#e+X&0O7|g-qrOj~3=q$p*xa z+8HWI;~)he9iYVplF5GN>CVj5gGCR1rH%#Zs|Ku@d#V{%ouH+EN8tu9W%?#Ej8Peu zqnHZOUl$uaEaPBQ`x%x9oq7*9PCqtqLQGYnL7>3Me-DEk8kJ_p#!{A8e)l&|rLj8)oJpRq4Xb z*si8(J&?%LHjrTO>%|o4f-o=Lj{0o=sjtM@Bj{+I`Ee@IWB*i9H$HDMry2cW_eJ#= z*h4*V>lDG%$Tx-5qAk8&HH#b4E;&wsL6%U zJ$C3wEX7#(hzPK$=z&?Nc3Gw{^vSV!T-lq{-9T-)!0_hO41!_DXWB8t?p{e_XzH2Q1#*wluz3dkx(&molBPTb4 zH!JC5tu$_)O@Z)qsnGL1?B-CuQ5>T(R)VZ_TUyzySB=no&}(BtxR zP{Yb))t^7IpDnHI`kR;EA@g_e_CWhNt|^|b=SO5+x}rsPPPR0dx>L<7JZisj0oL>H zk?#NaV$Tj%-cfAXczs8Z)U1ESLC%h5N+e8TF2*sAgt3A;zQ)g|E`M`r8%%k0XEK1E zQC3<1RaWgt=>(&j+@00whu`i~);P+pi6vQC!B!YUM9x?e&EK$tM=`#MSiY%LG4OAY z;d?S3<>nmHq}K|mvDvd@j{#*f&0EG#G!a` zKh00Vn7DqYyfL5j(#4q5A(L)RZVSSVb?l}En!bl>SBma9?)!J!-Ox)fxJt3bWVC$a z!NyU>?f-a%>yA62RTID$N>dfUYn@u#x+cJDwMRl<>f+C>B(ec>;6k7B2$R(JC%)Iu z@RXnfvHbeIKaI6F+^MCreBdbz4L)4fhDw}7^hDJd zH3_;8p7ehKn_7eFa!&11U!>vvv=i)gw{OKfbdNsW5s6_T-Dhk6MI~-Dia+@E$?}jF zt>$#c)S#6~N6tmtPSW*0at#yDWg?zQkJ%vTO|5}QZz&k)%|hj`ww*URbm3WYe>#(w z1jc8c>a_M|sgatDFk~aIraxFOP{>tJX>J{DeUiq`rP6Q97nvK&dc2uskLth;!-K}R zj!KMz0(1MZgAkrZb<^eSM=%_ zSz462X*M1-K?5Wuj*se6YdZk#i%aXf&%HnG8RLVw%bV5=V54f}q*6g1rUy^}`xHAA$u z`+L?d?%k#Jw1o3*+Lc%_*nm6J*+w1kW~e?8w#XG@g&^dcsVuMm2J3z_+It z;1Pl%MF-H;_3pUs;D;11qwbSYH=WjdNk@F$x&}P$QPIw1i%EuyoLCFPMEMR862=vPY@ScU+O^Eu_ana#%zH49Y z-$W*mqb~R~bl-bX6%xoK}nbnBklwDAbQ|;Pt9a%93iSBLkjrbR? 
zdiGM1r}oz7&Leq$?5DvRjhK$$7n)dOUVqq$#aO9F1V|F$`heY*a3zYv;@PtxY46MN z;EypOO}t}!1K&k^R#l{ZJ%f`g1*{iDkp)>k!wX$_Bh`*`Q7`QzgUj_^^}TM_ZT}n- z+Rnk%3w*X+s--(u-(441KftL(-2|k}pYoB?^&UIk4z%nmKl`=%fidvF+c!G0XtFc@ z#4f4-%f{^p*L05|1GhImHZOu(e96XzL$>|s;K~v1yL45|vizH%@G3AMOziJ6N@!y zcGWNuXv6tmCG@s49QdsBcuMksVDIDZcx zQK}m_*t%VIOlskro1(k;VfcZ20Evf3ze9LDoKR4eZ zBdqF$*S*(0Axfk5eOuSG}TP=u>2xed>MMo$i!N+L5Q|mi2?Ikkjf11eQM!9^G;SFdhZh>lk z*ufZ91?+KM2&9y_zQ>7$EN0sp35R$F*S{+oeRCIJZ`GA}-=?AJW^hZUyo8m5g}?ee zB?7C%PSdhlCh8Yn577e7@Kn=FaU6D9-co&XW_q_nER7XT`t`KH0cDT3@1-okVBr9c zu~%tOE5AcZ3t5ssBKGbSCFJd}c>ao5bEEKqg!79$DLz>Vv4y~Q=&<-{)Vih#Yl@m5 zxd?y!Wx!nuZSk;oYH^#{cNA1fH+lEvduPSl_))bx!;I0`3X}_2HBO70 z$LCx=sVmGib66_gch0f!V(Pw~&ADU^Sj3)Mey(N{rxOOogm#Fw*Sh^BT0R2TB+l;g zJD1zGgBW{DWO`mqk(N5%85K8wD``Fi5}8Q@^}istXya@2^~hIEWR*{}R*AkVSW2Vf z%PWimiXkh6@9KE1sbo00JMz!y<{l_B*p~@!x_H>MlU*J9_q4kEVbm=Kj{uQ5&bU!1Q=TyJSk*H*6z@;BHgD zA|BhnXRzj#L$<1M?V3&aIM}HglMbz$QOB-Jp2WsW7Ach)?UZ;!h1J}yjTeWk4$Sv) zlJFVV)eyHu9*2p2gi^YEA#|rAkR9kZyl=T{=Mha0Rb68ip-rc+*48xTGNh zY66on5lIy*bV~b>A+Lm??Fz#WQzvA z@<_k*EM3Zswz7GL-tUs>JE;Cv)BFK|7jW~$n|`TZXA>m6&V1EAxMfn?QN81pe+c0c zZi0DV=A8Bw7ATX#954pp1ud+$$^-@Cbud|Owbyjv-e@z~*_!f6P!b|lpx)r#r%%H+ zB929plA~rK$@kJRYM0*Sg@}mBf+pqJ?5fAD2l=-56{N>Ee%=zooRli@u+yDBK7%jp zEmmg+gvV9wM2h%IEc=EoEH8F((Px0axbXQbk}q%66zw*WV7h*`3T%#E4(lhlPFO|G z_H?^EX3D{boKO^_i{Uj^C}+DLYNhq;1&u>f;(o@MiDIvA3>ZnluAA}F`LWNET6Rk< z)>*Sno-QH5HJRA7Cq*k<3~$lInuX<(F6_hx2yz}-dVV(W&~0iF4rhQhNhiBIK5TOx z%=^{=4Jp-ahrXP?9LNNCTu~?F1t^a3keLIuxS8+refEw=!VIWC>wqIPDWBVR@&#LOsh-jfgIE{Aw0&pf1m%vf zYm-T%Io-~L+i<+_Zas_gHjOzP6fWkt>^g`u#Y>syL!D@+v_3Wid7G#UFN!Ylb5~l} zRM;BhF*HEGmcpcn*i%?vIu`@6bD}^xyqu24Dek3RAU(y3o^z(QDCqoW!Fn57bh+5C zRo#Kji{{K>CUVKoXj*w~H1Fvm1zh~cK`!#>tWDH!EOHbwX4UO=PICP*2Eh8IxgZm! 
zBYA8)+5+u9G7?T9<8({7_IJXEiH>El_3EK0>_ONMqUixSXZzzhEy3sVzj z6Wyn%udZfkqDsZh^-w42((uFxmwDV6vlU|;oni2#?%)ec86N7{8|IR2{}q!au2Aj0 z@92qG98M@2^7%Xh1*E;>6=XavJufwom#5{k3nFl|#`&2DKjPJ|FIO81n;&IU^KCFd zaVw_9(WN>x${AP8$7%`L`_Q`0U%d?VOyqLE+bf=E{4t=TZ zxHx>UP#x_?FtfJfZQ-j`(lip~hSB(#^nTh+kaJ4FCky>Bp`j@z%8-o`O!aP~DX}vk z#JwqeM~Y4F$R`KE-0d12SWexF^ z?6dH5c+vH6Uy^olATk#J*6f>0e*TsQX5gLZXBVk88hU{^*r4~C zDMGX8cVd;wXH(yxhm{>MxP{=|iFX0r?e@K{A3(b6!-uVkitZ9%NQ&$P)$I4s(-1)U z+*AX=u>aBMrALJuZy^`w?sV^XjbfrXZ;y2(@HLhZd_5}CO+>5ul*!5{dy{p16?&8S zTw-(CbSh+{uU zA=DvZ`Y!X@pKLO~nmOP~n12N$|GvFc*nm?|7&xH$Ylbv!$v3p&RCcOuA?K&9*-QH; z-HFK2@oNW7diG|a@tK!&efW6l!p|eVHhXMeX}y5v-)+g#W#(J|bmP{oz1-$D3WB&N zK+-poO#)fH2oHXi=AnG;+e}i&R%0p2AuYt|UR1=pv@*94+sc-($6iJO+^$dcoyQEF~q~&ZDQA(po+RA_GKpAg&P8m?{G5J zz`S0SKixPV0uH!g9Bcc0?A1y+WAteIawjr^? zlY8lE0q~;sa+KKD$0M5%Phd6UdA9u74>yZk#%NGaGDYAXaVMslf4jEyaa(*#j2bFq z^yh)G+;jZ+v3LK*PthvX7Gz6JQ{ELlESHU(Ur6_eEr&CF>yjBh9OjI;B5J-A`+1hK zvWgi@0w~~mUHkVl2dQlSGF{HgxysAbBrcjMfpl*)%#V|>*ZOv4);wSE?Fb)^m^D5C~sIqxkNU$*tYpcP?|$xGXt3ATB(6?J~V~A9^JRZKCvt zL@s4@dexS`s|dd!yg1p;F%@DSaNAL64#;J;356lf-?8c zoAGTcvYA-Cx!$J3WACNZ+P*p6b4yOp*l|#I^k%s>&{Ff~^GSVwgS2W{rC9=&$wxCp zv9>b<5B6QycMfeUjyN{&FJSV9dT?QB`_zQCVBaV<4c*MDccSVtr;hUhHmVx@{M0?1i@a^l^@lYHW&^}5xt z;eGhObCT?zZ^KLecNAR>b1p#dgWNs;7MvMd(#yYzy&5)0i9u<{*`kw-J~t(e-+~)F z14*f(fNzoPNh@e!Sy@M$zwzgSUpN>st3N+qaveyMFGZt}=nt>ASe4x&I8F=2anN7= zonL9qTq)L~tKfGuy+C$f2}8Hv3l?!oy06&Z1-#F_q=WD|{Gj5a zaqULh*Q-Vw{EB0(+a2G|9p6f&RcP%MGq!gdsIhcAaY;Xb&)fcW1%vNA$s zpE?|g?t99dCs@WEtWL`45&(~gC0*h8fhVN5&E83gLF zTC%GnTM>y1Q%G0O`va2%JeKL49rgdD(p?)&{>v@z|2by!p8*xCg>=~! 
zA!akdBCKs0vqOX#%xUAqZa{kC>lE?%ym^juc9mJ3ZgqNMWjdfXmP;0O&%D2Rx2X4X zzsYQF6M&M8ogVMVwI?!{QWmVhUN}LS5~fgf^8A&AW(Gdl+LgqUuB2X=VbGFy;Y&nc zmQe-Vt5A(m3XK&-+Du#A8waR?GpsPG%i#Z6lp`W{O+(v}0jQUDp%E@}85hNoOhj~1 zsm7!kbZhHEG-Nm{4DtC5dHIpD&&!A}ibpKENz~cg0A@#@r1DRRbBmD~`&sU1<_aLqV?V(Zp z?EyJ^8Pe*n)R>fx=P>^n=xFeUxW}*HvY-jm!X6W`gVHn^kqwmmwKx9fQHfyke~U^C zI{b-BY)N|LLP|hkngPfgw`8ckypMjh=-JmDX`V}cL5^B9FF`D5b4a$7%Zm@%?vr;1 z7!Eq@ztK+@5cl@T>PrYmQJ4@)(%_nqLSDX)AI%Or+z0Qma7X(3l3?6~`8qCXeN$dN zb;2QKUm+fd_9sO7Y(xUfMKRS?33kHIZ1c zR6l)*LoK_8FoW%*bVIl1>N3p5gp9F-VttPSKwG|2>USsa0WtcJYGj?ZuD+@#t-`cG zhfa1)M?lUzAYt7OINk%7$UE0S>E64x;pFt2jR4S%Tv6?2)#^KdJdF(uP{OL)1Wb!IgxpR?;X^h1u1UHU(~;CT0CH zpN-oJ_E__g0U@7&ynyczWkb#aSWSWA!cA2O4QJ=N?`6o+@clu9L~N2|d@It2X{rnt zyNPBUsUAwQgzy7p6V{%*_#eXvkOAm)bqr3Vr0qUyjSB&AxeQ&rB!gybN7GX%GR=gu z%JVX^4oJ@z#WPzwZ3^l#Jhj@?SjpB}q8aMfGt;F~G3$y{jrMDQID&zL)ixh^P3Ucm zN6s6h*@#8rRXXLvsW|^k=ok!U!gemM7whz7F|qJL@}~gtT4;9&#VH)r7`=<->gx5Bkdh3SMDS( zk6aPYhCv325$S-?-xl&Mao(IeQUY-ndy{*l0@>pdbmNu4hi!)*v(8o9V>IJ-1&h)iFgcH86E zSdYo=6K9GNi)W6OcFh4FSiF}+?h1bfcHSN#yFYCjpuBGNANcojwnB6D)7f+c4#dg{ zTK^qSeDJ}l82VF!rIM|WGmBnDW&6MI_6Fp*x2vj)5oRR9!hP3~>l#<#N|M5Xv)eZN zI2H7BD5J?FW-2Inb*@JU!F*uo+didMJD180-}4}f_EsMQ{DGE;^c zYed(L@5!_tU)`y3bwxvq&NP1D+Eg@`NYbOfj0Pod;itaA*@ zj@9E&E-{lAW1qUJ(vF;TSmvWJQ|qNW?$*3U6zcoOKB0=cDt|8W!Fq4R@EHbA=4bbE zJh=o}d`G^}&%>Me7m5yL@WWdPzq7BbgmN<6q>D81Ga(4yJUeu7J@`MZ(MMAi$9Z`P zFx%jNO|+7ewu^&*CF3=H4%aNb4ZjdMjCnhJno@Z3N#?BY2V~ioHH#vL?bIrXS~H~Z zKqh6d(AMbtEI669qeGlevM?)-t6}`7ymI`v`L+)}gRWwQJ>zLdX%fCj32TI3rSTrW z_3M%kY%6+uW^F63!#_CaZJZ2q=x#&e_*bc2uU6&Xe9omkxfYrDuX0r_ec93pq(!Qo|8B&C&%z4sH^=Ru4rte59iDX6ZU744_wB1)QtBMNbMMoStK5Z28)W7 z=OPv9AupDQGNohh?bR*8-d4<&KN{=yoy>l7zPxCW{T#OE8TF+J4QC#I(p1??*GdUflycYciYN_pJ2zH>-mrNFL|Ks~f&CUsP3 z6kAM?PC9G zF4n1RO6W?}__qf%F&~^cYp`A;x){P{YZXb2OmgQKCHtjOX{o>1d&g|IRwDeK1*>o; zjte)~tn!5LI3VefRh%!k99)4y=!IK;cn_qRgpMbb-32X@1LP=nw)9qbf5+ zt|dwZg4v=jnv%A6NyD^R^Vw8R0rIQHL8m8F@gK(P_qjp)wqH9Kw2OjoxgOrPB$bVg z;TY{H5EjiS2XEiLW9{4tA5EZ(JJ9DKSqzwvdB8N;-!gV)a8#c 
zZyeed&EjU6bhxo(k)FRQr@tOHBFj!D7~f;`_$?y59Toibk&8EJ7(UY?~ zy0&D8oJZ4$9D|kkD?W{MCOTr%^$f4MQ)Z^@dfi;o@~<9QYMXv}_F z6$_~OyXnp+erMH}Z)@4B7_n3I1oh_us?;!D0y_7ZeRY(vAT%ulZ z%U)cdtr_LmeA+bFh~SY<7Pgl48eK9ILBtuZL|?n=%j^A^nr1J*P0)z|SGq~!FQUjF z55GpD)ISK(7DefuGuN^1l9;HWkX%tzGf(a*`h6shZ`D8spb<;&lssXSYJ0?>)4j9q zpQ$Zi*FsyKM|iy!R~HHLYemdHO4rv&;hV)Uy;?;pYU2h@8X0nhUg(CAbz(|ghw#mGx z>g+|YNRkOEpu;xo_ahE_ZBmV-_bX?i%))hjF!ojFkyeCq5> z5!pTmnu(X0RIM+H0{2aw2YPSb#k6cHq$%VSoT{b1p%~yiZL4~>to!-c%V8+M+@KmH zH#*sBn|Tc7ibJhn!q_dZ68tDkf%I@(8zG8a+4Wt-#AK`La1E`ZQ#RFc^?S!)1ehh8 za3tBm=9Beg$@GmKLNT5rOPasYFnxetNvIoG@OmxnC{8rpL!6<0ayIX9NmAJ+I@hy~ z*1?4`gb;Jewj{tH8cM~KLAvWeR4I>sm}YDr>_Ztu^uhPYd-Teab-gIloPNyQO?2a4 zsP^eP290%|rg+#;uu(j?sPjw7OACnuA$FPkt2JDOo{B1i**iVQ?(-jGl=6UF@SGri z3t7-ySE8jy=z!NacE9@6vIhn7=~Ydg zetOa^@$1A?nV*9kv+#7g(CO=~7voGR?R5$}pRLDArd+TiR!F9XWW%p4Y)1&fgzUEr zvgI9M@PKkH$2d*n@zVZbG(itXnep|WoQr=UxK$Gd>CQ9H&Qg+cvH^l=PeXu9UDp1P zwU7v)TYY(R*6>&+FILN-oY#xe2ONksv2)_cB@|<{QU27RDX;3(149I?mGP`Y_O3Q#Fgo<63GBmxacV=i|lD=;KclOTx-N|1`^r%uLRq@>Xufi@P!wf&|YwtL{@g|C~U#=ygR!oH$w*u5WD`u ztu~28?NJiEzg?(4O2in)OGKZDljLn1nG&*_j95vUOcM#3RPC$HJHIVgrHE&F5Q&VSe!VGI#KN&sIqW}+pH!S4%_*nR zA@rpV1G@g|)jtY2Q%39A^U^;;>h&z%`qaBPrx%e=Z2?25wuMig{sHpOiduQ>8X)EM zDJZWLF|_72ZS(^JyKZ~$Opy+|*a{8^5wB^I&~*eO&AmhfgDg=n#uuC49TAgiX4WW+ z-1p;jn+L`q7kFr7K4d_++6dwqB1ymMzJBNE+tC!vN`!QG`Y@J~-T7?`1?roI5iuS* z%`st8()#2JA^M3ez|7k{cuuzqpp1GuH2Ps9+qufKmE4!XbN*gUrHapKCN0D}6r0ce3u z?EXJI_&vEY?N2XAE7V1JlIC`M_C-u_L7?41F`hr3vkBLp*fxPJ5bwl%Su;T3N|PhD zI4A3g=uF)B3&HI;_5GQZz~o~8{elHUU7*9Fd_eI3kn*(XxT&_b(3twhJo0QJIsOSz%$d zkio(qZQ3|Ltn>Sq3yQ^|gG#1e#{vpoTCY8wq&@K6l{k$lNw5gQRt`0n8FAe`$ zg}D@PVYK_1lvESxUrmNXcQ{VE;sx{BB^Q;q6VvMM{JRTj{(yh9`=7QQ$#p`>J96Yk z`UH=Sq+-tVD=5J3ATa?1nx@=ff!qg-#Etgsj;1)={8FP4pLy5I1Ig1(&+!=gDFIbe z>S#V&d$(;do$B`TZW=@3&?-r83zC8xM%9L4$+i(5hKa-W_ci3LWZ?goi(!+Wa>ZdGJR_)}lpP6Kkx_+X75>0I9Hp@Rnc!%5P1isjcm5@_s|T4lw|S-L-GO$vR+<$KT18wef*OD5&c@6u2(N=K~cPy~n2c8-dx9 z#E+t3-9`;PP70iz{s-!m-DclKWadj8-v+-sHY+s=4@j%A2iabEkCz}Mb#pQ_ra>x8 
zos(9thYaR~Bq^cMVY6*nEP^Nv<<~!j#TAfYbG8y?&BGbA^?f(j7TfS44`jSaPe(z_D4$?ESUSX7GK0K;2*(Bs4vZG~w^(3nZx z8{&n;+~#Y*07!d7SF!1vwto;k=JKUIDr=1H_dh#6f{<2{c+h3fgjNZ2Pnyi^f1|=MW?2p-Q`XUcm{G>Il|sfR zLH9(B*5BDDKDIBYoC_CFked??(go(fq$O~)RAKfQZ7$}BD>;a)l@6Tl@FDZhAsN^a z;18MBe0z-`HeJc$ixGo5t-{&hNNz6AU0aFP)d8BlT{A`l`A~8_`4H#JWiJJKb|9?v zMbO+w8z2ovG3p>}c+znF0++D=D$rXEPR3)Qq_ci85jnmA(oWv?whN^xX5JS9w^(eI z-!l>%%~tc=GNYWbQWwjozWOGw>{VcY9_T`_GBNR`#-#7H$*zj>l(!IBi|Dx=!@+Nt z5;?EMfLZ_fk&;A{5iO`@+V=ZR zgfalCN!k;K>5Xxjh)hI2+m2?JvHuYWZo)-i&WH!$7g{urb|!;U4R+aLJ&Bpxnx=y3&)tIG3cz7@?2WL3b5twnU-M`NZ| zDhL+N^*m;oipa)dR*Z#(dTo?pcaMTq_th+Nr}y<;33|?Le2nz@O%*J59`557<0 ztL6z+4cs$i+COeF;xv5b)p|>?FTP<2`@$>B!R@)hn^6V#J&>zGuY7!e%_8Ki@OnIX zp**|)V$8j_Qod4_XlR+s;rvb~tbuDY0~YXUmWC`$8`fXMSA_YJ%1PluSu+Kp06on@ zq+Y#Lzhwsc)xb||k^NDNeU?sjA0QAWSi%Uw0CZ#QB>~D6RBSQiyEJ4$>>ZpVG*Fv; zw#~6aXk3S)B@4Bpi75qXapcb3Z_YAWLhC>i2x#uS9hoc38Gig#&CD8^^5w=RzFe53 zdu*W_C+8!cGao})OfnHt-t7(6YrD`7aLzh$Ks!+D$o<$4rb6dlLp{qVJ&3<5e7Ad( ztC3x>c=ai0;|;R&e0i?!d|)L)J@AF`&ViWv)1Mza`s5?ia}LF9B{Ul752^jV(r zGskDiHp?Lj&@&OOyObFEM91Nnhc`0`HPJTh^!ZEC2_2)Zqrvyp6eN8nSX5Z_XuSbn zRaoniqtC9fkhz4;3)nbNEDTl`p-=YNW*f~8xUx>Li6&+*e|Yywbn66EKgvyCH4`fJEY-xV+gqZZ0=} zBp@>$0|na}kZm$Vn!8ND+$kEiE*$W2oVt*>THzgMz_~-p`#n8h8#-pZ+u+CdxA4hd z7w&Y~agkxEXu1dSWMM~T-IBV$3BR(ef@vHwsy?poBXyTgnSSa)onHe=FP~IbocFm} zp)M?bHMfvoH5Hv<;(wmu4Hskb7KojDtsEzkii{=`SYsCA_${ysE|{{l`QlRmxFd+R z&w&Ae0va34d5F8~JJ4_O0vB$Z+8EP|LGXZ7+yQ!F7@q3x&Q;h=%9H(+q=ExSx+iwqlT!{n^#b63PYkq0Npyc)8^E@EiiMS1?<1m8H=@kMHq8j@|C*YQ^GGn_otfLXAIAB)@xO7F5VY| zN579Nlxb`zpRvd#BAzr0Ba#h1W^vKG;8_)(HvLSHhQ^Pt%~dL8FFnK8-b7h(u16V#dFII`BB9?*ZHk>KcwN#jv8ndP)nB2Ba!4r8T+DH`mkv3DKfGQ6JMGyRMz>?0lyvt zLlGP+_HAE$_?qYF>$eit_8HP&>&hf5${FONCq6h2e6_+gyyAriSBIZ1AAaJLDrNAb zn0||v^1+japy*Nq($;ssaiu;;#LK=2&}jG!3z02+w8#kgraYWLlROay$GQEvliASp zc|2_TqWS$Km!7Rd&Lsehz)IjIwBDpW;rJ{ucT~3j>42Oejq;P9jg8l&vc08xlH6kV zCIdCJaBi{*LVG3CmNl+9H97PUi4gLGJoL)ndcTt!jXy;YXS`j)9(=i`r@yBKu4HwR 
z(MO^5y66dSkIz)os!shd!pyK*`?7#7GncbOCq!gxQ@l_zRQ~u?D9K4+X)_a*^IgQ@N$Dz}xjI7`BtaIIQKLygdHxXVJ-Wb{X(XN5*@K zV(D+o7QH#*!uTADxW5AlvBi)C&Z|*x^z5?fM;!+g^<5pvy>LK+JNcl7@?y#_hZJPa zcH$s7|4}o>`x3efkb*GT<3wTc=!ZvH*j7Aas6uJP6YDOxgqg7ul`6JAXCJOcll92~ z8{cJoVe%sJhGT3i)j~4&9mC6l0OR*^8Pp2KUUx7@{vu&4mP-uggj&Ab zTfB?<8e{F7`d?;+Cee}|&d?aL|5h3=s?l7#(^k-SaF(F_*TFSDQXK@Z9aj`7FJ z&_SrieMDwQV{jXh4UAbgWVy$DYzLuruD5z$fBE=v)MS+ouC-AbFld;V7QylJX&CRc z1yc2Yz1W7vz?jm-{JDA^48WJ+OZ7%D=+b4=k)_6-Kic5W9t)S^vo3cdB?i=>lZ92x z?{z!?`ZvI)WY6>UMz7OMQPI%`=d!!3PPsq3>t|5FPQI{vbuq($XNENWR*5tLkQcs^sL)gX4X%qU4%71@_=o`hGT-?F||uY&lPH-L?K!{{r4 zs2wBJ+yk7U{-g{b9fFyWL#=O8Cq6xrdaa-5WPe1}($}QaQkB&3Fw_|CxfgklYeeTv zqXH0tjYW9RVq6ehSPGq;Z$CL`@ZDD5N(jj$ixc#Rz0V%BK7V2)6X}+6*}JYRSQw2!zW4)Lko*P7&-JsPqvFBhpKq|W=cpvd8)5j z7O~-N_W%Ie%%m;?+7O)u(ZVDOOYf$a#xKwh1I?2K2aDnvn~z+I<_Bzop_k8{is3RM zOOp{Nl{hf3ZW!q4j_tb(3jm)# zSq|&G@rpSft{R%R1?m|W!ir5kKL5~ER#l~V>70t1TJ76k0c<3@I*a0$Fq5~Thq4|N zBb0M9U03_N3M2oap3Ghbk1}?xKS}R?VLFRtmXt2R+aXOYxfa|Wc+OTZb?cXWv8$bu zn;w1F^F+ue_HBO=TUeFRf2LVLC{{3v;}yqk>*? zSenAi@>gm}C1ozcE>f0u_0@dpk%CU7a!Bk?#NMKv=Zoh($xEjrlGwM&MjNG==B9Nq zdp0vyYQI|0mRN)QyegV`$ntkZ-P91z2RSrPnTo}KM;3m_6H;0iOl`jjcbUaD-mS+u zsRZKdeeQhMNCq2SF_d^84-+C@do8E68ct#c`#l5~du}$@H5P@=tuSGn+cVFn2t{RH zHQ&+-_E*L(3l_7Jms>gfl+Us?%#osg2zW>nX-zwL#Hw7sO&K}T@g2bb?QfzSkv)Vq zc{MBlOBd{!2bboiIGAS{!qUUO_qzA zSQ=nC(85Hd#-(TR@DYMLDKD-|zbWp03>#IJpJYlcQJp9Xc-xiiK-C!S}{Y2Og$o$sjoP?!ozkh2Y3?#WMoiy~9DiRI;+>L&as%K4n)HHGP{DkX73f7@?0|`bq3p% zLhRu3(Iy95Q@rAPLp0-Hl^q;B>q%p)vNz`%Gzv3Gx^0TbL3lA*=*v3u$APcUJd!<- zd)G!dGzOZB=m;fVb8j(!*Ft6k6xn9|{kKJ-kYPp>TmHpd3tXjs3RETfFjNC3us$Db z{{jcs_HKy#VjiFcVF0BXYxX_zEpTh8hDF-_z-JvT`4sJBqgSdR+5rZl_z`~lExu5$i0Hiz355-)hfY14qf0x2h*H|QkKSqzzBYq z4XSzP%N0?hJ|xBdylW@ezKLLv^iW-6oNWSYI}>YJ8lKhJ9kgI4ye-!^^3k7mPxro? 
zjWrc_!<{5*8l9>B2|+&_{Dyr-=-3FQMz5h4q$pf2i8NW^rd0Z;HDr(dk*)kHg_f)2 zwswfb7^_Ku%j>7T`|oo*JMb7#n|e|+ReAP?(z;Fu!;7E*C~Qnpqldr8fiMvAOyfPdEer0LbSMfSL|n=FncX0w7S%Q~z02DZkgj z0WDhQOAC#JC_0m8BlMGBvi7Mqge;xe_i}EC9~b&^paucLfE{QeU=~UTM?Jd0q29XF zR!Z4^w5)6ixGSSCj2##voy{L)`*S<3(8$Ea866?1<5h^guHu*B4_M2L&h=R1xVzdK zKqw9*voY&VEG?`{ex7K74@ z!hUF}gRR1Y5zEts7ndcjub0HxNd=*e^aDvoCJ{8Fd(m!R% zStX>|SpiS}D1CCNEQ7idTOzbpIv;#L2D&r*1+#b9=ada}6W#VXt|8xDsNXhg*K&y! zO+is+641NL!EO3{BMQI<7`#9A0kMKFiBVokaPyimvX-6Yu}X|~Su;5FzWvZ*QqSi6 z^2IG2sM~0`p~baJ?!UEyF5AMyVKC6sEy${e-=BuJH09jk7{jUQ?=wap$Rb1&LO3NC zt1#DA>ilQmL+Q*FR&?gpTMxN1X~L}Xc5rMp3eXj`_C?F8!77(WmC*fVeeMmQrSkYU zHGoGin1i=p963Hhu+|+-_dcZT%;j|{w%A?U#<6|=p1m>G{u@Zo++L~u)?b3ilW<;t zL(kKoLjf^b3})4)!~Ma`)7>xL7FH;VnDMLjsZ-vyl5SxspnVeCKr?0KcE~3z^RDOD zX3(7s0NAsPCvdNVs!b5yjr|Y&vynnZXp492E`|-tKKgR^9SAK9CDIREVQ25=&6~1hA1$R7tNV zwoB|?qn?s&?uVR$PN__VW&{Y3G(9#1)U*MGq z;jZ0Yssj50wTe*0&pY%{FWZ9n-rY!9Vld;>yqej#J*TK{d1ua!FL>8eM7Wy-7NnZj z{e8UlDo7i+gwaA z4{M1nHrkXXH7w?}cj4GvDR5uPDW{EvoAM)04EsnR+YBF=rgM6gIM4ECI5o?Jn<2Wl zZvM+uA#sR_Ma?$&ypts3Ok^V1>>U{p36{9xt| zUxGeiR=~Kbf~s>^Y8S($dhE{MZdY_kU8kE2+C$7zdOHG^0Ft;1!K~g;m&`Zh9zoFs z@?VpFOI4Eq7kZ|S1lohAf?05BD)ba!wy1arY@n7sR7_Fksb<`pfM^~N0u#vO=+ z5gtL!sqHbX?>aOGu3q~2XsVQ$0=xYY0;B|yh>!$R-*zh;hzKa075y~$qZ*x1(?LJ2 z0tXc#m6%$!q^G_E7&&7$g}1}V9uhbIlYC-8;T>{8hX*R~=`?PBd)tBYXayL@8fWwq zF|f?XD{mdu=&a%C$``G!j@65Ju8GA263AlD7`Qs{>;;kr39l zzWsgSlVzYslD~i#L1Rb0=%_5WG*Rocx(>pf-tu&X^iwQhMF;Ow+jQ3k!*CwoG~o!*nbUx`GSjXIoUQ{Js0q`5zdBt9PRE?w=1!@%1b*QZz!wz(i-&k z2dudzpJDaPV%nVlL4PTY+}AEWi>Af#&nwL#ND@7W5^tL0>+wsb8PS$m=;qR4_qmH| zz0EJf>x>3_{EX@P5y({eHT%ZlPfcwgRkr(x3M1eUeo33(d&n`WwtK(PS%B*zr_$$} zL7F^zSqToHDDmyU%Z~K-EW|CVwH~zy;(oA!GU288;OmMFzFXetEPG?KOxGPCHhU)V zsocbYZ=JL7YYPGBdiGPV4`JGY>@1bQ)x5)@-W5 z(B?}Pp>Tw(>0W5*y_t7RuC1si+>azHmf|zNd|B69F~|P|+}EJN8kC2MRl=sFIk=r0 z0zVd^onjtYlKlURtjdeJo-)B>lX6&2^DV+MVfMLz#NybI`QQX#rY3)yzCI83p%|`r8Ja~H`bTU>#%Q3rCN6J@N2TM;e0j{c6;IuAG6mu4}LVw4Bhnd 
zB`80MRN6=+?1?@LFZTBOcwAKpm8V>@AAIm}30BT)k`%Tp{4IlD43dc7;rH}mJ$$s5<{H@@k zW6|*T=WNd*{Bb%zj2gMy;_sgci*agHG4GPz&M}$JshQ4MC~jZv>b&ey9Wi~xx=kbh z{@m=xLEGTlse=0=KN27L9{g)d7}HsLs%%_obq{QVpPuYbBq zQWnRy@O1sIjU74i5UEvDpE**UEq0odGt0!w(RJ!jb=pjD*9@6uw8iTl_4CBSda*&f z#o)7k(4vHn;6DEk@>!zsmwf*8#_5oDjjfl?{;D&uR}Ty|nXIG~0%ih7lFuLx;aMV08|cY2H{F@nMMa8rqd8}A=zf8gPb%K=lm zN-g(!0to5Mg`DrRIb*j2N7gs++4m`YY%!p~{a4g`GvXA7)y+U z>#8vfz0TSoG|m+SaVMR8{KhBd;shF{LL)5=4>85HT5Z0ZBItBux#H*D8)=Gyl&m#0<;w^M+JoSS?gGWQ4@mqJEAxNU83wJsgI5>%yF8P2 zHTfSHkzUD(39rcRZ!lTMOY}b2REVBjGfgJwiJllKit#ESyCl0Xh=3E{{;^$gKti<9?B$TblzMYNOLCA=j-{fk z;)3?*Wjmx@YTi7&0MRI@b9EK+edN_?Z({OyBA%QQL9hInOm%YZiefrurfV1-_-UWT zj8y#@6?A8XleI^ZmYIkiCa#zv1P`9cn->0clQ`X$#bkm8Yq&j)q@7`mrpwfX?M8|E z4J5po2HLPH6+9BuIL|)sdyNNqE)pmKrq&Dt?klhgN*%@j5>dluBr1L)$rsGluye$< z6*5!?eI-a>6#nzJMh-<4{*S+|v@_v^p*zZVc@8TcSx_4>y7Q$tvBTmj& z)xvFt&xn%9f~FSxUwbzV?oFIbRW^9oSFOa&?N}bgwDebrrR2Geg5{F+PW6(^Cac~l z{nl7}{FsCUf$U@EQs+*zt9*1VRmA<->Ky)6# zb_OkD*}JaIrJHE9MzG8W>T=6X`l`vrbLJ*z(w_=n%2XHCcFbguoz^9;E1ivOR6bZD zsgyhmkv$)wZs{LV+CVnAZvb@?(toR(*1-raV)tq)Q@LS`6wH*cnJP0~m`5(SK!k6u zz5WaUobc+HBijg%>#5|I%_0AytL4?54q97J-T0&?LqxN=nJ3cJhGWsD@KSe5yIct2 zqG;E0zY0e5{Wl|{fp(TsglcpZsB?x>nG5WF<9t9pg|UuOoEx*gl8zb0`=Y16_U*eP zChQHMh2w7`KoF5ek+U*W+WC;`GrpCuT{)2%dZz%W@V*cQYJFq z)MxJL@OmAsd=;Z>o>MCPt0Adm30Lsu_w}*nvh6INPHg%gTh8TNW@1yk$W$X)jz(FR zhV@hF)XT0&R?pPOTraz=FSQ>Q5n8-&Vg^jbtWySj3Ikn`Fw!4UdUs?7UvXiU7~Bkg zqxvs$G9q{!qj%i_7|YlP6>+f z-|S>j&NjuotLH&eb;>@puC=K4Phm$lVcz@~Wnyd3)D4TXa;3)q8|J+l7gh)pL{pKs zdgsRGgig}GMa96O^9G`*jm*mL=_mx8q(R`=j)YtU(riy2?qHk^Tx<5Eq~mhDUN{{e zT>^yo`o)>FdOxup^fjh~)L(f3b&j3*A|xU1X%t7?+D;@9q{?Np$-&chmcfUMv^pep zJ9YP#sMejDNW7-mEp{Rzh{l~Y?*YVb(TZxjP_@GT0_Gxtl~Fr-6OZX#7l?nn2_w+q z_&X%okcb&0?5a*a{&~FoenA)MLJ@n~k)WO?>EqjcfAx0!bbJIKwmp&m+x!)|<;gBz zv=%36Nx=}4v!R*o8h3ihb}Oia!+!iifDs6&ppx+&m+a)iPkPqeg^V&9l`WB3MKQ<+ zSg)l7gU;GMl{`S^z(iufSjeu0K9E?lyUn{T?0M%lBUQ<5rT-%(St6;N`K!^o(9p`h z#cJ@^K3!)R!eJ+UQ^w2!DMD7iJd_DDXkhl+?wRR`P7GD`TDVyZ7Q5M!&Y$65th>>t 
z6~Cox!4LwJY4+Cdvs^UzGA_9arIYF$)FOO-B=XMD$qo$N_8OkA>4i4LVwE9$m}t?4 z*u}eQ`A^%+xNWmsZJ+vzz7y=MoU1ca#}b z6bjmu?VO4Tw24lJ%`j8o`{gzP83mo>KIe+$yb&;-lCIw|4|_t{DkviIKsTzc86_v- z(Xn%>_MSwyur)tg`a&DcOrE={YRFH~(C*&hcP(a7)nVR*+ZaV3Fb28-&n3s2H{9|6 zok9#Dy#I?>{O|83-^h*q@pvH*pP!Ns=uV@pAiWV7Q8mK<9~d>nQsuW$A`N;2>P86b zB%@XeFPWu)1GdjXxc6@=6}Jz6qr^^V{I73Oh#JOburvy7Ceol(f12h|sgWc;qA>H9 zK(EH`iy2}}@~t-puqd&ls)1bd?LbIiHCVDP7K_(@Z7=XFli45~tpEsK389-DNc>oz znYjkn_uV##|G+A?){sp1G#|^&uLbJUaF>hL%NtbGXAstVjFC$f%l3=?j)lkaJ7#p_ zmxW*c>8%|c2@YFbfuUV1{%w9YXTzJXc-sl=219p#5mIASMrXLBf*FVlWo;p5q?AqUFb%Ve3xhP=74oEQPZT>= z3G$vmUglq@9yk;`R|Xztxnd{HXp?LPn6k2c%J(3ykfxS)pxyBccVhYjk1z2r7Dv<0 zSm4?em*ZG5zMQqWn7ezwH)L8?9eWTEQJYAFSb!+3kf>yJ5sFspbC~f*?|;GRDrO3d z0(qQ*Ip_e@dLnv>E02)BJba{vCHmAtY|`V?>_|{xZK6+k#eTa~_h}Boj!{*KehO_! zYGZfJP~?;dmyY{d$O!HOvK>@F3|+NGQ#THF@av?h^J@nMZ=bcyoruh^V`ehYLCC+^ z+(a-?rViq_Lp@-|VL#KKRc{HP$Hmb7CMMnWFlcmQI9Tfa0 z3t=f4?;JL>AajIX!moIaa$%oiW_w<1HCvnryK4|~kJxq#ulWtOE?9+)h>eD$zs4-I z=xtQ`J(6wB7`^JXKTULP_ZnN?BbF>vZSA?S=N4T^wA-x#`>yL*&4d6b5UGmh7@LPH zFiyCm#HSDgJ_9Rld<@tgD>C)FPB??9^Ybf);7(T}nMq}x(d(UP)JqlZr)&>&h&CG7 z%xo?_0BT9(+m@u>tGzM5>Wo_Bu7kaT2&c()qgnnNXNKlJzsX~Gs0{BH#xei`jvZ&S zhIiATD13sRmoq#Po<%^P8_WPN2oz}VH9Gw=N`eIb96cSIG1bC3+Nk>`Wj9M>QtX_G$ks1ajM_uzBc;n7Mr^_?r2^m%sr^&_RcWK5^cCqY>cs3_hW9DJ1ByX97eOKj`;p?>{&Nt3d67}Jk@ckMjYG?*;^F(^|k1f(B*jc zcF`a``|UA080^QKCj(CQm?Z?QybLfwbbaI(b8Fnh)%Ic6%;Z2Idbkos;-XuO*}eFUhRo8T(5O0z8&p4A()&iUPewD6{L z>mlL$;fobs7(SU^IY>N&pRm6NVQk8Rb*kcL!EPz&_{ARUQ5Hr3xQ43}Hm?jAXX4?D~8DB=k!h|#oE5}D@SD>jI8^* zv|JWb#aw}}4E~FF)`APrymctiJ3gMHF)nI5OIJ+S(DWr3<(drb%khe&0#a2;NGB~o zO&oQ?e0-|vbIe~34ZB&7P>DjR4byY1BR=|bNU%T^YWJ}J6&HE7)C4N5__lQPi^9W0 z28|tJrJe|F+V7PFutcV!yFQ}kx@}(^pVMGMdFC=(`Bgv|X1aSbLS+P0`-)w#ZxTFi zG>Yjw2+4*}M6PyErL5wYuMw~#LuGgq$TPQ-&?QxHe(cb@s8E8SlFcjz?0co*??s0~ znkZTs(U3JGBRBF$K51utJE@B*loE)YT=bXAJ zJjHdebLs$a>=PR2*_U!M7GJYi5S(GT(3)o=wji+(>F_Z8(_zgasW6hM zqrfw7*v_FWdqSWL1LboWLS@V~CuRGA%xMaDfvoyI&qy1?+m^Wi?V$$Rz^OXHD41}b 
zHDgg94M3$V`=DKQcP$rAD58$lTh8K9j^g(>Q|uUf!`C+%+xVnxTZpZlaHuv={d>BD zH~@~bo(iP5jYGh-8(nziugVe9>S}j+YciiKo8+3WTV+OKjoxe&g_G zh5bA=Gd}h!M?%MQYzyp&=PN6~yN4ZUxwF7cD?R*go^^nSagdSh>`7SeTCzFXkgOL- z`c$tb$OvV2mH+L@pFcD>Maho>XEF8c!tzY{PNzQcNDej99am66)c#T z`y*g3G-VJ!_lMYcg);Gl6{SU^CP^*j`u^DP*2U~RFs;+u((Dq4#BxCXs zg3bS%E^W3f5`=&&e`km;rmO7QH(FzJ1sOP%%mxmD&0C8t#O@rFCgJd(J@eU>8f%+g z5m1mUrAv6v#QHQ5gqAStPgsRl<^jtA_~WtKCS%&8-`jVur3+#?Hgu+g8c!&Ia%}lb zH?2z9K4CcoAy(xuee)XAjVh7Tc5}TBv~I&daxeCkHQ*g2gy7bgOR6udGo(HG2vKMo@}9ZwAm-mQjZIpps8y9zFN%fy=tP}<}^suhLY z(opQrTJFzf7|f|j&(mr8y$kj&04-sR+*5S>h;KW`hPLl1bowa#&=!Z~b-BdW?029` z9pbF~6#U=4XtxnvoVXmNy%qEJK4gOe0vjzFG;>|Vy^HI2;MhB|+8&fAQ;g@k4@V`5 z5}HM8UT3_!CC@RgpL4!zXT2;t7;+s%gpd9_G-A`yqw?nh6nkZo{MfUSDjhd9zolYXTMEuL$ZsbqAyjST7$ozUk z4dsQCjagsG5Av_;GSsAU8jvK;Bq6D2uOE3$H0XS8e+wQ=#*>2|hPewTRp+E)JF_!y zZhQ~K79CR}HTLo#+hc`HQgQq<&iKVRCDl}fiHBE?hkl@Vk0|AlE%jSKQ!!<>>p!lw zTYtqbR$adv8-Zwta-|GUy$LN4K8;cn&0B>_n~gUHA15TDz);GGz` zmuT>}Uq>InGPV32s%2kgh+gFzL7RNTiIRx*QW$qA5@(~goFDdh>`*@!MZ^3|(=Rq| zE&VfX2JhkzlmS;todq*Vq68$!>=o0b$S>laV^3X;l1m7aKPV;LAv0>2?5O>mzP?bc z3aN{Hsz`gfo|b^v(k;@vElIc&Ue@f_5;JB>&YvykTqQMP z#L}%_XA++hJzgd(z$5jsF;urqHpb$?-`15GRMeC|Cl@CnFt0a`Z+R2vYouw>cKoO5 zEv+%CtKmIL<+Q`3SSf(RlHy3?$C+jEXI;19k#yA14_3iCxDv@KEw_Ri2w{B7nFV8# zLjnJC&z*mYn|)h->~)~bS=>9rDLr$IG%loF^P3NJpxtWE@HU7tv^kcR4PJcNOyNP64D{vNT;NflyrB4bazR2H)~J8=X;;` z`QAPDIp5jmKZj%B!nx*}bFTZkf7egu{;2=T)8k@d`}=hEc)%7|-h*~1fy3HCB~spX z0iI2Fi4gH_W8T|yCHPu}n| z*4Ov3*z~)wv+!2cTtr}byquTt0vFh_g?Hz9TbygdIALX$6Hi(bRjh`y8}PgdoLgqR z?RDT7mvzi8+9 zOmO-;c1!DlE*;<;uz&7D(*yIq^eVkA&@8gUR9qoa#f+)IAlIsD}`g z$7X=(x0A{j*>b;)G38I(dbPF_GlokwO_WXU>%M)X(>4~ljBwbhWzZF!HPjoj6kHQmtKemhs>%6_vJ@5c0X(DxXPq`zAN}MWl~_W7 zuQ5~THa-3J!KQ2A65%k_mwO1wpK|98&Yw0(CfU#x^Zau^#t%`$4r^kbY?B(=uc64O zo>ZB6^^krS!Z{kX;q31f35_j8i!J5ASG)TnneZ$+jE^6ZJv&y~Y1fYUkq>Tn8l7%@ z@Q${8klw={J-{AOjNqc-BT2kc!28=Gtt=>!)--s`V z>E2cKh(xyDDdrtBa5U|<5=l3Dr&aTQiH}8h4^I51iBfN|mH2?-AJ}8}lFifD-%wLe 
zf?WXao8&A1M?{djiHU-7Mpry0Py}H^@6}Sc)E*=lXeLvy6@N9IfZfqUzs@vcqHFrY zYm;oTL)5(ku0a~nh{quztX~$671%0@t%=5GC;ZUt>DK0F$%_O|!Cb+rHsX>D6RE;< z$OYyVi)*5!Hhj+rm|b2%ZMa?b_}&cgI3ed?p?HjfcdvL`e}~jVYz4=|?*@h-WX@9; z6vSsnY$IewZ4oPi`9YhhX_+YfH}*oVA<(N$P228ITkG|o?NZhK!uJL0+jf<$aa$q} zQ@&)CCupN4bOYbU5d{8=%h25O)2`Sf6lrV#N< zbyNe(`BN<@)4TpA#Y4vm-0wh51`Ms=RRRv1`a0SZNn%MOFm5^CwcKh{5wfX`CkxkH z=Ip{|bWiE+r|!9m&Xl623Gexdep+H}4!CXF?b>esx$60l*4!2TTmHoVkjrcK1v1P1 z1kx2~@e0Zp51uy&_UoN$-M73{!ehlfn+4x#+QZIG9!JQBok(Pz_a|aplRTkgK1vFW zJ6#J!uJj(c1nMzkZb_QP-K|*3D0R)i4n54chPq!}?gXGnVGT6|`{#~$|Jg62ih2?o zx7Rb@Ohn<5^iB=;L2ibLstx!au)D{$#r*nbC8j^~xs7DP zNJ=i2l6*A$zBUUmGKBG$0$*@dPbZ(?=_xws7#{pop+vR=%k2L-)Vi!i<(K>nG>V78 zzP&~9;?9?wmD249Oe2-ReXR+^+d5q)kJqAw17CqB{NF{Qk3ZcGF^d#*|D2zoO)_Xc zU~pAq@~p>fcT{KvS=5d=7=WE^M0?5R~Xq3n+y_a!=HREEFp*#VuS_5 z&a2Q^ZKrw^SnuUTm0-mUruNjy@DCQqD5^k&q2Nj$jzZ+;pVaO zw5soV+aFi?IgVd_W)T7}?X@*g8vHDhNRHYjguk9N(;)v&;FY3Gwh`M@MBP3G;=OKo zvtgQBCO&Qd-HS2-)7mcDKrwKLafJF})lFkKZht}r(5}qEG7WrSXMcvQ=t|QJj8pLL zXHs}nAzm55db5K=#KCTifw|fmv`xs7+z(=U`S1A>lI9%9flXKKj6`?*xJ2>VcK}`* zYh?=ckf&4_+91!(6@nuI+oAfD`Rewom@i7Qj$^Q}>Jfj)E4mYd(SHkN8ju@<3CY!Z+kglRs=>uT6!1Bh+O^()Oa=cyQ{B-sb zjT9F4d^ep?TGt5KnnwaoT&dX+cJ=bcYY|R0o{kBIT>GUcx%|yn z_6k^y({jn{WRAsODU+)0KD`@+B}pQfm@g5Z=(}e1FxIrvJW0PsaIoP)=Q~mWOAS{# zEBe_MRRA7g@M{p2>|-f%qLDVQ3O*+2GnQgRlHh&eDaE9ik`qEe8fR$y(gWB|^I~BAfvK17jU!8ZfMHqyJDl~+ ziaU?j(RY{|&Z3`I!-Ms7*{Vs3$T$F3G6u_fQJ0wm3s`7^`9lJWy`;^lI)&gKHr?PF zsQFH_NmUM+`Rfrw7Njj7HcC@=Wcy9E!BD-^ozOt!azL&C&|Xyh&qW<)ra&g_aMrsH zAhMH8Ev25X+G1=0t`z#mesaTv3SondyR53)z5Fgyk=n9)CEA#7>E>U`weO2>H)Ik&qIqZ7qOOk=E!_}an}rSID$5R&tW@?+9nDyL8f(#Wp@5o ztisaj8t+l?6z56g_3qqPW7`6G(hAyt&|Ux*3}wZ<$Qx9JFzqRrMk2{~4UK-#D(Bt9`;JTZUu_Q_bY zB`*kBMC#ikyzZ3v0V?vmvbT!`EKi4Nhwed&Nj&M#CTiwksGu0@?AaQ_C5Bod-u zf+DRcKmExl*t`9fBnsGJeaxBw|6aswnC?{sC9f`7+fo%~v3zf9zLU|;D;W^le{)hNKtXjL1m8UT^|KK2X9i(h`@Kz? 
zIO$)uHR#F2SEfAE#S1V?Z_|ND;U{e(i2Tmf#}ydp9D)9~nW}&NE@QtrIA1nR`wrhT z=M^%fZ_HARa~A>0vU*)A$e)=n8k^}`&5OMOXBlv7fI~*kuyd%YSa=UoWld57Q&|Yg z2!O7@GkaXnh@C+gvvxr@W!#9Sjt(^PBdW^!+gYeCcH$`KC6nXNaZgeo`Yg&~R$%1u zjwiGH=u3wxS}Q{B%FFX^$W5qUL9AF2hbQRC&2&?t7FhsDsVMT8YjNkCNBg2o`;Pe@ zt^@Pl|2Znax&!L})j$4CD^`gA$B1sMPLB*Ehc*IA?Va@> z(5R8Jo0Mk0J-xd?#3)GT`HlXc{xSHiKmtnao1iUtK!Ef$lrT$rz zty_>Jea~RrvzZ63&zDjd?u`xASx}Lu?%<_`5F9-?LURtWOA^5ruY+Is`x5q>an-hC zi|5pkFzUZJ>)DGM+tOrAbYS($8STxHqb2_WKrGF1m#2B*bw9Gpg2y-5R6r4%DfGoS z%L#^nsd2j7SrrPWmbP9<2(hb)2&HEchE-1iMJ7{f034-n`Fc}70WUJc&* z>P-DQfBT@3;2Z;%;+SlR*U{*brhUdiKgG zWB6f1Rc*&}1MkuGf4b_=N0vm<$^T1U!{GTty^g*{7r(WZr}3%;js%pApGcZ|y7o)k z5%BoH$by2AGYb7`1GhNMRMH4rdw?4V*TRc^ay(pE5-jw7>hCbl2;CnALIHu_)(=rRAnf!(|h$KlgxT9yl_usco%K+3%q@bOUB*!F$-j4 zAWiLki8q|ruoB(>j@LhzeYCLAp&BE!#AiE*L%aH-FeoB zUH$ErbbVxyD1;__lIn0d+>enWD4!+pdDf{m2M%`rFKaYSD}qM_K5W+k;Nwkbd^Nwx zP)0CVF#PA_AVqt87X^dLI-a9Lv6{xG;l_UgL%A4OLq>u^TYkhlY1ga`cUiVq$N}Qo zi?raYLWn!{XzN%1;;B6GAAs}VKY>(Al;V>*w!p3&e$_?^AZ@gT_ABHGr6#Pew7Fr( zd-f_RBk>rt6bpoTs+~M#eg0EQh2Qa-rbg>SjQ=wV9ExVAB6gbwB1zZ+qU(-1f&g^{ zsFtX70Qw-emhUMOU&kqUP5o2)*PKZRpzjCl4_ zf_3ei5e8q9xx5r*fJifoMXfXhui;q0E@2UHjir+1Z)A^WI@rI^)4_ zFAjhRGDPs$^HAW6FCf4Fu9D?*0gdd2qlqSbb#a@n$z9KRmcc!@++2@!vCGiiy%@yr z+DKCx_UKMxR)EG$y8b}QpgxN zaN_dQrS^T)pkoShFU1-myomV{(8jLci=beC*f_My_?7zB`GiL}TZ(`dY22{PZ89$- z_%U_kJ1x#_^Vta%f*26-*-d3dx7mPfUQsu52M(&EIcGMOLf?Y2@zynqe>O2ZI+vOrEL`^l>NpFlj%ycl;&Kowu zt7W}hYqn_vuEMj)6(6|U5ucPwZ)?ACAEge=4~Cv>%Vqvj8|6t3M&L+j8VT{lW61<9 z68dIPQ7Zluj^+!{l8rjaR+JEPZ4bez*H7L65I{#wxi%guQ_)G1x7M84P9osn?!@143 z^bR}}5r!9aW?Nq92g<;5ixgY=gs{mBa4(HZgcH${YjvsCqSx}-C`n%7jeO6ipWBEd zjWpU$0%2)B;>71=>Poi93RSvq?f5Q@qXT7r*K7Y6n~HP)CmiAU!ph)-Jtze)%(3RR zMMajSIW)w8`3e^eriw=npb6Tmt0&-p#6%8ol{i25{p`UBXQq5~CDHfP!vX=lVEAt& z_`g4@@(Z{4*M%Dha?zr$2(||nm3;Fs%U_rbiBra(c311zA|w-7!s)RvD5=PFSBLXC zxm=}jre~Y=zNk0;6!M_Ki%-j>+mex1Eb&f=j&L;dZh(+&|0P=)bj?gp4;Hn^VTrio z5Z8YLd>tO0LoeGaC%&xg76x56sUmcj{zm0)MDQ!XUssA}9uXk=_ 
zjnqphLk!rNBLlF(%CI9h6YqO)&a>PqT}$P%awFSh{D6v|2NqvG+vAzdg|J(fzpo%P zd`Q9l0%D;|ipkycx6;6_B5~lP*!)CmbAXs8St7|UQK14IaXVrkv6o{RmA?t`(nC{z z)XD$U>I{E0-P9E!G+sl+{PJC~yP>Lz1y?_g4|-$J&H<4hz=rqmplc3IQao<(ITv8N z8)-Tool50kLj!NV#a?~iL~UL4cA=QfVKoGlnm&S-`ib{j!FRX%A>6%|kUTUdx1S@f-|pC-piq;11nG0}ZF;jyo; zV&6WJECb#osXpxR?(D0xyGN0HG}BR4jNy#&cPYzqrdN_kxgFUnaq#&QSke(U5sage zQ_<)f8Xix>HelI}dIVQDuw-MVPY$MX%S}KH zpZB%GrSW^S7_FjblfdW_=i9J;7Igq=x@61$0JKX#!WKpo?rNI2iJx3i5Bf4?Z>z zN{Q8f;_Iv56dmhNjpIn|S|4YhQB6@drn6+^!>IAoE z9iVYn7pUthVjl;W?kwzYIK-@FH*C7-UtUTco`zZmzZ4somD-kY*)avuSKr|+jd*sg zN5yA@&pb9XBS<`r@W-f(B(m5JoQDWz9MbVg)l43>`LGBWM0l@dA?6NPx{UJ5-#;W>5 z&JKM3YK}{9nVrf|&cC+NnQzUR zc^?JCB9GsGKU6tJ=YVO*)88vl=l<~)y(R*@MNy7h@_o7Ww&v-LG)e2bRkA`$ldVqC zdx{w(18@>S)V`_`mcCB&{WOER#n z;pG6jL%(L66h;|t6#`qjr^b3?35|C?_H)plU zZw`&KC9=29WqjeOnXG#U`4g`cR%>37{1IVctKURPt495v`~Kwh_n;EL^m!B9z~em# zRGKvG%I00AV6NCyHOlok&m*L83W-ZWQ&i}NHWvY>Mw?R{&0$|IS+XXVc<1tK(qGP$R6=_d5q2RkyVMi<1@IZ{*RuewiwE0>*|2qr7SR#;{ zZS`cv4dy(8iSTuY~rhFQW=kh>QyP8YW*oO@ro=iv*eh}W_Bq(xEM zRwfPB(^0vN+qY`poC{AT@(L?`lKdgZ%!+f(fB_rT&2%x(!qoI#_*VW+(AtGUv_%`& zA8i%Z-j?dL;(Ck1i#T~R?LxopLL%TgPWJzxEI{bE+SN!mZdZ1}!24hr&bzN~cgBNh zk<68hCxAVTQz#@39=Lk|Gn}OrXrLrOqKT#~a7}d(JLT&!_^$}zS7UON0jp%&G3%YS zHXcyzSdw`Iwr-m-$Z5U;^s1=pYx2&x)5-!=QKX?^E*WV z%>7nK#VWBM9ff*_)p$evD*pcHU4*F{34B;7W?d`=7>Rc`Wbk$QNZY6a_>go%5Hfub zs#6;FuiNoz?Ab7*z}p*?50+ba+$!qHF04oJD1L?7H=>C+-)KMvulU8J=HaI5Iq_lsAw$>}wzW@90%sL*6k~{h7iQa)TT5zau8S)sp@+ zm$ce_B;JdZ1h0RD1+Kz^S;qjc;i?SYCRFTUQK9iD?-+9UjPF+ZBIDLvM%9pSZS8f( z1HS*mO7V8uGTq^p-$fz^pn(edQG}{sEj3^vQcnrdjTNQY_1QPQR;y}{WIIK0t8V_K zU>G=zAO@~9v+lBboht2;0w$l&>(Wq{k{7VIWlUdV|5N13=o>xa)RIzW8N;!%gEgFz zHkm|uBc-O1z^xxok5PImV@D-UsuvVDfsPZEvh_D$D!2n{1?hG^|oFHjiE* zV{|z9h(PowXxA_4=z56Yp8>GzYG0EPmH!Qji)P*9*>=JdSQ=n)Dz}f7jXU4QE_@_~ zkFueobs!%sLu$!+-Nxi8pskUPV@p9XGoa!0rTeM-fpg^duwORs6uYaA-HV z^L~9VpUS=-L<#S#U&c_C^q5-1l03a3*QHKs)(lzU{GsDyV6QZnqyrKDPW%2wfV*KA zIrDN*0G0(*X>7GE^gRnkvB=LgYGm{r2iWvl?^$0yS?{m8*(r$4GJQcVMkw_Zj_QWA 
zVFBc4X}2Q^63W)Hd=_Q+NwI$LqY$Lf9XWmIyyT?H9Vd`WMIA?$hkDDHsBLf4m1wev zfiP^it>pINb`s?=G@jYgPRF#5pO$ftWD1H`?7P``jq514GIZ_8x*!=u)XY3@3g-J- zk8w+7-7VzpCDOEVfVXwT4z&-j!JN(8zz9N9pyhJ~5!vbWpOPYSzw@kSV3iFdzWIVX zt7zwGU55SS@VZ9#5JrD30k*?6WkgfX>llcs^-R;`jGNuW!cc{}|XJAb;#ipl~pS@5IE!!W-qf}`3tVb3JLqQf)xgNwoJ>%FS^R@)&l31((1t@+zj0j7jJieUL5anhZ~`x?)~s(sBT^7aMYM%k@fmtN& zO4zk{^yX%XY8cCPO&1j%I^(;xv!;bF>-`ZA5ucMB(EmlEv(My)6yr~QaoPU^S54D9d=Sqe-MqKl*= zkz4>ty-FmTrnm2NNl7xHJD$r9)`7wWz@Pm_3rgB~We17)vpyaS3(W8hdiwnqSSW0D z>$Lz>rmYfy-%y@0!w4c^sCVI*hm$bXt?ftLr(oP$qT>freHlm#)sTzb3s0>KDK|JJ zC=HF^z=ULEY{l0u0!#PW=jKr;rD^lIW|;?7@M(J0HwqL@5I@%lydV3cT(%P3)TD4C zIwqFO@i)qb_xguK#Hk){h5Si70zS7!uL7VoRIa@G_Aod!(?!p`FK@mJ_Fs^iL{w&7 z2e`v&$rYyn$Kns}F%Y`CaMpR4;kRGR^s(|GxJ@(pNS5ZHKSim8c&?tIoegw0yxx0g zK;RCDCfE^XN6sO2l)i?^f8K977^`}^mg==Qepo@!Bfi%T6ccP_d(hSX(?y9zr% z(lFZo)MyN|2MlAH`ek&12})AC{7|R6SGofl*B`o|N^0SO5&t;$EO6uU9h^w)Ptf(l zX>309J<8(1p2YsS?#!KJEiVQ9B7SZnz;{q~TlN~aYlf$`1`{=bXZXvwq(T8t-_g%3 zL*~IN#qWy}lH~^YNVXl95m1ErVzm;Mv|fO|9qQ@GcPS`*zuurl)x-fv()BZUER)N( zV_pX5gRV4ULAv_Sl`d$gvOIrI%QpGMVfPKg3e&C?*VcexkZ$KNk&CWYMhalPJJEG? 
z`A;A{p5mdWr32&aYb~oQ+Psldn9Nsj=zy=p{ZM+7U@Eu(ojrA|>%xIj#4-PG5j)ziTX?`4TfV2LMj~Gf!@EalreA`+VgdGr6?)y|QLZQCi-LX>BrFeT z?@CdyC4;s;{wq!B2-;c773~~yvFVP-`&oMH&=I|joYs?Ip3@tDEUoPd6+jAtSnp#} zQvuo|9^uyd&?ELbk9`dv_@Zye5BOAa<64<W68}dw}zpzcZ^7VHA}P%iKI53@wo{ z4o{1Q<5I%o!irrJU$)@Ionl)_Qy~3~36L$tE!=M<`E(6M4nU)Z?_~`vi#TsvpD7p^ ze@$6RNYH+NeB9grwgEt;NjlAJr|MsvZdyVx z@h#+7z%=S@BQMEJK|%HgnaKBJ6NO-ruJ5h;E@G~iUt_2@MILJ+y9W)ZC^;H`vOg}U z1C@_7@4L}Qq8M^jD0=m_O{xJWIrTSkVc;;&^nC}hkvJT3lQ+@|XGfCVe2+#@Jr36X z+9eQ-(b4_AH7(SDI=Uo=ecigXZuGG}Q9O}3{@p>=Wp?Ql?T2hGS6cj?X#e(4+xN;a z*$dM6ku)*3D;0acw$%oFq*v`w9|CWwUFj~c!mjqM)nqhD#c!`5Q)ZfBmv*OpazXo$ zsu08t?4fA{501pPxB-hPi5I*agl(4`7ptnmxUnA+;myK;XcY`@CH#A3vWF>?bS-dS zAme3Y^(vKy>?Wey3&=Ayf=etV#3FY?JoY=-%t9(Z>eYz#PIA-#)wMwRHm<0l{Wb?{ z@II@B5}*;I+dn`#0T&H;gHlARq;D#I0Np~z9!^Ur01JgS7Dc|>6xJarZ5*Tly5bn( zQeW$H(!Q%A0P{TshtiU^R@qZTzDY%TZEJ@8 zP9a3ATLI+(YiHzA0bS%JV(uwiuUkWc^Btpvbs9qI7`qF{uQ#58WDvvi={{!JsMN($ zCer)fmjgg*Y)_hIoHBaTU`In@!?CI2D7v2RCIa@;5jdkgz>o^gOq|W2>Urn=n#Q)T z8QoayRB>m5^-5gIh%U*&Gd5k8V~BkI9`*tO?KOOu;>th1l-W~Fth;0|s-unPxHx8>`K0jd-08vz>7KVw@1rlnqNRP3uaKe-kV7&Y~ zSxc$u5@kYuYv`=&3?w82ey% z|5_gk1i{bjQon|hHc7A7fgdQdX6+!&+cZ}aO*-K}WG!Ys@!&?Uo(yTU`JEj}9RZY>JBwzu7@ zz-s-?5Zj)(8g}i^NvCfwECE9iJmQnV zO#9sqU)==u>C{RMbI1$0FGn)~c2UtiJ95|F6ncUI_O(bN39+TN z3N;m2!s68lco=6Xlx*Z+0knFe1k(-z2)mydNg~EG4vb*^OUm2+f+xFRUSst-S4ptb z76I-)Ua7ZL8QNN8AMVr0!q@(V*$ObTE~luE%-(25m;BrFao^gy>M*f| zn_mv1X;lU^cG|X&cgRmOKO)>XC(hk8c337qE?8%eJ!gZQM5f9GZj1K@QwdXR!!MU` zBc=Rf&A$Ro8$e_4_IfCHj`X2moEz736YxleY$8jOauvuHII9Y8-}9E_gnhUOh8HWC z62D1~^1OUWI#f+%%DNvXC6zmq{f3AfGrk*W`^^`i)Q;1dy$>+Z8+2<1LQ$EL`LaaZ*v@{i`XuU^(9vxh0mJ8%L}KO~)R?PpdD;6_s)ln18fb zyiU#zXJ8!a%5Fl+$4;OvSwESQG@7J0aPVbc*Vg0MM@ExX03NAto?3tlm0G-9rabtmIk(DVfTGCFyP_A) z|B~Zpr-4_ij@OzE#cS^#;WuW=xnzpZZM>P=vMmi4?i(EA1p{=Nv`b6h#d!U-N3--s zkwLn=B5^=x_eJ#pVCh~9tZ+)Ieqa<*}s-jc1NDOj^pl5tBAjG23Jhg=|F%z z3D*+WyCEES*+|qUvFlh!(QP;CF(-+b^}pA`Gv`p-?BR&`^5*jkb@l`*`o&7eN8*~4 zga{-+p@b3Vl*Gjb;hKP4T+)ek?NN$vanVZ0M{@&F%T5TqZp!`4G-QR|aBLY0^A?Zv 
zRY}q)<42I(RK4s477FicEcW=r1p3XJa>ry zwYF(aNOEUszQU1>{Z0A7P1$;@t=H2NOFy7)glk>U-Tg=ZThtKejK82v%b>4gOs=URvc1|vAS|@PNf24C*Ca$I|I^vL8u9_O1 zF7{jlQwh4W2l)RAqx@we+o+q7S9+-tF1550 zCM}dfKPgZx2hAsRIcIls4=7SBJ0k>*v-CLE>7$N2}QSt;b^rL`q(asc@N6z;5V!X-Khx5%MtYAk+{c*j!^$vg#ESi?aJ+% z(7$`fR~oPoIna6LzQ_qF?Lg0BNOvQNyVU0u5+YxRdu`7mn~DIC17)eN0~`p2xwkp} z>_Av&evI4iRgB^2`g>ozP`V;HL1^Hy(X*hDy65gj2@W)fm^i5I)3^0 zru1?Jqg1dZG?yZ^%QDJ9#E$PVp;bW%e`>2A+4kui6HK8IfPiKaC?y2Y> z0k(nvkF5{#damTbFU(9hmiG;Bq@_v4ZB5#Kny!;OVLVZG!TRKk94 zU%nZNG}F=k-$jDhQz-y(T5@S&-ysbA@A{WH1G0N8Z%550FZlKkd~v{(nA@AIu*0zY90jgqGu3b6HqT9(B-v%7$|UNt2{chfNm8*9ZG4$J zI}f@3i73ix7`5@IfB}+2ErI_d%Ago`m1LWqiYeEh8eC_s=UF|iPczBoB|4J8;RydwTFuDX;(=|Yf}e!XEW zCi#d3PC1oN)?J&JNYq9#U8(%uLyQ?xFIOwKd2ZHYPsh+JN@a(U0mT`yiz$EgX6!M# zWM|0WrvXjMeduq*4P-a1=^dicy3%_12rRJM#6+QeUjX{ECUFVMeP@w0k*8r-(|F$r z({iK}FOM|`HBvm6$%6nv$`BL^7lv1E7KS@z;m=>0UvkwF-y97k1?nhMBUTY!NkOB! zHNjR55*IpdY&9t7cymbqRLBoOl%sG+?Gr;xtv8ne_3x#fkOTjr7_{HkVnqqYkc7}D z+}gOPBLb*C(~9OTaojvmfRcSqzr9$V(N%9d@L%dY@c6Zk|^F)9TWX3yKU;Ss*8?a9{#LPs)XiK$ughV)ZGjg} zV*K-iq`@Xau#HkIJGj&pWvv>|i41{QT-ls2$1L1TiH{AyTz|&LjDf%%mxrcI@a}Ov zAaV>1btL5rd7jtv-k24-nwNi`EmzhhmxdzVT1%&8s+t+GlK4hJw? zG|s~s^2nFYh7ev17~_Db50&~)Z?VZmv1#j3nut!bR3abXE)05ZB}s}d)C77_O!$Fq zg#VAz;lGW>%WPjghFE~N0K@UAczIyv|L3X{wW^L6;HxpVRL`$UN6rU?D&oDwz}vpt z{+0tW7AnCwGT4BEl{XGa&Lx} zVDv#46sK~gIaKGa7wYlIE6@!%@_qZ%edBV7@1ON}X1d|g&&dp`U!x&!;7hYrNBVwx z0p}QER)9}n%ndfRNv#SM>X*aGbX`FDmeB0SVHF}_Gla?Jfvb^Vt^#B^oL-eehe8UY zw!z^e#2OJ{bvpBU_OI*q3QR)!o&wvd3x2YOZ6st(=)V1{LVSsIv`@-CHT+;%Uk|JH zxJ|vjn0FDt;!fOi4+r{qKB_!~$R+3S&qv=KhsE}ZafyQFN!tjTSDq}n{DyTb#y7_o z)Ay&f7(sx^?k6|8Y`Byhe*uo@&}UWw1T+~Ro1#yz^YDw9n1fU6UFn+iZh$@2xd+q3 zeP2ajsho&WB=@PNnRrh2#b=yljtu+)x(MK~;L%`HRJU*va0)?j?2I_Ma^rVzDCx?u zu#^*2zj~Tcf0T8m%eXH!yCOO^;-2*TJtqS8UuDoTIWZy|^ytgwN|`z0pB4V7^`c#^ zSVK=z0&KH;BJMWN+hQS0H}Or7fIyJL2-}eDr$Jw04af0}!C9XXR#6*~Y9d$y4!&0? 
zlq(A<(41DvA0tc0&c0bG+-e?aDdfD$7(V~w$xBsFRQ>Q^PGM;;&FC^%Fm@iCX&982 zHtw8e`)u-4eLypGh_h5e31^qFHsxDO{mPL4mcbgu7f=AwNC;X`1g3Jrye{nfR^FG4 z0X>vxXEpuM>QAl2>YXbfb6C+1#Cd7@sq>$hz=`=q6!M_wv^VwTc?&2Ct8L*kc6sM7 z_F%4=(5@U2rlDhh)4{rq(7HG;zHZFvLiJ_D&^C}UV;x8?l?Qi9qhAFL zE!KA{s;-BC4((Q1lMg?kF~U}4>EvseK{0`ofm8Z%fOEie!0YO1wY{SKqK_VmGic(} ztem6(#|-a+R)jjvn3>9FDO4!a0L?(5-e3`Iy_b1-db>80e7_ga$ci_gH|^4_)u6Rk zIa!`s$&)rzr=7Y?ymnY_5qhL|ldq$+H{0FRsQ$s=&1-N3_Trr_Gx37J9<|n=QX2>1 zE7*9|7jF*tbs;f3XXiF&ZoW*S!ml^cv{hZ6GKNG5nVz65)(z}Y19GKC%{)`mC zaYS26^m*vW$FwY@PU!#T7uftNinISc20;Mt6vMWU;NC-YYJ)e6J(}fo2Ab$rmTQ>1{&HD=7yx1OYrzBVv zNl_IVZXiAeR?b^)2d_%scMM5?-&ENjd8Qlq71axcAO^4 z2!a;hwlZvZ`Fnh__LrpAhr!79-wl<)_iB8ODPg+>*!NmAwVoHh6$GhHwGRTs%UR09 zwqBO!8lkB_lqO$?Iq%gLzSM!8yg@T^r{*?xwE8_=j@R?BHu(zzB?y4$KZx>)OCcof@b638=m`5x4iyWxDF|^Rl(J~)BMNmWQ3>g zh*;utXGMNv2gVciSk8Nn(&+T0UIEtF9-xHIaZfIh3f`1~A%lgSh#AEnF4YhVr4pZ+ z{FiShNaNP`R(%%(z-X~T#YQ|5dqvX}KOMVaNHw}U@4cSc9G_HlJ0E@%(_H`6@pI~O zovY`Kqa919dQ|URK^dDqGIfi>_2|b-=4*+RYe(LAoTZ{*2QCTTo^%?CUY;j@-De2u=L~nXFQ& zHMyeSyg$@F*wUP42r64+^SkUuve_Abaom8fTE;v=H3dkKHgXnpV7rgtNXy{~{(IyU`=e8X*BPPIvl#T0=) zJQst0-^opN+t0c8zHv~$*{>0=scqn8;BCW9-h@yX0P<>aQW6kz73sW4?_)hIK?$a)Z!kl}va@^@18pWMV7YHr# z6z}YM8Ok0$B?jEX90iuX9Q|fE6en?^UA%fyB)WCD?|&-Y-J1d;=S6<89I1`d_U)I! zL28<_%kmr2M(^{oFZ_pi)w1hnj2yhun0@FEo-k+$B9%r zb3#N2`aiMUe|AX-0IiXLuHFY|8gtNd(IoLNq%0TUZxqkA=F1z`YJ9>W1GREZ-c=f@ z(6B1~Hqu}jQ7#2FDi+?((MZk9TvtL`T8i(Ra!Ld5#Dk}2`0|lS>h7k&)qeZ&OhI0R z2kvEmckg^nXyLa`4Vf5A#;0?H`!&zHg<5Q9@u-L!jC0bg>VoQp!T$2qx-CV_$tds% z7<6&-xcW8jk7Sb@EimY+l+cIb=^0Q`$2zs$37k*|%+S@I*n5)S1|R?Nn9lqHH8V9? 
z-bA{Jv|dy$e_0SG+c}6vUeWRP#yZ*qwD4k6>f6?MKKi$Z%6LO8n4dZlo*WK4zNaN7 zA5Zm1ey-6T{#*t^d1kV^@apS5aHLH1-+*ZlWPyEuBg=GR1>>?~&exouHKkOdcK3kNn(PTgK(cP4t znq^nl*pZ8f#&|ACqSgZkJt*SMD)X8Az-_Qgcr$N=w5LmwSE|1wQ!0twqVXM_U`4Ds+)04ww@)__qDS`iE#b_Gd+(P?H|Y!Dw%Xy`XbR(KG*Fw7pe89N)6&jfUXv znqVQgI|N8@Ng%kpyTbs%3GNPqyAQ6x-QC^Yb>L3++53Obx#xTR9{Q!bmsM9+t@W!V zZN3}S2|MivNC%6;{LusDe15d!w3GWR%x`DT-O~P%U`J86riF|E_1|!BmWaNTTp9~^ zrj%M<@a`?2%`{AlDk*Glv1r=t!#A>RxYkzFOKxQb97*L2UMOmPP>r7C-G}OrR#^b1 z>0N@}T%hh-D}YcWNjF1by%F_vKl9h;6)P;=iI_(xDq?1$m-@a9&SxC(7B=CkCAwo@ z``8ecr6p;Lf^tq{MK0K$44S?7Hq2tisz6GY~^Oi7IF0(n|bnD3HqCNvJV++JhSLpG5P>S3p6He>NR%Spn=WSam zxBuFX+-)*SJLx4H)15?Ni#P4yR;qtnbjd(0@wMjiSr5yUdZS)AGI|GmB^^vn{Nt5t zW6p;FOb$Iy8^n4dbzcC`03lmm7FB1OetTkl)#*L1wCc1r`z6O)Qk~itMvRD@K1O#B zXEII{vGiXRu zhjFlM(}~Qza5s9-eZEFfUC3tHWtCT`7e6YcFgb%|#Kx=kX03jqE;eY|odIy`d?!dG zvo-MAY5RjpB8H}SJ^x2QBrraYb!FJeTPg zI89e#zRm}PYg%fYTkfHnbx9oboO#GV2VG>aU22;{EI^`~5cvaK|GBUNOn4VSFE9nv z7em;zDeYJqalzT`OI@k@j*ioVVOEB4#;Re%FQOUStsfX+F5{qf+ka?fk4p0=C^F67 zo>Io8iOs_S#-E9qip4%S+otxz@DtM2)OK?FaK?fRe>|&g(2}zbWh; zSeke3TU+^}km&K9NqsE&cr(P=Jt22pDSva=+}Fh*c%IIn)V(I0{t@ga!>g$H=;rVd zGvf~9KD}C7>DIM|rXSynOGJW+4|tA16H}-LBS`)O&MTHF;#NBYjQl(BE@?mgT^bY#&upj< znrv8NcW@9gcs8LrM|LEIn`pG(Hr5?ctGPMLUa@dli|puWhOPc4rzRyeA%NpH-&)WV zmGs#(m<8NrXAs<2TTz)HlrfLqjLq*(plln6aaF4ogsp2vVQNH@g10o{iFYWJUth0s zN_C)Ds~+r$P2E8Fj3WFwA63jhKfzR4e};oY5>=Sc=cgonFq{lAlVt4Ab~Pk|TD1~0 zBsZCo1s}CyE9)=XoN1IeHA*!K(oVRd}XuE^=ybnxgJ45UvyDo4VWz;rTpQq2cz8F`b_U`DG1 z;$sKP1=c|-ZphidK#M%XmZ8*nYqUIT>tZkK!e*=D)9KX6(eDkvg-`3p-_4HC8p%wa zUFRDf-9duGhlOHO0M}vb<90$D#kWAD=O-bBP-mI=JHjJgOZTdE`BN)|CCTNR(my$) zrAHoV;E&fH3f8dJ$BqMcg(#LaoX$&;QrD}DgA<}&FO`N40GM} z%Q%i2*p*uD8YlFxwF=T!B|eNhi@#3U^fmvb&|Ei)5G`}l56j7Hp*D_kP1?hU_$MS@ z(*8U#AEd;m@g>?64LttgEpmq2HyL%M@6KL(W;QKlB^dOVe$R2@+u_;7HfN^A zjagT!j$i`KsqLL%PXML5d3r>7Nsfgb6gU;8dE$Ig@2ObT>;%=cN6Jv6y5`PChKH^| zC1E-5sB}P6Fe6z{7vSZUl397tEl_ss)2yU44`o(7b=R+CPqtE*RDSK)R;7y$m&(=7ih5M^{d_}SE@U+{J%t!N;K4ztA3T=*DqBtX3ZclH9D!h{LJES 
z4-CZaGJ5=4#!=P-J>C~vB^)Jqo5j4)y@`)aV4d9#()pJP-eimp6_FUkW~jGLjDhyc zW8##pl87xGN}Ci#Iz;?s51ky0zyI`M$VhH$Sy*(@$Qh*3oT$b!PCGDa&zMbR5V=h+ zC9d!w<;&WW#svd#a0BA_&$!Nn5Xtg*&X>(GI@bz|8ov&^SIx_#)>`sUpmEIJn#nrL zfnFnnMEswA*yk4v_Bw!PmxO|?Wz8LX!9Qf!YQK{UvxU4DRt?)#-4+h<#<%#s)5V2% ztk1s`pIAju8PT3I&0?UtBBA3;c6490FA0PAW7JY~*|-b(ibd}Sv$FEf46QD^+*S1V z`uZ_s8KMk$K+u=jBOZe_SJT%n{ZSW0eFZ|LIl^Bu_%hW_C_NN9c5j>0z}LbtA|ek| z|9AI>4D>|kz4@Qre5Rt1YEXtwHRJrdKx$@J=;c;1-FJ(v%wUi z!Ayz0i`O+a$ZjFidbKp4627B=G<~3Kb^vUl_Znnf-cd zOEevt{>}EelFXt*=2k-_?P|r3(yw! zRNQ`?dY8S=s`G1W#!62>Twx+&6f7HCl3aB5b}h*P7{>0`*x_ny<9M)Fd-3)ej%?3E z_YSVvVw_L%%{rQxV&1C9@^sp_hY+RlDx|IRC4xEgfqFjlhzQTwAz(gpqWH zsb-;I*omSeLtK;HdsD%?QJDIG?dAByr_jzH)g)_82-clsXi=dQ3syN=zUr&ft+KUI z0w`b0SwPS1v#N_26=*YTO!kZEi(6e?)pH#*j=J%z{22ajonVQ|O_XW!z2s~;7oS79 z9M|@dVc<{B48o}d27QkpcFgf+7(NRpFd@!PWZ1jLS|B(GBnJnN9n&$y!83D8QBfEO zomy@uzX7 zvmB+2IlZ`UAiSpH;g9I8k5IbCzU$Z@aMo(Y(^1}`f2g^|hsNxdqD(8Yy(RyaEcLZf zK;tMdL`?5gx)O0gI`N}qpD`yLOV%3Z_Zq&T?&P!$4-DMA_P@Z@UV{gB@ z@Q_#2p%xfK%-M*Bl%=bRnzerc?J3l>Bj01j&6c!30Bo63jk&%{N`~ew+O+V zQ#QeK4ASN3e8!b-iS4^hEC;3|XhCHlgML_q)LM6%W9mD&=5el?YE`&yZPIuIc&=sN z$gb$klGibBm5O8ig!*hqU0B2sp5>qS^uy<`v&56= z#5V5Lf6i)aYvii=*YKV~#SEkvw54v245`9x=azm=-r+b$y$+K&qz-+h-^rfOZkv?$ zCuK_yc_DEKcefT8?n{|ooDNb@Y<6<&-3{!uy3qQzLlS2bv4U?rBLCO+kWWP|1Q0n~ zq2@lwcdK0r)7z=zhe#+e0|cA5Z->;yeV&A>qo7a>$GScj+_PG>rPw3Qw+9vRMVR!i z`wbHA`JO}RwgT3B+)uammc^|Dk8y|neiynuUY)B z=8Y_L&GJ+v?ZYApV&w06X`V+!yy$fWpn)ZpHH-=5YE97E+uI*J9xEne#TI-o-cRJl09(ELGEmQAaz;XRf(~k~>K;})I|;g&(E5y( zo_+i~p5CFtZJhNFoLi!AcCJ_A1I_ndA0rLpR9U3J#nk7IM&l)Vv4t->%Q7q$B~6f#vq~R|4TZ#%EUUOa5nqf*~y@QeUSX>vznA z&+Kins@!>WU;&r6sQMCZUchR5V4}?32Dk_HuGe)z)TujXnF6wXW9BhnxAobBMPN8$ zVZvk;ujP=NI}Lt*f8*FVgqb#ZB_TI$xkA~i7PtSPJKS`?D$Al-gsWXvc-Ml-u*1yY zcs$34HT6~co~A4*0y1(9cO*_}=lzB1Lj)JqQ?oS-8j(uicRt(3V>z4|cDp@`l=uU| z;zUYoe(9pv$-12Q%uz|ZH8#o$Ttg7c={`=&FV|0^`v@kZL}P!-_*z=;scK$*8XqOo z=!4-WF@EOOqq_O%cW57cu#m`LeQEZwiMR743jL4kA3MA?0`b}iZJS&P%VhS6iM5gW zYBy(?ipU{PSThr8s!>@i{wu 
zG3+V&1?-p0jE=G5ZX**6>1?l&w5jnGv@hpwCOQpw?LxL$c%Ic#71qlWX;yZWu`dqc zJV7n&+G{!8q3v#&J*$6^a{mz;5f&T|u>BTf?d~JPu@Y-x!^7+!|1Ja@s{o!yHS+TQh@(Wc2BeRPsuO@^>&64l4IlCTMt(Zm!Uy2LG`#O zPFJa#H*n-e{gNVO{n8koT|lJu&=eA?32wlxjA7bCu}HIVKB>!BvJw0ASJIqyF8www z&3r~LG>MD0+xD-TR>MqXw+S`~#9qCj4X`JTfH90g*9j-AgIM+m(8b=r8Vxz-M2eBwmo#*#Al7bYrG#g!?Km7y&UF4wv> zt$dJc>1uh-{)HbDU^}sDdq5w}R#$Z8rMoAxrWLpJx4eqc!qNS*Zy&rWesG^zPT-A& zme%OCdxc*~(WZ(DiFx3eb;o?n!FA#GZO<{uyU#pbm3Ax=uL^ROi?#a!ujf)+$cH(gEn>uGoXhDr_*t@ouaj)5|!}cYyu{&^jxoiGgks4$W-yrC7%XdSB;KD?7d*CydRUMlZ zS%H2!C`i(BCU0lvqeVgu@{EqGp!FWKK8#R0c{Dz->F0@;E#~`$0^_|6`efqT`g<@E z94V|sl5%y14&){r%)g$&`Jwp{^dQ3_-EcE_NlQ{R`b-`(cyTHG`whFwkMAyFM6mfj zyR^yg_$3P6e!!@GXStrz&JEeyA~1Oed~fvx2h*utpc7o7bNUIR7CR{lv@faLZyCq5 zkFm|)cF2DMBjQO3sn0XeANf*S@1bCKFz~@8iuHN!e$jP1we%&lo~ti9PDNmTfy(K5P5rKv+D3NBZJIwS(o1WChuG zDz`QqF}ak#ReXkP6Aq-=QeM+vA{L?#0#BOE)_SYYd>|=M))PrL!p`nfLu4UsMIm{!q^KnS(K2+ve5SAO6q0R|B5(5B0?u)pCxyM+=xu`lZ!hhg zsWN#_%27V_haM^>bCpWOCCoOo^4xlTUIr${6r}>kqj~GbycDE50CkP}p5#MEjt#BP1GW}>x z;@Vu{Co3Ebs2! 
zeY5McxjS9$PgK?4o`z;%V1@hSp794U7*WS7MC^^{>zx*Asp!@~Q}~C!yBDoN0;SIj z4ZN81empQ~F6oVI45Hncu!+q=Fuof}qjYz>=mrlMb^HfN1>4U6npD+>D%*1*7gIPp zx6n%SFEeiG^FH|fn;(dIZlc6Up4_-noqc<(IUnBEKiB%N#UJOlK|-b?!(9a)G81Pc z0MhlJ!_$-HeLw8A`GRZ=xURu7WG)j6UuJjT)zL4U;1?}fg{6O_b!T#1AKJW#A;GIy zh{Bslure|}wBvfAObQ^@T*#F&Qv6<8NCxM}^)DsF(w3q@eoGY#G{LC;0qzt|eEgBQ z@w2zMtotWHR})+>Tl2IK9!-b*ScKlkcZ3Ds>z>u?ZsSQ5uCx+&RI_bo>|elvkWk|S zpiaa)1Wb=tvu%LO|H`MD$-$CSX`s-zVltk&#Fvy_Dcd>Ik?>cWq^`8~Om^&4Tti93 z#9+SUMk+x%r8ZYucs2_P%1VaR!KG{h^p$^(iieWAnSPXoa)`yOMkeY7bm3jz8b&D)yC51>?G>Wlvppp$1QWro|tbDvw() zmh|@jL5|GE>)pMhqntXZzn$Y*_zLH#Qf2=tddCkx(Q&A#=>N*38QC5FM9T13N>BXS z?x!Z~s->-ccz*)pvj4S;TI?0geKslBKBMlJnk~MT7UT>-HsFZ@_DH+=`P3|~s;a7% zCwyEZ=bq@UZa2?-k3whR<{WeV2BZP+gxFh4L7?U2y@46vC z!?go$?d>^@AMK=v!a z+gClUT=dM<+Q=uDAJZ^sbIp?AH@3e$yjEs7^<4i#J36gFQK|m@FZT|aGf=T1-g_Hz z))4#^SF?uy3P)h=#qm7g(6gfrlfvVpX6;_6Z0mjNf93tqs&yjYRrb}-QU#06Ah%Vy zq}{Le=CIX!B4c5&yU2q+AiA(H?%s^yH#0{xJ6q(4Nhv&2qcEYYFh2ehDp_CEtBstH z=WBCl$%o%m?OFQc0W66G^-PvB4m@Qc65y8KvGbQ+LfATzRQ4f})Yv*EvlU3)e>4)J z&bhI84Jb{CW3F2~(5B~)Ly`rElE!lok$;t=A5$GbI0$Cq{f1nAlyaOUw7j0v%@>x8WFi2&-cfoFDW$AawV& zmrbMXo9%$X=W7Fd_!SQ~2QEuo&#@F5J}qE)R!5yS^(~)8dVulW2AudPs?QQlT|M0j0_;uT8)M{6F1ywG^@-eFz1qJt!9L}- zjpgM(Q68xDWyzg^h=U*+HvYAYZ6&~EvU}(vVt3a62p_3ni0L8UHy$AEVX@_gZrXPe zq{&-?{Su$G9g~H{#l|#Ke)2X!`g(>E&q5iE9TvZn6C?we;-X{3g?<0{NS)!$Grj~w&?1?snF9XeN6=RVqx)xj0-y(Az zMlID`C4xj&qgcZ7@UsFv@#!tUjBe*IHr`q*03!X3B#f?i*aoG=H}>DO7&t@10qs*e zg%B0gED#wAQ*9#8WNt!VrBId*0-$KD#H&bIL|)B4Y^eH~;JQm|lo zDbuo*4kOGD%YW^AfUIE3BQhUG?&J8|!-kg(8=(W(7Qx&+Q-_^&Tk$4eyCP5n3%J=FVn9f+aO(_7+C8NBDF8a zS8J?0B}6yLD7ti2NuyA_bqYndVI7AT@wbhBs@$%sN}zo?I*+QBoxL?(A|XTn+H=1j zZqIpN8(tN+_Ex9Pv|I95Tk0hp-?d0}&(m+ULZrks-SCB?QP)gU^E1dcs4#m>v6}mb zgQs!BQXTKx5@Q=L-xr5B1=nI9x13sipM3S_ZLTdjv9?;xAy0mds~g$)yH&;B4^>iW zw=LY0Wf`EEISxRjS-^Nj?V5ZoK~m0n{sUt}?vPLz^tX-z^|!4;qQiv>wr|T8e5?tq3S67^5;)@QDZR_h5~0ftk6do#KgbwBtHjwLOXatf6#_p~Vq#YYlv-bjTK zE9OJWkQDx~Ebf*B97W`~sD5 z{{z?TK7ccRQIp~~Ve)TX87ua-Q*y|cGpaES9(=5w?GH~+Y=Y}JXd}i)Pr$R88M?Wc 
z(}V`a{tBDMBj*Fiw+ojlAR0b58*JC|C(Ki$JKq-$DUli7KU(!@biA{2bmgpIW}vta zldkXvMd6B#U3r8!xcWtcdY>14?1WQn+r~EEJ{#RTeS7t;uPgX~@!mUy#8kJFB}ll~ z1z}~yp0U!x-T5-+J>Cv!{?#(loiUu_UBs7<>&gPok2ufhXQpoz$|LIAU0Y(rJ-vFa z*tFMczGn}3c=9Y-vINyN=KU$!MO!riE0J^G9ka>(UvM|uUMK*}ac_t@24+qMDXp#gfL34-;2HG z^<(ni%Ckm^hf2rSmRD(2m$6&c2PVq};1D$x1qv%}h&K`JKs zUt*_R@J^-ym~ecHLhL~-#^#*P?J%%te)DsXfb?0__N%a!8%QMU8H5Spl2%VPCI6y+ zL@Jt>99gCI)OvQpT71w?-6u-vgSNK+FO!t9Xj%TLyW28#$Nk!y95_`&ph z*;4)fs1tp)2Ro47U(Y5t8zblzgrJ!TbSCP13G~oWP%~(bSJM49Hqvy07)3#l<%9^` zon*K2lhxiul}s7-iQAI?Pr`!mc)?Kw_ad_j!#Rrlm|a_+(`8!fG3No`9p;M6TAQk~ zUdL_1L*f=pI&W|BbPeml)%C*k&i2>^{*9GZTgTz!zhN*O?3q|_$}J^NQ@|gnX+aJX zC+FTeOSrK4J45B3QvgUigGR95(cveUN%OSj%fxGlNb81>SocA~;>|h-3A(N>02s1k z^dz1dZP{j8k!*ah4L)K|Pp_zkyH`Z8LOuK1f%Rog5*@H!V+S1`s?V;n@hbA3 zOt57eQ#WuQ`3QsUY}#RvtQFV@u{CI#AvS8YbPE5l$}?MpLeE zvTR`r7}3Wgkxu<_@Im~)sHI_abrQ2<1v7jmXT6PV-Nw zFHJ4#2&78d$g@0e!(NcGqz7^bVXkVp*PH2fe%D(lSTN<`^>>8KY*chr@2)ZqR-1ID z^d(!pyt>V5Ij0(nTp4|49wJ8a*Bv*!Nt$$X8_;|fctSB87&5{j zbkGQ`?FehmGr`dpbo3zdaEfzhLX%)v=#*KnXtE5#HQ0!LM6J< zuq+)c8bE&NTH!YuNMFFEmAug#%!e+#aw7=o$LzQ`7jG=2Y zjPW&uMh_Czyu}o$O-|%ab8W25`aYV?%k!hBRpKPA!SyB7E{DWyw2uPi*Gq`4 zN=%?qDYn1khdWs`k=vrLO+IrxG3PHsZ}p`GY`izaNb$V8XK4uIo3%Y%P+w~=JniEe z-eY(@Dx_?V3PvS$s#8?pKAK#e5TQkOZr!LikMq{_oxN)kESk@@uANeClmSDx$;#4pVZXY|%cGopFL{n#DuG)aauWM* zl$DVEoZj#=1XI`c!L&jH-da^&a|2q)6OzWj5axJns8EqscQXjtNXSg_ctkd zxF<1u#r)x9vfQ>MZ-TCWS{<ID z`S;wS9WWyOe`f4g;W19>?^LrPRs zR1Pk#tLt&$(l$s_{&KT_m2Sq4E^q-QFE7u-&(9A)Zi|JK01W+wHJl_fOS(BnhSlsL4p*Cwc-CC&rlih?4qN9R-zPVv7+9R}WCd==w&q>d7H3!Yf! z4}9V0h^~AwBGM5AxP79^c$u!lVSK-xoh)E618uD+uX4qYV*zcAIUMnZ{GZQE3dI^? 
z8{>f2YhriX0BdK?U_6;8dvH%M#$jioN~XT(j-#z2?3h%xHF|~8j~YuXj~|_oA)zwJS*MEK$FeNnaG{c9(+E`| z=EXdMMhdnegHud-N4=H+KqO50->c36+5OW~?Xex*^lrGW^s$Y67NZ z#LmzD)VvZc(a-)|lh5P}tl0xnr|V$l-8IqLg=q{ms1EUhCUj44hdVjLqsHPLuXhAs z(q}SHMKBI&Wfq=Is92GO_x75w+QD}imKyx}x5ljBZt_~_IpQ<3MA?63NzD5GM~Z%) z*sLK9$f!&izcif>p!Zc!)EFwI@#DByP%3>1aqKg1Da8n4GHmVA`u1c(vk=%JvJ0B` zGYHJZ6b!tkyo5+HU`1oXsc*E7eN6s)wTXZZY&Svk)Y6>2Amnh`lq*0oiH{_V)Ok{l zCEs?rWbUhgI{WTJYo>z!^(5bQQEHKiqZYv?;T1nU+m7iJTB_(lO(<{!jgDFwbu066EDr5Sx|@3Vtt|Kz7qP|90xots?i8v`c<>{7|i9<+44D!9oNrcn@SbGL@Gc-G6m|PDYMA2QvissvkyX8T`%q9>=($pM`>Ihg z_VrnynSRzx;f%)E2#!u-bQ=^rbl*SiX~d^4Q8EepWQ%$pK9bj!Y^sf^UVrNlEQ$$`HYF>sNrV-!73d|=gCrXXmKHX#j#V7JeLfgy5J@ezs zq;iwlXyMQWR~?l4+4V~gQQ9e;l@c){-ezP&$CPuQ)g?rp+{BI2qjy~whXN$x z018zH=-X2KS^mW0kCCRUyvmLA^U;Pi!rB(H_4Z_|<4DUP?3u3IeXF966T7U=Q~XBd zvLA1MebmBUO>Qj@Xq$e95_CU3&j&mia641{$JjyK4$9W~=enO}z!FiYp`yPbU;3QrJ^(N#@FT=&V*&kysplJ4ewv;kxp>h4BMQYuA zt=s2hTV~STqkJqOqT8(2)W_=9I}3RQ8dH2l7KKcni}ZU|(^qxLr|*xg-WnEk8)8Rq z@fWAU$d7khHdEkVafBle-^qH8q&r0J%@joO&-RW6MaN#QKNaFX!tSd#w9ULY@3)JC zE-<(|1cyd#UE_{WzmMzz_{X+@!&EORC_@M0Bf;b>xs^?>-#T4K9!z<;nG+m#eM~kH z?bv{TT_!~SBA&H~I^86PYehQvX-}BCowJ#X$@zROaD960ys8O zce20vQELghhm@|Lne?c(6n_^tjBsqBgDgr@Z5QlGv6WfCn@0UaYwazAKUf#d#3@%l z88_GjNuoZ`u#fV@;E`s`_3mxd4)Nn&$J53VcVhLNynz`69a4tn{E01)X9B@>CRvm4YC28b&zR?IxWc`A2$PVw;; zzMDd^kPmn+_7e7bC3i2M@jI00Gr#6aiIsm2%oTJO?PW=W{>IKsFd^t8G|ydAYuq2> z1A41x&ksQPQ!)RFM50sop|PRIspA>VnqW5LbbVaq>ENOtUQimZC(99lg0Dt)W!1#2 zns-%nD}FDASjkpuDXw>sRBOap>2Y)Nv}%-}LyIb)h+zCs_V?3P{#*yzx%*|A|5)*D zrW5RB{q+H)ZZwixfL=+n*pfsQ9$)N*0=qUAkUvU;vmwbUVLj9s=u~W86QQ1m&YFK+ zw@RHZ^hj8&P}H@YM|-L1;<)#nNT2lX8s#5f@7zQ2DnwzW#Xv;y_66~~%%&+ctW)>6RpP!B)r#IblX8!FLhqobnxt{OWsuY#ffE(l6b5x}aX=ufAL-=t&$FNbL!F z8@hP5fLbzuk~y6gP?XnM`<~PKi$!=xJETi?v~Fg|$1gBH(&0 zBZ|-wu&u9aG3zv=F-q!Es=Sz7N9F}P({T&Os~(dEqhD?Vepw_&$z@Nxs5G^N!8&ix zIJMZ}q(56d;lWRABnT?BlmX?H)W~M9hCZXs!5_w2Arg;yXHd0YJWVNW#B9J*RR; zg83ge{|w6-;tJKojtYcPdLa}p-NWQiTWi*e7CrYx3oNT1 
zJg=;XD7Q$|r;XagKtuxUVEXl#7SQ_((-UMan-!OHI`mWrcFCuz=$99g*i4`9t{%~4 zmWnbJhK$~Y9oJ7YFl&em5r^-dR%PGIQJ9O-gl&B!eB5;XqnFrzg#HqA76lUzvU;^} zxJDivO%|Wg;Abka$92)a9n%I|I6(Ab@xNYq8!MDcx4)tUCLXdUx zC25lq<$GYJ^wbR;Od&oDMk)@i0vZoeuJ!kU_zAcIszNr1yHi5?H8vTJ+WUGnsn#Rg zF&U16{BdyW^yWXmkx0Kd$`+}JcHK(~eeb&R_Q#vJ>rZM#pMlivin5al&}EC)reVNh zWJ&d5OJ8)D^&jqe$))~N3-uFrgc>prf!OQA7jMx7*W>xcmzllw`Pq#QnPrbsvdO6; zgCJ1KWDw(Wq8)M4P^xK*$hJEYNHy*a_o~;7-u3E-7Wo8TTta5&73_Yt1c4=*G1fZl z7qO8cS(BUDEP9V%I;FVeSkz*=$TfM}Kh)vI=ijzqaO8nzPfkm#J=x8NvI|3kI1HgZ z4Z#8nr(~d@SY`iN=yHs~RE3<0-Tjz45CJ1P7R$b&ki!G*;)pdRH-FShZXIM=Xw++U z2ut^zSIbbal4)}b%uVMurX%w|e$abk9m4r%BJW}6D|HbzHiOAkJXfi24xBc&rd~dR zOb%{21hP68rB3z3G-?b)CHY|ViKQ30h9d7dAS2cXG_7WRg&K{Bb~J6n{k{Fdi1Sk; z@Knt^ZN_FGc~7~eCsi5j#+Ox&gIgYY;e3_oE-~iQpbud@8N}8uhon&W?1&zp{gFN{ zx){^xM|e1K-gfMjy2G6|5IO`uVp>!tQ>c!8?1o-r6s6* zLXRP#bvc_p;yhAlju=_QhHF82A}uXjpJ+4Oh(Kt8(0h$5_KlON(|$-keA^_OQ+qmV+Q9!^L6Pt$>1v{6MH{I7^)-0XO@j!fRCR0i^*@T| zyHWAs-6biRAm6Dg2Lu+_TJEaY`Q%jQZn+_v+CZ6I5a0yZq7w%hEKIHasocSJtLym3 zk~UiYuTh~r&Hozo{WC;@6geRRnbZ0oiT%xo@4W%WKSL78sP+G^ktZ_Wr#hSpts<4FY~)01o;Jh4iCnZl656F5BciG1;_l|P-ebk;P;UW z6)StzhyGO_kGj%a2(X!(oG5Z>_|q#Jba$bDkb3RvVz4Dm_xKoy6}blcWm#mm_c}|(+Vt&85TRe2$&LWv+ti!y3^;AvJ9OpZd(cq zOM$yZf@J=I^RD)>gX0Xbg{9D;+l=gr2r{r}n2*AFe}**WC2eh8{!>%4)aC4#RveN_ z>xWUmDe7Ua(&tLhBjngzTS<0ZO=@f5WeO=ygnZ-5whyCP_TQUJkSt<=V^WsD>%?ZK zd&osj%}8(iJ-G3j;QB(N!b$Pe^*A72$fa?IFrC4=10ePEiR;T1!Cm4{J9W}3T&=ML z@xRG#WD{MyD(_moDoo_!clqwxelKmiL=T&SUXx>jVCdRgfE?2zw-1o^ot{rGb{A`z?*Xq-o13Vt z-#`(qJjr(r$W1oZI<8B5!P@MQ(${0rKCh6d>?6XRh7k0uUu(tA2{F2A(4H&vtn+=6 zyNh2Hn+L|NGFipIw0O#BXVw?S&Pc(t`|XO1sqelW*}1;khBDua{}HR{YFjLGzd7~Z zIC#&Y9q0UzHn99noDW2)esx1Yn|~%uyV|_!>ni69dBo^^>GjOTFO`%hSKUoL4y}|g zr%W?n+7e40b+#6}4@8CKgskzAq9yh6OrOc$=P^Is^jEw(f1w(udz_kG_V6L5bCfT-h5`e!dAn-IwQEUe( zx2(;R5}tN!F1yPur)Ezv<)gIoncOmjE@W_bsq`%W|3n0KX{TM`@8j8#4iq6TfV!5 ztd482sE|^PXoJ5!=O1YxmrW2rKuHqtf<-B{@{aBQ)sSW)?$hWk*X*$x&(*Y@Ygymy 
zbh^iy}Mi|K6-Dca&C}euerbr0dQOp6W*9lM6ymv@&dBD&dl@*0LUkmFa4r;MJ+3@QpVHZ5YU6&b3m~ZkZRMcB9N5O!*eFs#NE3wj z1di&KjLqe|4%#g5~aJm?920>E>Wdeme zf8U8ToizXDfCpZ~(S^WAqerGsT!HgWeUHm9Q#x!1rVk8rq>VvWYNb+l->==9Z7d2M90Wde?i?J90$~3>|HlntcMpjQDDjfj_qzR_GRQ)b_t!}^a4N% zGYK`khU{6N)9J|SgYLe%Fk1lT;w%)07K6d6tl`>6!j$xG*4rjP*uf;}sF7$$=bGuB zovhb^qOP2`xMExznwNPU{PU!eJ4Nt}a6#P5{A_E?CDU*N*Su}1)5MSia!@S|@fNxi z#psDu7Q7&j`NRxRPigAwW6Gc3RQujy{-DlZ*6|2o2c?6$(O)M1aMU|r>F*T?aDjF1 zt{}K%S?7rEttxk{CPc8g@rV4JADo|6AnS~(R1jY$J(Vw&C0ku^vU}b$Zd`n{RQ{5J zT;UkcI?q52Pj`z%0jP~ci;ocYCx~}U?^znnWFfIlq{AHH_U9h)af+lk)Ax)+<$lAL za(ZJ>HHsjxYcJO&bM>`pZe{Okl=)Nni(RS~0{sd@4#0=(twas-?B8s0^<@!En>XmEn)H$KLj!|*HadKs0Ys=vyV zn%Wf)Pe{eACO2e(+-y~rZ{}Zp8e+8LC-8a3IvvX&6_4vS1?M!7b=`jTQvW(x>rBhxz+2;giN}br89G%W*@_}p%0C9Z$A>=->p73u9u`@bod679Y zYJGciqWhBdU`A*KT4#c+OSt0kDBAKWA_!cprFD(N;*D{}mlOHCPOfksb7(|bERKHA z!AL{zEYL>DUd-ZTcf-B2FEiIJI*~3qwo>#LC$7v~KD4PX;Uw9ZVzo&W#Cg^bSquJr zW&EAR>z>}E7;!hmIdkfd88$vTu%F1M=Y=Wdt1qjjE~zf~E3#$%8+zs{_Z0O%RWK#Usp?Wl5) zukq3_E>7xBV{ucF^>{D5&sP4&G`xWLB5+XdgP9^(!hvj!Y5=>~_V|dokXdDXjoGCQ zZ@3LNU2(5ja+=XD?0?$^P?Vszm(wobn{%wyp%J2LzL3^r;8| zzF$y<7Pb2|i+2ifj$$H0>Z&D^LR`rh_}6aE!t`6W5k@CR4gxtrTg8%5+DR}5v30wE z3<_aN|GIqu0rnjBnDNz$(zN6-W773AC^6^6sS6yNN<&npSd*Gf$v*I=;;pN0!?)@4 z9i=D!&LbDvd+vwI1sZlH(O5i;Qh%vIx7*7$+W>g!qDv--ji;_ks-t{Pgnp=8mNcy( zi1kB@R{gYNZ&?~gXdbLMy;%i0+nw z-V{h2McNI1lkd_@VRjaHvNWs2iNi~hj!_^P#vA*%mY*CSGZ{2UtO@A9BivU18lRq2 zq+4xmK`>5d{M`rj)cCkKvDjpF1_#Mw@EmyP7~vcz9%C>=|EEbd2u)V+G@|px7*d9u zcX%wfvU5>}d-0G>(<`phlc`lxFQQ6AsWKM#4R>&JcvjPrOy@BX+qgn zTeS`c6_*Xd1|BVFx*>Aw_-vwWLGw-Q{}6YVQE@!|p1_HcphA*ZSNFUP z%?adjR7A;TK=Ns@3&e0Y^|fEV%da@A9bw!f-8CAROskJzG3G(f<8Z3iPym^j(*?E2yY=5@}KdY4O1?dH04J>57h;Jzb-q z9krKiC%hnj`x=@2Fx>hn_v`pC-d}lEryS(9%{+uI^t!Qgj*)XcE`KYAFy0!bY?+5c zK_U(?_19sA19^cA@pvTUA(NX&|qL>AdFp z&1;D%amD(pLGzL3&n7xT2l-LrYaz}-wllrXwJZ7TPIBug$JVz*q~Y-SAe0nC&|Re? 
zvReXPP{1>}>I@oNHoF|#7$1MIx2Bs_Oshn7?LiaS(gLG$A(`zg^L`_h4<-!gY!fql zOyKA@A8qzZNh_#rLDg$*y(OSsc3^4*TE=aao6uCEK|ObvI|7F!6l2q)_s~MIhk#UW z#Z&;jbXo?o&XC0_xDRupnub@lEHC4ZIau~sm*hqcZn>u~JO*))^N1!FHAKPINMJ)L z%I?Bodye@b8q2bEoAe3&;WsNDy()oB5isG{ z6jP)2kBxvKe#}~{UlunY&H=tJ)TOB7pnCROGR93_GN=3~Aho_4ZY`$n!DbF$KC--9 zPnmSThu+P6Gy7^A=VF#^P?a%qRo*oL^PWw$Uia}^`1Z9?>N3qMx_#|X$D(s1nv`!P zBd%Xt8m7-?t>_7TWt#T24A-5M__QJY^ z?0f`fWDU-i>GtnvH;1&FDRG%y<7`J@kp_w1?@c=8Zof!h*Uil0n_84on~t^!>h6E2 zl;)!ta9tRP)R0P%#u~#lDUFw6*STlV0$y@-T8LY$HBJ9PXB&E%lgrdLFq!|bDycz0 zDb@5CelY=E*xovHf?&@2G=S~m<()U(ButruYPkn?Pjq5*YCKRb8?z%Xx&s3y7iWrP z&d}OFHqyG#LVIK_o2$p~T|}i(iJ9ae9>Kv_1mMGA=K-Akyt-9&ao_EFn3Dpj>ul2B z4}h<&JH4=F!a-FD&WHo;FB{rKnp_Jfj9B9wW;>C7s5}&n?u0;2Ey#Q0-DCD$Wne^7O=%Roe-}(h6`8@Lx zf%KLIgHi(LgU1HjPJer!vITidLZv{!Kr-OCFFSo4JUE)S z!)R~Hl|R6oeB&!TcvbT7e$`RQ1EH@?Aj`LlN}?U?{`yD$Lo|z+c3LZj$_b)yo zNpJ#@ZPS`T)G#YDW|$!t-tZr|Bk#D+C|7Jn~B<`tR9TkpUB;L`m^se`8b)(ENX zM`DBIIfc28aU$y`Ki3W&_aG!H!4Eqok7PK^ahuP9081`0NoI)fPJrYgibwyj#0AzH z$_yn@Nmvc~ustW|m|E&Hb&M}GZ8Mi4GyYYeXuH^uU2EZy++{sB(Iy_a{2ql+&@i2) z*YP=BD))k?jBh{z!OiRaCN3vvqZVVmE*ZN#qi$epVd>0ncjtt zqk;ly%`A_<+{cTejk3;#w-&58>-Ri~h%nxyjPqEx9EnPBBse5(ei| z1&|r7S(mKN-!mRKFXh~yUmR_Tp&!3%c_7ET%W~{e&9^QNuT zs(3=VAKWG+FfDdE#~mmdD7cK)wz3_-kHCB^bP^XgKp3@ESgHf-MI2k9qb}$>IYKtw z(PnqBT7OuyR^13N5U!KpZn{=<+0EK&u$##$AXuJZpGs3TokFwQYYQN5XGO`Ct@Wr6 zkXz(_(9^Y1z@61C5BiSjNdv11Mi{>>>&4><0x^P=F*?p`S^@JC!VV7ujOJzxTxPR{#Yk|+5Sn3OX0^m zu(HMKW=0dLqLOnXzAsdQxR4GWZgpbS;?*&f5$t(01-0i{Cjetq%r$7gB95$enq2jF zuJQdc1S)UTwXG6ZLStMg5#0OmhVMCG3{vierMk3?g*v=W=AI0%ph`c=?)~we;yNIC z0pE`M)4+>oa6M!mcPfJ$teH?dXY1Lx<0t?R43XKXKr%ZW}dEP08b$9-mD%W1l(woC^Iu z+@-O?l2JIpJ!pQw$;4&WY`{)DTKOESb>EVI+`Zel6;Z)e@-!)sv=WgQ>Erq&dVb!K$55^Bc|lL*n?E=8#+8p%(# z%E#IkFSO%Yp3#k^Ba(EJc4TQLe~P2l^(Ij(?_Z&kUmJE`s%Dv6{@P8n(0wev<8=pA1YJX3v;TL4JC}mT^z7c(q_xD`xbQ z?J#Ns&;#UCyxZ(4-*Z0dHY$$-D5geUOTBd9rJ!dp`dZfZgXNEc_&?XT!QgbJt?f9c z@<;n*V-w?<{NaO`>}j0DlYxq#i-~>pJA;j&Jg|KYby9+3xMB9dv-vo4YfF2HL+ON1 
zsKsHX@00d&+f(aw3A=i5HRU&eM^CcmY@>Vi{{fXPgi67o&C>y z(0T?B!EPzcL_ipcdUoZ$l#WYkuQ0b=4cVJy66_Nityyja*49`@*g2ceZ?MQG7dE&| zMHd-+j;W$}OAmTASw~=}=Sn!pxMnfXKF*o)pym{18$yKKbnor-%oC20s~_j^27-w4 zW#IrVH8a12jJbT-K}e_LDjOqJ3HG^cz71OY8&Z{OtFwjGc|u)=%5#a?2ImU=v^yE8 z#s2bwf0ev!FPqJQy>wf{S8Zkb+Y}Z)1eu^JJEQuyp}=%^a3<$0$ z+Xpm`svcqM6Pu8FfA8Oc{G6{|on76L1#Dr#p_$pa)8LA+^O5sc9o%5=x9ZyK6)x&C+jA`;M{-l2~|+il|!n1LoRj*P=`EX>4%K{W3SLX=mp4%JbKL(z)M zfBKc8r-N1UTM|pFlBMYrd=`4X?bZJ{jty9^F$z%)SPi1zDYE2}bbGhrIP6gkmg9G? zIf%8&-XNDNX)KU;2$PI259_T@aJ-h~Gza6Dy+)-96LrHXq#7v)S6(j#{6MRA7u^%y z(xWUGIruY3cV4Zbu1z<0{m1)+#!e`|84d5omAyy+qPA1&XvyM#l-H1v~#$(>*YtE@Tt+{klkAsCiG;Ya!j zmz2h17I@8%+cYT;!MKk8ofmtpnc$;A55Bm7SW zAs3l=IA01Kv-4w#Mz01T}UI?8kx$gOp{Pq~#6<^7e}?_T)Q`wzwGCbx?pW@f7C zxkhY?fuvn8NA_&->-MX*c2cb49Sq#t&sGGlCGflA%T3wpwqIDyStuLKR)QuuW{Ha8 zG)ni!VmX~LMHEs7UDskCE6*9Jyfx+XAj#F}D!9>x2GBL-do+v{=7jopa~aguR!AwA zhPgcp{85&u%REgU~694m9ZVyC2Fvr>Bh$k#NG|z(xh$9WCO8In-yiiFz^lKFK}8h zUu(T^R`NMRaQ#6rY;Lz!j?`=dpn4_UU)BkbkWTF1G zEo-P*Iiz*oka|=pHnFsp_F|e)Yh->}+4~9n3Bf1$QdLnxnPT(aNMKu zru701Lm8>4P~uT_b)zUu5008Fp`&C5+`_9Y7U0`TC~Q$ng(VtvpqEF|iWcuqCmn%n z1lv>bZ4cDXU{V_F5oT*M^q)_+u)nPx0Rag_Slw4LIGGHBzi$g>877k|(fn z1JYw(;or{3DZ)5|*?w``N!4JC?UAV5>Oar&@e6pp%VvW8ino|i8~criGj)oQr|4fH zb>2_+pfGy~cRpv7^TC*}^CC7ZuOswgPJ7GF-YIWI*K>hbfg;=)WBBCC)jWORAN9 z2DP;XaUN0k502)Xq!N36uHGZDe5twUGp;$BjmNtSG(6XuO&Ohgv#Px=<%s(J# zhn7tSJ1%NwyXHSOY#_lG)t)g!DEiDgvc~N`XL3>07%& zPT|@@&n`K{Bwlc8_~nQ=esS5cy5m2b8Od>jQGdcmudZO=ebb1`$YnRQ`at_Jiy`?a zQ>~B6Yeq%s1kG{}L|EeWZnY>WRo4Xf5Hw~4omAB2RhYe}q>>%U++0`~Ry=4Q;j<7< zp3G%5TfV2v&pponF4R-HJR*@3Mr6r9Bo=8QwD^GZ41b(>Sx()JtZB8q`$}uS)0p6Z zfn9GiI!;eg+?cy-Qpjv>I{oaEpD7jlt<^YIYd5$^N_HS<8uJ`>;nx0S*nOJaX1f%0 zv^h3uTfyPQwKgJXL*O1R&PsOH_@^zJG7WWyLR@d{YLa|b02zKR@#h%l%S}Xjsg6ZM zVdbkX^)f1(9S_e7ww{NrKMW5UdD{s(ykVWCBca(ee3C+u_%YH0nIOzkCB08O2uu3j z!7<`b1LX7lTX7yUs{@AsMTlBoqr%=KH=|voVlVZVw%9;QJk>m5fa4qvD{BwU<(^=utqe(liF{(|^-6b=lVCI_ zYY2^^ztoNU%3Exo=;t;8?cC=^Y;#Mk{Weh)|_6>R$J}?ujgYu8F4t z?D*Xm)POrPL5-tgcHgYFPvaEFE8pf 
zuKWW!eey^5!70Qy$VB`P<{rK}8_!%N12iRgKXdM-nz_U(3C_81uKBmxFInTe?3`P&d~-5HyRi=C{MZoI~`U4?pqc%%|ig7}pMs8q}KVu*ze<*!|SL(`K zR1XIJMo~%DSIrsiaxGI(iB)3FRV87^Rg}ZR8*}nI|Beg6N@$o8$M1Vs_|orl@cUk8 zG0bl^Un^Sd{SfTUZ;3>x;RkrO*mV@F-UHn+;Yo(}4e{gi#2wE3KDhi<)V@3AsGAD- z5eC&|Tzy|Nxb|hes;K2^$y0f+u8fnXAlGkSUZVj(u|dw04K%~>Jk^R;TA^cA;qkKy zP_Q%3{Y^mwQoF22dJ{N>#!a^R7Qc-<6?J-bo&CFKtH8Sn-d!`>PQhTAg4M4FF9_Y! zM_)!Di8h-Fw0Z@vak8K=GyApdWKg*5ky8;wsBokTj+HqEjaja!%qRGEGXIoka(SP| z{w-6C0y2e6D3+6+Tp`1w{n33_@AlREU7tJl2HV~SpBU&Wk{fqaJ;?D6diRp(`)fU- z2A9wLt^9j!I>)U~QIVf(y;H>y{M+KIDX0re5k1~jm?QpKh~D3cNca6KF*yMK`JWgR z_#G(RbMyTFLNx!a!~f4t^gaftko|{VUIAniHmCujWb9v`N39Fa#LG_q@a9P2vmnho z9u(G}?FzI&Qx|J}j@Oilo3?n4rg&~j21gv_-(2S8kKJ9S_yNwbA`vNZoOI?8{GN5J zR^sy#E%UHjH3l0;Ip^wKKVXP0tta*Xfaia!Jo~YrdiPwrZY-^nKW1(T`K^Is?v*2o zMu;Xjb#yz{U92G@@2vIormWGwCU#K*IEfaE&BT~*d_{hW+zTo&2GXlYNvl7VQ5~2t zqVBJ~?wXB1vrG-DzGmxhD!j5apo7R$(6Ie_>ll4A!e9mc^jIRmn?WD?bs8UO(zknC zxJ^u)Vz+IW3})xn3;mY-;y4PNI(YV?Y)oZAT}z%3ozR+gh`AG{z^^rT%}+Om73+7> zqDJ7vvq&CK{kJ}HvHSVoAI+Ugi9d~1T`y7*^pPism+UpfNG#q5yu%DhS*(}=$C_4@ z9*WA}HRYTkRIFR%r*Rpl0IyX`mu_s2*2ipv2}~a=uNSJ#R-8#nkLJk2%Cn7pkSxX9 z4L@sg*Ui})jWF%g$VO1oP{w3#in;_A-L&74%23~ybnylmAFO_bYY4tQpDJwC?wEkg zs#^iOoO>uu)YRsezxF$bUq`qO$8FJ^{m^5y{H$PkWL{s$BQK%Y2Su}H)$5HOtL0em zdk)%I@-pJ(9a@jyTMMK0-LNK~eMRd+S1$&^AHkk-6dEN+^Z19Aw0+Lz2O+C=_&Nmo<3l+i}x|?emtpA+>h!}sm}f`i(ZmUAn#3N?XvfsA?2pLaZk|s z{#SKcs8wRFq@Tp-&c0f1DMOd(ZJ+mDJ+6yR{`&QuH-{KWfsxyr$0G4vLQdR

vZW-DY|JT-9GZuk2=%!N|9j7ReAv?Z9+FoMHYH70-t+ZvF7VK_q#3! zk#pL|?)50s?43#sK;05@Nbf%TGTV(^f!h6L%h_vvOIJr0c>HeF`mhw}(vYN_*)wXl zlm5K)-W1O{*0gL};`Y9zw4Vi)Z?o99Z(tc@$il~0b`xb*C~jS>v4OA4<2Zf-?+wvM z4|Nmtd-;tk1I!lr&BZs#DTJ=~&x)0^GkERJH7~r#6`Sp?=et6Z5vJNYX*p9@$uVeL zkXE^M*ApLaujw_cwn1q}>aMhjLdLV^U=dua!L%CvQ)jAUmply6Ew8!4&cJWt2)Taw zGQvFQG{vCte#pDrC?IP&DKl*&TX=Vq=ETPc9`O!7#5D{Z!(qZU3cqC}U7T69#r;tN z9y?#l)%@s&Oci`>-*r+lwT~FAKE+vjm~qL~wz!uatXsB$MR=r*(94w*8a)fzPw?H) zX#u@DLQh>MG_dq4=bO&ucH*pup${vg2X-gYZI>9{ov ze!$nfi+k5UPt&k-z}Q`1YoI5lEIGF!iY%O1=(>87`{qPjHJ65~%hVe^b@ZRan+)W* zwVhn^{#HF?>s$Sqw(5o^!DBKSv?q)AZA;LO6w%U+YY`YpDU-BkzhZi84opupYTd%; zQnP;aG98JY7LT1LU9s8D6>7d5Dn?;pZ=1vI(+dxzANgAQNP5LLY6!uxqZYKh*HlRM zQVkW`IPv4FuQJX{DDHT&$vQPREZ)$vao2y6Hl`kb*4C^KeFluKhiUTI9&RqN3DrAr zICT^?h~r@8l}{|1eg~&efM?cqoBnBhm=dLrADRL>Uv<9S$>w#1+B5gVVUVy(%l( z<%WZMep4~`z{ISCW(9E!QPE84_L8-7#Svo8+{(WuOB?7p;boDc6nuZ+y!AznuX1jh zH}$*ZA_ZujY!}0Le+=NUNzI>Ke)0OZKT)&&j?}zzj;6*%4$H!l<;!gZr*szHRTbs> zqdzLVq5A%t`k?G-Mo=M#>6O#mra>_mP5WQ@lC9@XciY(|CmY-#t5HVlo{*jc-kI(K44J2tqClUZ(Wn?amfuE9F{kS3!` zvP2>0)a5UiS8~CH{n==ZaP>^{=w17z+MsnyV*gK)n&~z-;ww>?z@leeEQw2t|D8Qw zk6w2`yetatU2TM$H64oFe>D_)K!262A!vIMF8q;h@Cy~2p6HFpyCXQdjc7j8qJoMY?!ggJ5-#JgWFi8EV zm)^qj7wJX)ot(aHs3^dZ5q&!MiR&fng4?iRlvqTm${H;gGe2#Z8^LyAUE+jwHQGlw zkh@Vu7}oobsc$H^m~)iGLDTv5!up3d`7SDUnNBg5u9XFn)b3Rlhq`o??%6$ zF;8E)qL-KuS4E?)nl?~OcuR&5j0zpc@XADeSSQin4hntVaMkC&r2SRTraO~WpF}-r9)247e^p9*~LSpooZT=X=34UNAyH9LNWZvULN(|AkXrl^R)r@s7) zdrT0+h--9=!FTivcp!e)^$R*#Yt$cP`r^tw|DJSuOb+aXetz3)@G~~@E%5e(U~%@z zQMW*`fLS&2?P#1Ge1adZ^TB2}JtgygzZ_$1B3kylrWW!sF=_G4@;1Cq9leJ)OiQnl z!dp&G$*juw{?jr4{$1D0^voMV#O?~S{YtQ%7HIf$)e64Se5V&td!qV<6bGAp?DH8b zzJaXn4_T}AVGG7~_66J=S3A=L#fI&bG=&aU!VW7TjI^$IkvE&P*>HT8FCMqI6cF1? 
z^F56wP;f=PWw$ug@7GJaLGtB0vU^eY^qONS{xGc|Tc+@5jD zg1!0XriGh+pWY^B^J3YKyfgRbffpPK5GSt=X)YQ1ijWVSMaY!ho5a$+#oytvFX@e& zm!NmVN))P-4Cm5K{`)M0V>vk-@4>Xfv16`v5E^zjG~10UBheYvSr&7- zO1o9fX=Bj>i@7{+X9Ecn$&=X-kqDQcx14u1Xs%s3q;?|uIK1FA>XhLE+fiUMEeZSd zPvp65W>?SRSPqP2jBetbzNWAQm&i7Cp#|3{Pv$_ZvjNWluEHD4q_vI?=w`roHEfxZ z2+pN|cyuoySHycZs<41Js&Ok&xht)D#~%_DOy*^7?67+Ou+`91I=kBS6_*!u?CITQ z!Vpwf82Z2N>kNR6q{%%1$Mx7N&W~yAvYU0Wu`fhl5QvHP^wjn0q#rOm9Q3sORl3o7 z2tSYh90g zfH+K}%;Ec4Z?v}}2k!Qp>o+r&dZVAR)^&HgvCKNpvSgbV^}8prh&uV$V9QW2=rI{> z&RP~(z&}`OB^?ywg?@XK#rMvZ^WnfH@_espZh7V9+yYiNqb+{%nk!=qg-nIc+jRv$ zE#qCY;AmR9mJ0x%aGl=Dt4J7m+(y5=tA^9b#alP~yDr9||k(cnmhv9#saRQqQ9S#H(b zTQ^hM${9HUx%yA|o}6kORB(LHyRn|JpEQ4Bi}$dA4AOOaYMK1ocVzASm*2k4C3l=X z=U}XGP2dFTZH?hCNaFJzSY>m$y-5$$eMXL$i~kPo%>HsOFd?GZCkKoqqy~T>P9!1# z01K{unbGNu^81ct7sz=b2`7OJ(pneSBH-O>0VbC+lE3eyL0glzP7ef*0FiYWm4j>O z6zqYp5epVBlZzW34Z(FFY#V3UjItFS82Uccx$PVm|H3I~A zO(o^{8ju7?ny&DcA)D5#l=hisBbzaaVmH*E@+|BLFS}tdVTf;Ozt-5fKevc}{9fy% zSG>I4LP|}#pcgiUlj;+rE3$htI{28I8GHZw&!uZbn|$q|nM4$UD#$Up1+mJ8L9k$KuoTu+2zI8DoB(pZd0=NC2hAW4W4RR0yL* z{`7EyB9h-ub5q4GyU$Ewq3&kxn7AMLTe_j=90dK_9B!4=6qllS90XW3jPFO(U&YBm zaE=^`X5lxf!JV$25%-MyujkpDVxvU%m9?sl0Gi83@J_lK?a8z|B#^_fdbT539!4Rb zJVt;NRZ<`b*9B;3Y;8*Ul^E*QvkFvXs^IH?z(8MneBB_OiUp7WPEVaX8A@aL+POHv z;ZurueY8t!QV=H+hSI%l?@2Fqczy+(;y|;W4hn@wL|6lB`x@2<)*TQ}F_-;r^t#hM zJAVtvW+8E*TB24n?w1^B?$x|F(qCEBHm`_1f1iKJY{|X>FJOk@-Rk&W`GsAb5rrA@Q*RY}s@&`x> zrf^q9GM>TZe2aQ+PwQKANa~(C1KEVh({2#}`2H+8vXipN2JgyBxnT!ilY8VFew#|} zptoN7n+*w**Kp6_H2V$}DJ&pi+MUPII+*6NUbCTS%Mz8fXl0SExlgUt*-5HsnDiM&Jz9?I;7{@h5HC%npVe@PBU|bbCo5Z$X(3IQGWp-rMjQB zW5gtsair{5>*$z;{al>tO7(*5!1mO=_7X_J&undW@we90!#aUWCp_6>ALHzI-?Ix? 
zZc5HN>|HDDYz6?mb~uXI#KGi-ulz=WyPg%kcFVAbQ2RNbk9_oEzYbjRamt7vPd>1@ z(C|s?yATm#V}D%QmyZ2`|AxU(^5c$0Qu0`>mec1`iL$1RzEvjj7Fdn_gg?PB+M6`e z*qZ%=>#V1z?9w9 zxmbq9QG=M{3N9cxuZd3ARD8RoQPi|`u z;{gPVwoJT*8fb7_mTUMmXR57Ny7hg=Ki`{@lc&;Eu20*V9>#SJvrtWa7xU5JzKs*$ zN&H~V&{U^QSjniT(2NyxS)~mpR2jj5uhO}a`jRGnsSYN&+VZ*Y;4Ce5K=*0Q*00Jd zNsKkoPrw~QDyka5K7ancVFZ3$QZIXpE2lP`!C<-OD)4m3|BwoU!@`iQQ=YU${J!h- z15CDvRU?R(n;m6+>pGhcopu+xb#6-~R{?R$W>jp$P~fnhIg!^FaLeA| zx!pOAZ7~`KPB!HW92)>eu|p!kbe^^iw>#etlV0F;^1zp(Rh%PSjP6MhoWNO`?wLY!|w3J6&c2`qbtOKz0 z3qT8q+}KUp)R`Sdc=IJiAVdJ1$%ET>Ft~CWG1yuM-Z*jE4G zRSzIlxB_g<`|E95GhJTScxXr{U-<;r)HidNW|P9TTSMkgK_GD_O2v46ypv4xro9+# zU=YA-mPG}ECzh{Z3CIzYJXw0D+2z*?UXMB;K z*;Zgxk#vx$=LD<`Kx3bFf3w|!jRR&8>PB2q1L*~UfCiU$xK4yD+kKFbxWUlYva`fmEfM){XV)4ZqbVlZUs!$qxiJq>&;f1dBLHAbnT2u?e z{B#!GzCZHfJ%8Y9KiU$}xIu5D?h{cY$aIq*2ZN73I#s3S-kM zOLvt-pcKLS){fI^9i5jvgu~cy{D<wJ` z2LQlh1rnT=g}P=}<^j$-uBox2z?;ZR%6+tFi{%;L4YRr#vQk7vU5388 z?a{@VovLxiQWhR-I8w=<`05F7n>s7~UY<#^^yOhGxF7&BPZD1N=9!`FnHDDm&(R^) z{rXdp0K9Oh#Ui+OjEAGWUK(ET*p9gQ&Z;VaAA@N+Oy>Z`v48#`bB6zvskMg8JcI-Y zUiSi>b^l@rn$ga=R?Stnz&;jB6B7Y|3qVMK^|zODzkvY~M8}vH1)K&ks5@feOT6w6 zk>U-ON7f5~X9eL65{L&~zZTb!{DFK>}gKTozl(yHn)spJ) zu=~lUg8Yp{o!8ZbUh(X)tARVAO&jrr@;ddW4DrX!~Q{P?-$TJ|dPUJLVe+K0IL$-!s{FnokGZN`=I(WN#s>pcLZRSNVROneuJzb5Ngqta&~d%W1&XT zskIPd2d8`{=j%viT`>Y^&pzoz@Qg)73am8LDxS4d8Fh-e!g;j5@ioLzkh^#RUYGpR z(VC;N@{0MF%4rVMT&eRAxRiz)7lPZ^>H#F1giO#ofOeE)7?Q*1l*6UOkb8&Yk@45h z8`cbL{v7k&c?i#s88y|RV?V3ap{y$hIdeiXpNR3;KP*ota%6)$7%=*LSOO`yo z6kGJqSn4wB>bqHP!dAGEMlxHbv{%+&5RQE>!Nlj=pX?iKbbz=``WZC?JpmUj)U10jdP z;|YCCFh%dakC1)ZmP?U(R7#df*7tWqH4PsXt745W^;za?m}uqCSGyX^TL7k7Kh>Cw zQApB_N#h8~*O4Fa;(L9d=&&@uF6UE zB~!~Di!a@p9ZEfB#W3MW96=i70g0-BI-TQcc~uodt;9W|`_ub5RP!&%x@n%Ve`Nu2 zSJ3jFf}#9Jz?d1Ga1nM191=~}iH9QfGY>qo9i%**G0(sgaG5dPio6p6mI?A@Md45= zJy^hkqRip{s>K$uQUA#U(r(bOK>W#iifSb9DgnUNK!Wiz=z9bKwCwkQF0gjM7$HLa zDX>X0mWto`+9b7kT2J-Iy=)-kOtO9Q1P^4ByHDh1c>&=$A;<99Q*xx1lsgkx&&V;% zD~lBIA)-Klwqx3p?zu-F&(A(_dEyEDmOzRg-0-a 
zTw(A&e{dP`=cZeP%Mcll(_7@c!ZotGD(G*NDl3r+$=EdJ=9qVvriLuGst;@WbD(x2Uqf{dTw<*-=bV?*I+Y zYWK~%m%jYv5iLA?RKQ_eU_e2AK3Aeq&lLMFo#rlv-L871pf7$R2cALqk~8w|{i#xR z9e`fO!)9-GKH5f0W1k*M5kReB7jj^Ro$?modZ_p60PRMl8NR_S2zYJIYr@YIn)P0x zI7x=Z86R=J*p8;6ckkluOZ!UGdgI|PG zwx20W))DAWbrr`O_2{rO1ntwoL?25^LP>8WDGa4+prb9#FxHp@_as?J$#=O{M@C@+ zp%t-=A@s3ZfK*jjafo!V^>QiV?TZTf1=KWn<}QTQytpvjV1K}*7?mDcDG$ifcxDNS zIyJjCxvR4m0icq@V6yx!JVk6*GCe{s&-|T3w8t)P75{WVG)*#!@^ME z7SDeTJ?#hx2Dk~~V-23W%`c3wn1BPM%MWfChHKzY2)VRIE2FR}gOMV7JtMFdq{#YO zCM}=$b!hWlyOW`85J8*r{c7Biyv6St0;Mf7 zEzMW?X@~Aeu>ZYB1;8>ue3@#-0pEO{D;7cPwL$LUFP}TBaLGA}LvJhuEEf8BnACm; zk?Sp#-GHF#W>3fNLN$@wRu;PX)#X>8TgLSdazJJEd-*QeG=o+_@`B6aU z%5x4%_)}m@eL~8q3LkY0MBU;sM8G#&8)E`WfpcA#J#cB1Fa?V8_v6k1C*~ksMl1Nc zvwvnf+rckwC}H1zWNhoodBmmB)XB>2a9zK1E#p2tyTOvkb}l-TO=E`c`?dK%mK&_G zNzeJ}yV+A}vcjPzuF95AZ(;mur2Tef_#J++L7tw{S@&iuC0$POeT|q`U!wmXi}^nQ zJe23|=z0o6+Clqy{Ve2tuSw-@5aq_Y+8F}1DMD~y((?aM_a;y|w)_8RqB0~B6;TvI zltd*eQ<)_p%_$8uN%LIBXc9_;22Gk2ns}N~(I^d%o=Pfdp6B^|F7@ucf9ve?UuT`Q z&N}D6*LwHzdU?8^`@Zh)_5FUPE7w%;P3Q$Y^)RE$7Zs^t9oJHrwc;ur^dHvr-p+fpm48%x&CerwbTrk5rAGofh&72~5rmYs-FP^s?kYao{BGz%-Q8PfXLajQQ0 zCu#A}>`QnY-ZSC$68#T~Mv?sw-ZhWP4V73g_5$a^a@+LrS4*%gmXGORiS!ep`)7il zC$Q%NY9*veng_?thUAkA7<4X$IYNT~<;s&JtD{GbG7pY1kxftBDpmgZY$imCpZGa1 z^6I#L5m-@1;SfsgD71KhTjraTSRb4*o4aQnKW4Tuh>8eCQ#N?ih#$NguI;$-0nS|a zlU0CAE%q1XdGPYhOdPLsXWi}$$n5vzN}8&mD#hADbZicE$tt_^lhm9lfzJMp4i4c_ zZ;db7DJ&1~LQj>^9;I$(|Mu=Zf?!Rxd#U`r^jC4hT;a0K=Tt)|AF0+=tK?2Z_ofi_ zXwLj2D63z6i_LMM;%ZQ^d?NNW7l#&}UoO|Z3Wt^fLb#GFULx>iHCELJuc;%9wqhCT z_GfjqbanJ{blvWwO9TO^YGho$v;Kd|a(pxTUKmefr+7KyhS8_;T79dsQq5YzirE;N zmWqUYfgNuZ)pia!`Pl{MR4 zJJXU->}@gib;Co-ka+I-g0Gl6#%_-4VhE5bTagkZTk~}H?&2_G^*=eNZQnB^6a{_= z$E?8(z|Z|%67k&k^~b^48F%F~169)OatTx5D*;H6)@=XC>tNMBEonCqCpT~__L<*} z^>k0&NRxE<<%RtZP{D3IuZ>ch*uk$rYirayx|M8ravgioNeK$bhX!est~4ama}}9*~*2GHLzU*KcGPxal(N*XmCtthqDO ztx+lS`@z3J2^Z>e2EJV1UN@|y#}Yytu=~I54i}p)RnAtgx=6j+-60*SZ|0aSnV+>4 zJS$ktCEI>9@RZ_o(q=!90r}?bJhKnlR6Yu8=2Vr5&$*~3Qot@^>tI^Hvo6&_@qGKX 
zi6>;{vOOJt#v@7JR5^S33|J5A>ou{?7utq&nj(W{bVWxQNPBwOX3wvTj@5o2JyiOB zR}3a}ni4xp{XbF{MYNbY>pTHI|M6p|F8~w+_JFA$sNhA8$xBe5 zsF*yI0TKvVlEvHAZI0yr@7Jge?+nCh_1_*ta8`+}YtQK=DKAqt<}TevpGmd?p{CO$xNvF8h*06w#;p7@YAsv1lE7O{0* zynZJ{x1b*_8KCn8bU(0p;sHlU8i&th%DZy$OKy1IEjVquGhoT@x-RB&W6;`TLr=9k zngq+$-SCEfBf|s{3xw2pZ!7SV=mEhDmDC>#Va7X=Mo+oMm4Pj--Z^&_#=D^gvSmoc zE&&5U%;XvJ)d>^}e(-a?$7fPG?wrA*y3GsVdgmB4Izq1Ih_Fa-d| z_HAGOJS!RmHDm#s_5A-H?gw&wG332{kYQPvhJ0_x4`W4lcwJ8KWw|MLVA^U_$IH9E zZvh-7gGsN!QIXnR%kI*S%OImFCQ7;-d^iy(0A;yHcqWZ3cW}!2FT;6?I=e6~GFgQ8 z+k&5WMWMP`+g;Ad$sRSCalqscO6V~E-0NNMTzPR_N15!S2EoTJg7{{bIKzIeh_gHR z3y51de$~=74vPY@9OMW^#mL;*>KQ)O$Fg_0&va-jF4(GpP3TT0@B1-o_NCz>rA(Jq z3+jM?J2+ZSdxVa4-*&V+%8NRmGHbRPC_W`nsD6CtkOp5u)f{lPLppJ`!$566M2M6} zb3_N%XRqEjh+kad{AB6zK>F5l1~c={#ekrocRBsMLh7Sd!B0t@H!HF?^cCBt%H%Z? zHAyIL8a8Puz4JFT^0&>zwA9j6-y{4mp%{1z^V(IBwU>7%3)y+}{q_%rkY~SvU3J$p z!IoC)pL);THrt~|HY}iNixvqaIwrgh31Cw;RQa8siUwk5aaddhM1oDl{wBZU)rT40 z(3M=ZSIYG{vy)VyP9s?e~RM z%cKX}daSB^n`+c~4jzfC6USTvMhwl*>?sYgip|SNFw+e&NE1UXLqD29t7uyy+&$B< z!%<^qeer%U3;(JV8pye8x8ELZ_{^)EGF6bq2Z zs^F}*koI1zGkw32V-BX;q4pb=9lS08xKS;jrgJ2Md6k7=jr#ADOuR;vi-*xX0`3xs zwp@???@ZcN%fXghrBObq##4?~DH~*d8iwa2DN8$jsIO136kqSrQ9xc_NNAQXbk6Ph z#Gx@A>nR_ul!-!Utw{^N-@F|cl-T_!`J@Y!ypIbcKz7wi*|M=0Rr%-RXA#*}{f9S3 zouz^d3?3ap5<+StRu%WQCJrtQ9(EtG1BgT5z1PjXnM_#|GP~CWwte1jyf?UbqSCc| zVkTePg}=nDn#;+S+84my^`6Z=;VF4*X#vt5g$&^$tpaB{R>i{rG-hx65sKsQWl=?e zS$&nAR3)L#LJQyW50>*V5ml3HyfbsXruo8??+ucf`yN$_HD$w!!&|7CO8H{JayGQ( zRLA2;=dIs_A!`*6xFL+ML$?z5A25tx*3>Yp5bg9dt#mC*Cql%;|D@ATt!grUJRZDo zNmLBIq;JBv9zIxvK7~Gezdf|XM9m@gvRHa8&wLl@S7-0i8W}*dELQt)k$ccy?tqDP z=8w4LcV35x>ZOe-q9&d1h+0!D^?4*u^yPlv_%8R*2@8PE7Q>aEuQ11MO%rl``)Wh2 zZo##PuQl!f=A89Z@=Q3|F5F(+WSv}K{buToV#A_ScBT4h+b2{a=vK`p7wDyGmFPh%h>5Y#*@dd^I`}6Y_o&p~Yd+q1@Ub{8R$SawJZR8V$ZwSK|;ZQJctn>AvOV zwDK;!e){upypRzxglof7@7KMKo9{U3#z;dAC+w7Kc*sU8OdwfEFbGTnvuNz}t_xlI z3RhxuLf6Wjr&YnSc2FZ(&N_OUkXB2+O7~Wqsir$_99CdBz+KXjP@m>~s9?i0(Mol> zWQ(pxnsaw_N0OP6X&x)A-A 
zfhz_JvmCr@9rlUGoVEF`ux>+Ze||_paa*s4SuuzAQ8liJ?(J#y+#b*%b`*#)8=aq0 zc0b@GVV5;hU?7T4Rbws@p?tY)aa}hi(!?Zn+2eLhkUVw!`g`ZDirJJL!$;v;bJk=W zwDwp30E5dWeg19|S9JI1dl+ZISELE$zzun|j-~y**r->Atc+t{{z%l~?g`;d-|dIC z@OxgiiPUMoaf(#~H@sgtSHPM7W=NX-Yvtbo`eq+)_H_BIZy{I6V)ekDezWq^k1|}Y znZf&o_BK!U&Mu*QQxmq3a=q%QC-;8SvjsuEsk`nbyhwDI4(JyBS^yJ(dFEV%#ZEYM!Dkr>wrI>v<|(ZoIeU!!3VKt9lzx z11j~8jn`V6eHN#!zDM0H?>PV+abo}G?}dvF^f2DPM?lgAa$MZ-{hS|Y;qs}9V%a0H z;(%S&M7nOET6*$yqZ8O7gPX z($8^6VjGQa{?xGb1cdtA(A21O5KRuFEw}#)fK;g$4RuE~zhE)v@Vzge zUN4V*G_p=dZh-3`%jKTJ_51Bqhn}aFqz|sACJ2@K^oWO)=nn>Qb*scBHJ>Z-5lnba zGl)!$f6t;ns%_3~*Z3BFvb-~3zQ6tm# z7sAg%iBCypI?_`Z3HAFijx#E0QgV!MZ*RG9i-e=@_RHK(fH0{3*O*WEYI;Q`Rc_S5 z@U8rrKV^Br7JjWVm)Gju8}&7ATsI%oLEZl=;avOd`F%?nsO<&sJ<6W2yo>?~Qslfe z%X^JiXo&?#e<0`SP}kgkBKdGTl)OaZ8Wq)Bsa1a}@X3;3?OoCt3xT+T55+usk}AEW z){P>$+;h%xkeSK_{33dj{2{U6X5y;qESR@n)!&lhV%0upXNV>q1IX`v!IV8c#Is!` zg(@z*zO%zdKb0p%Rk2iYIAB9p%4q^lD5N~dn0VLrdYE!+_Xlosi}l2f1#pEj5zrli z@spn!D)RxI;iG?zFG`+Kmw$wPtnxam_8&$hUS5#Sjevf_=Jc4n#58cEvKLrj;o3%A z{k5Di*_(Vkb|#v3Y^?Hf?}xl*j}Gl>76-f7xC{4EC-C--tJUa~y*z?MM!C=WiEdFHi?+cf_@ zI~0W8h5f+e9Jvk|;>i9yHjYKAqjIH)m4PES-~T02BBp zJm!BtOdyQ=GT3@xC;(hK$g}3>1CXx;jrB_?zbxcKh+CWB0GQ=0 z=DzOAx%X?^Mm!ui6rg6jW{N2XlCtR6DYCOXB@+eSOUu=YlGm~pMOr9_}K?M zr*oEMN^QFH`TzlNy|LY~ce2y6XmIUd*h@L_9MD{&|EqW4>!J&c#TOk>@d91toi=qt zjo#si%%b+=f8h@=O!P3uDSDcidiL&rvxaQ@>5_Qb9qIF3%)eC;bz9(<2*ap|^08zG z-E{~ZFjzl8vs8e>`9;m2ZHSrL^BW=}n0ts52o=l1)N3ALzzlr*M!Ke}!_cwG{JCl0 zHohmwqNz*=ki{X`0ib(7qgL0AJNO85I+oS~I&iw-%a}Q`)toU!qXwr`46_>5*Ywwz zzg&VDhEfKb6+8IRweB3PIk*VF$5Uo64qjsEeEe&4P4rehk-`b{*?M|O)oD|*wwXamfzj0D z3gfB5N5ItJ=^Huqa6wsoC6fZXb!5!$*wcSL3b>dgQI_aVvbUrZVS$|~inF-)L|LZz z8Z~NiWs%I}Pl#6_qvxY?h`i3bm85-sZMR}_`8K;%;O!nl4E;5gbyh;8NYRe@hVlww zE~T?FO;Ga^7;N+a+~~r)cQT)+6&O0mZv|ig(Zk2iE0<`dW397NuLWfxQ$~D7p;6YG z%~W_|FNztZ$Fys?7Y0@(A;m+iQ{x$%1;-?wd(*U))RW0`6E)@=olWCcS+Gr(k2Dt? 
zINkE&X0XX^_~1JeyS0tD^gE+_j;b&XTLU@l*DR#}5!UqEGS#R)Mx~*xZqEVr@OPj{ zGkoJ(Qy#noLkl%1wU&CHtk^H&i#-rrZrl6;dj>1q+r>bS*W6$(3%m}1{CuB z1z$)KM`Fg4`z>(XoOw$mD(IEWbq0MCg_*wuycVKYCBrlkb$9yPTs)_j57w+o8` zupxcvUT`S9`5*)c-x*4hEozeNr1O7Do>fdEwD~*PrSs+kG~~pcK}Xbr zbj}b2|C(1=?`$alxk=&U{I=SmtIU%a{}C#Keax#FPerV|PWo%ZaN`;gIi0nPPaMjn zQeFl*I3VgHUMX`y-+C&{Jt(Uy#@^e%o-I~TdRK>uTyeyO)v5U#3K8G|i&`S7po&w! zS1+f#^-gi@OtZG-&Y-Kd-0u^Og&vz8#5@f*1QWjMwz~HW7$gyxaS$@eyjk$>DepPL zJMY~1pW+SFdXlp;C*CME=fb{!JK$??@4=z8^-}qWi#6#e^j~sKHP(Siu21^g#Nv*f zPTkhab$Q5B+Ba8zhZVYeSYC|BIo{Wqey(@3=Y>^hRMN4>SJrTeEyY_8baAOQ{bB4c zKX=-$K$|0OKBTQS@fZWEg-iY+Do2q+yMFH><$PjYB;hP{&Jz=f*n0RwSV*k~$O1p% zwmZ(ZqumxCIrm=dtAUCx9?FKet&3xIE-O}{>s9o%p~-N?ufDDTVa9+Tnd5G%>&rIg z@4^x}giaA3{Uv!F#0IF9QxQXwXbCCQ1FD9D+b26p5JJ=UJzEEqExAe zxtv&_$|Dx8it$c1x;*PMYVo~6iVzV7^DokWO$cF+4>Xx!1^EJpwrjil1=cg{LK~|n z<9~R*+W*kg65TD4gkXnJqcJCGuj-CvluADpVaPhEsMpMRBz_QyEb4MbuG+~g z4?hD*Kas|~V7+2!Cy7{DVpS01W|7?dP5;G5vz;Iwqs~|_-f-%twNG>)8KhK_DL>q@ zwU_uN4sP@_HN<96GgQ!02He7i;iUtsSdcwy>+c>lrfZK??eEhi3TKTSSTVgzkwJvT z{R_@6r((+8r6BdtAna4QzO5OFD>8TB{XXhCcsb(S_obG97x?~v7P#2Gy1SJHt-QLG zi*wkbH73&Y#`?QOQ}n;(*R6RFS;W3zK}6^Ihe|*DerK7Ozo0VQo+?k61>qsFX6pO* zQcfdNG0sw{o}ET-ezeJi1Z}xpjyq^Q>|;M~&qKFN)c-G(cIx`Y9dlMFE?lu>`QggP z+fw{WL7fu)!$AT>vpkS$@$g%s3eQ>THOQ-;y!1xL8f)Wq zW42**0d!LcE|FIqOHg#qj-&eZuosK(=B1rp?{>!Y!Ik(YDD+ zR`JVxd+31JNcqzsRHjRH7|Z!p+$b9!puF}s{$LL!MsX~lCx4dw`4 z%M%uq(j67#Ei|H2txJ;{OYQ$y^@`bERlB&O_9^`aCS4)FO9hb<&5|Gnh$E0{MJ^QA z3u9TNz%n{?XR;87uQy-~<|t-aw>}~6)d;1-gomay-eHQ?{2#q*N2~GD4@;!S+us=b z%O}EYMvQES=!D8v6LDIw?n`(a71eKubXK>1t)Zb2vW5kMuC0<;4lo-8@w%^cotSpa zWqI6QDMx!eUbsvl{YbIk{jFP;?sYM+){Qan62e7-j`1_Da)_2`_5v8y>SJH5ze83jgUOXuID63Gd9OGI(__B{DP@yHYNN z#Ojczy041wjj6d{~3$@mj*_-$d$jEc6`bMGUJe8 z5<|aWhZWf5o0zX)&8U*8EYpEn7Y!l`Am-Pl^SIZs0L@cmLM^v20X##jZvEfiU7F$x zaBW-X27D`PV8zYounTIHiTHb0(kE6}J5=-%+SV;5q!v*&bvCVXBxvhDK63R+C~zPN z2B6hB1ABI?O!`j9AdrZ0uH?w-PR0vDD6dd76M;0HGZruj%Ok30oA{uZPbs{{yT<~* 
z`eG`9%%r)hN~j6mmys{R*K5BUGG=d6z*n1`?O{KmmJoTUz$|q7dK9KzEA5_Z}|kPg2*uAd}yz%~^pa~re#%eXT98td;|T4K-n#~ME( zEjtC!qK;+CJ~GJ4zNN-`LXuL5ZZK0@FF<|SXVZxT($iR;LlW$!ERcvxA=c^hvzV`x zmfQiWov{YxS&)%#Q2(lSXHhUB=7a zWL(0xXQOUqVqsB%#>{{;ktr+Rb-kT47t?ET5=6Kf%Pmy{UOxl3N1yr&cMyUb2ogOV zG?^@yLaK-b`bsN znFhJKZapa!Re^sy5!&8ruQ@#Nmz0J%Er9VmiPgY--i7Ix?!4+y0-SP?iS{Si<>#=k zd4SQ}bhdc1l6L`{7&mbPp{Lv9Y>D6MyLMwM)aPUqfNht7vPqw)o}j+{JwjjgRP2>) z;h(Tkc}Hs|#^p;pM+`1#yaNFV1TUtd_0{{e&!Hdenf!)_l_l9Ce#vezzGNt<;ZF`@ zOQ&M16YE{6V$dWvu(bFN}hj|E*#d7Il6EgJ> zp>eCeZsksx_|UjQakc#F!d*cWsa4+G^Qf=0r4`dmTC-D?*MTh{;Ni<4@sHRr7SL5v zonEtd?jw;h7{0LsRLVR-Gemgi@{4YO6bVX(*8^2}85T6|{TocpQwbyj?zG#PJYkD5+P0{|j?a-+#A`aR*EezVp`geH^qBCxI zt1DETmDA2KdvW?!@Cu1Hv0JjtFrmnx&fm%a0BAk*76(c4AyFilqWlfx$xiF0M!~9- zR}WL$dZ{D9nj2O5Qzu-+^*w=@0bUSC*FElbr)nPWq*Ee zccIVTmW9Q4!(}KgYO7#f0^JB0%28045)k=S{`q9Ub=sd>NX>~=EMyUWdS$9YaPax9 zV-Z`RiSeE}lS<))243r#u0*kx?uGKsmGbP1TzpKvzOEyUh8n zWqdm=v%T_rg{GaZDH#REb-xZW-IY39{=km8R!QX#Y9ag^4R73I+mAdwXl$gEL~&Ir zXku{JQZrJM@yGf>q3TC?M&Ax?+1l2y#AG7_->tG!3I%TpqeGzY(!|?$XZx}Y+JaL- z?w&_$Lv)*$otm8*&z9@-edwS++I{PT#iZnL!#>Q}g=VLU2dcQ;glO8-YK0Axv}T3p zG2L-SPAT<^xcZt(6MgzzKQExs~RAo6l5o_EDJ;6=r1Rxp0JKk97ME#<3 zL$Xrz`zpC*nXt3KvVKl?nAAVm5BVoG);B<*dA03)T!ZN~Xw*Cpwz|3fg!)FnrDa{2 z0tZ|-Jl{l99QMK0ksvy~+#}Pj@Mf7`jO#ahFa3uGw7D6}BVGZ(CvjpHpE>ChLiQen zt`LS?&5r4UUed^=nCV#GVX>^_=EHVBb+P@R4A&2<*kx8dxGm8`M<>I5^4Q^v-?`I& zcp`{kvZihFtG@rp=pu`d$x-fSMV0;f$A)|?d33vXOdOMx4KG`^H|8l&5C)Zt>!G+J z@l}Y?+}Rwbak0bZrKDJi=K*|`1XX|j*M8#0-Fv!C|6-L{18O8pW*|y^W2+>q|36`S zb|ys+qQ$Kjwa)x>tv_(ud-8j4`oT|GbHWeY#FxmzWZ5u%rF8R7xIIyiDFY-OMOxOw zcao@JpkktP#{ccnc_8t07NQaLD^KDki(S!XphTAcm0xA|hSp`BpY?I9rB$xWowrCL zx*^W(jE|wv6OmQ0!jYL6Aga@B;6kPBiJ&PP_=pC)#t(TG#0M>daBKt7*b{v~wa6gE zJvMa|$7P{{G>bH&`t5fXk&F)uLr5C9oR;o%a4Txb2ed(y-leveYE$uLpa25WA+|o8 zCLDhuZOhoYaN==<)Zy0p*vgr`r}I`D&T@7up4GPa2OTl2pq_q+Pk#u!<#&7&+y$_3 zJebp2_F;;|d=Cp4opr}DI@qD}01DxEeqoj;!_4|bvawuoocGJ@9&PleIDBLiKaDt6 zW1{ql*BIshP}gpFzeIF35DRD$7L=WRkM_2SpaEQB#2dHUVM5kUm6$^ig&V+FkgW4u 
zQ03YWb)Z835e4#mtJc2_9#HZ(!ryB6^TNhXM~DUzoQg+U6VJL6j|lOKpbGa9AOCyP zWuYI=F)FWcG9m_`{|tl0ByCZka+!rlYo{6T{qR>b{GKrsAEMUz-Hy>l$U?dS# za^=OiN|(Sa9lpXpR+JAJ@GXjYzFa$d+xnvcz?Cl$MT|cC+9>DQgf^zI3_SssPDRpq zoZ0T~+M0eH%hDw@33oYe0h7m^tObIpMCnssrj?#Gq6Dr1Vu*q5InOmdtlM_EwiD*q z`wsCWWefBv#Z1i4lEE3UvnL=GuhOB4260;B?UFv1^|59`V@vdKVF=Y1vVw zzi5-{uUu^>EJq@mgKyFqOC9z_k~v3S6h?&bXpUcYau*;0V+C>5@S#9*ND^;}s}Q0T$lxw2${x&5e4V}S8q3`u zR7@-5Ok+T@7r(t5Et82(_bS>&O6t@|x|{Ive3Z-%s#K62(J*kUQK`v_tM~ z-E%O95jqDOt7eTA(;F!I)T*c}tGcf~yZtrfEZ5JE=&8V65*xRA?W&l^v2s;SzrRY` zZAh|ZR?eFEO6A-ITo3Txt@WwfuJavpdelEYJvw`L@V3;I2LBn>$r3zWD?{3$V&?@5 zZ^n*1jvqvd<*oa#Zs1}Drv9D!au3iZ9o_fuA&+%L<^pbknioA=(zoc1tk;J<2{L^h zWOX?IK&0+qq0`P(X^d|74 zWQZ1gYX1q}o%QQ3(+K2LLb6Fl6v7XA3-sO$_fcikJZ%27*b%h|AhC8=@HE)wF1qDT>#)h+0z> zl#Vu!P#U3Z1-po41@4z$BX!BFVw{!2B>$7-Q8rwfDYfI>Q!t<5JbZ_TIY-UPtJ*Kg;Bge((ql(-zOGVX99=ImH1uAlSe|KKujQ@>2BO~VRJdALSih? zb^G&~C!V(VzcyiUFw+=5kkKMKt-A`)XfZnk^a`LK$a=hA_@)bNwX;9{q+vi#mOLLP= z-ZTq*GZXZa7}HLcL+^~IxN_g-Q@eWdH0<{6eUWF^YafyBqQCi@Hqv%7GOjD3Y6sif z2G=Es8hHVRJwGWE2a+|rdDVjTX})8|2M`BSznsxJn3FBW4im@^?7QvDoey=$98;Xf z%H)U{;{s2Jo1Z)%om{A6doO!)L0WBPSdi}C)29wB8yjj(A<96bwBvj`E9tFoM>Gld zA!FD-&)vfG(?(7$-JdXugicgeQApe@G<(0@^}5gKu!=)GbO-?V`+fT-9pv?{F(n1T zA~fKot&!|uzAM(uQF;ZJtAMB@OvNtX}J=*2ud~VGO#wogg zN4jKbLFD?b4vFgOa;%*4X^ENan7;p-jruKgWU5DWjZQ6KTmyTQ&@x6gR&n zcFxgpW#`P`n!2Zcq3lZn{Ji6XzUn5)G_y>4^`kY}v(MAg@&{16Tl=4_eUSTS_Zkt$ zJa<72k#85ZbjSXRY<4#*6))l_N=aInB2td=m8Iitk{#Yb|GNKZJ+*&~k6VRWz3JPq%qd!`t z5x9u&v8}3`=(~BV6DyRy!~w=|Q0}Uq2cO6iE3aH6;gW_?jeC%Hx$6(l_lKFvtYi=Z zRFTn?yGktaGaEl@Gs0aO6s9~96N1fxuBOVl@z=YLFm@m3>1j8AGd<%!+6c-30U>qg zOpdP9ZVpOIYc4(Dv?F8yi66?NzmJ59sbA{&paKyDw97Gh83^PM9Bm)U*n64W5z^Tp zF%4mEsZ;SuHfyXeDnRsG<&EX#gfUB7+ktw-tZy(Z3zvw?54_o`lbV zFz+jT=tY;7gem@h2-jK=WO|Pm=HRd6Q-YT84|8EC5Bw<1pe*dtlu;6FYny{ufBxT}9#Q=3{?&1h;$#Pi&3!Cwb-&-%x znw?1pAMb}^s`wrelA)`XdLqjpGHEWdG`MaSKcaEm$w^kQtg!T(NAqGHu_)3HL0^#r zY27Eyn~;H#7(N>_Q^Z?P?R7J=`&&9+bZ*ZYfp+n4YPHeG$e&de{NeOacesGQGqtLF 
zX8fVt3`bG;?b)Z~=<7SP2!^!%mnBY1TJhktjE^4^4pz0{aEc_;_<@M44Ie9J=i_l~ zg5$J#KZ&nLs!2V4;X<~^I?qO z5_?4+pIg1tTREKn!a_BJCf#FJA#u|&9?^rgH|*sbo&n3mDG+^=AT!k_3T8J=_}*mZ zyg4jV`Zs7f%vfl@&iX-gycs3Y(HmQR&>~IU>zWE>rbUaB&*o8F@4}(Nf1dA`}tPHZhC@fk=}=# zYa6!xcBx)Ci*njYq6{eCnUFVaXE6@qY=`__l`w<3XefZgC$Lh9P-MiMBZx@Gp@6Rt zkXfq6GZ%v!aa^wjMn&_9$Y3hh9N62k{526tqEz6exa7uI43oL{Q@iwJj~+r(AoPaT zIsD#x`14qY;B@;zaugxKzStVxeyh9afa>x^X{2{L%;RAK)Bo>a_AhQ2e(@g6o3Yej zt`l3?AA}f?<7&YA>*r!}5ynG;(*85iM(jPb5>Ts~Ywkhng4WWPUP8B`~%DI1f+<=orEDRjN5izQjkZ-nY{C0S*v| z{kBoqSqUy@Hj%X+v;$XeOc&FRJ}bfzGpI82K{&dn>yIX%)mSeei8E!s#brVsa}$6_ zJ>4tTP3w08E=4C0-Ualc*?!ud|dc3tC1)?=bP=r9ggGjp@)L>?tI@!!QsMHFb-w z80P?pEup@mIHYI3=Fhj%!(~~k9nZ5;maFbo?g~Euf1tI8O}G05CV2Yb{&kBrsJyQY z>d&Erz#cmi6Uah^OJ1iCeQC9r`Z=#d{)&fOA6YkvA9GP*ovZ2MdjqtM|jak~E3bALKHy2aT)x9iG zy>-I@EE_Rw5+5@r!>|MVlp=_P`oSB1%4$2;l&=3watT`7$0O#KL&Tka zwclwjh`XJ&zX!%>Uos`Sui4Qigjc;=DHHFMi=Jsnam#u_|lFG=@2 zQwe`S2w0>*7D?Zq_sON`hwQ<|03=3YgM9*QTEl!n(2X6nKN2tag>0^Xk<52E_o8Zj z(2%TZpIC}Ai{B%BFdY$=Koz&!aK)tSRg$YT(wL?Vn;wCp3#%nwwI5*vFj46>=`$C~ z^$C@zgE!>X-%RVzMK-7HcJ|e?5hWdLhdMXgkf?39dCYVdO=gkNhO;T}XI&-H-dKWeq5XM_*2==0qhFGE^#JpGHxstQHH76jwmz&_O&;Mh zYAHpa1`r~iRV21@AuCEJZqcXb%lS#_9@+^%$Lxs%OnCLv?M05C&h(#z@M(qh3$!45 zw(FI3r`-+eR%CX!WJH1h=qNbm!1%w4M!GGE?AXn98y;X{A(5JNIS_FWcM#YO$~&Fl zivmp{mZ?v#Y~_CHwfjV3xZ3PI4y~@U$oq0j_=-_YC{+Q)*2f>|o+&QmD#>4!I|>qs zo(`RLXCX@4Va$O_asX_>X19-A9bwA!E00T+18z+G;5Ji7DH6-H(q)ady4t!~iqCd4 z)Za>xq|f`JsEHRw%A0Ec;MQ-d6(In( zTAp58e(GMX2X>>+8iR!uSg-ERN*P|=y=CHK{Pef47PCVIyXKDqA*#f^eCM97=Flx5 zr$0EZC^>U7?_sl3b{C0Oy>}Ta_|aF*2aD@RANnrT2qL6DP+|* zbpup;k|sqRtNQNQGHth_ux#Oerj*H5vn0C^Dt+gH^he5aZ?iAL$DSL~`jZa$oJK33(CVWHqGTl9vqlS-MCkj~`#)-{doeeF)DkdFVn)!@}- zP=uFp>5fby`Cj{(gT$=Be(cD@XEm3N_(}VMUcUj4q8RO!H#y^t*61NUE|q`}@!!D| zoVBw3P_oWv(b2A-UT>}BDpKl^5Pq%ZkVWJ)18R_5lx|_oBsP*~wX5A}yP(-o+&vD8 z?42CBk&|0Xy`_6=G8;O*tgT08pVJa|1g2W|~NC#(4>MsF>W 
zPY7{wyRMG${|PO3qfq3~uu!!9Ci~8$xb7vV2WkGz&R2lz8sQ*)OI4QuBP2)S4SekrtaHKcEaTX;3XN=Xind0e75=CWK5`5{D)t4H3Hj%F5Zy7w>iml_%IL$mPkE^d?oan9V;5*kL4Ey z62$<|3lCLq!j3VP$@nsm_FaGdN4ALJss~}gMC%U1YeBGse@e*8VhX+y00|vQ`u%0@ zp2uG(PL5t8RCAZ)`P+{l2UcDZw!Zm3W9GTd_eTyjH;<&6kict0<0R8&OSO}3$)ZjS zBuESDI>J@Y9U2<2bj?V>U$BB!J- zi4oM5&C@F1V<=#4Z(_*8tJNzyEXt|-k_Cq6^QxY*9YcqJu7-@u395RDZn~|T=u|U) zJleq1od>VZJ>cxP>G|iT6OOKCo=+5r0TW(2HC)IedSqT%dQ<$$(Fe*pJ6uK2tj8_rA|B(`Gv{+o z*O|XO=}TRQBiqhUSMA%p^toW_xV=2QTIuF4K1g6$;9O>L`z_>dAG$%6Wt1M!z}AgK zE0RwCEH_LeO7f4$&COFl#_NUaEz@ThgRAinul>3Pt|73NG%TZOA1($b)I~$_yAmfYJXiY;35ZEKzAa_ypHK;w-^3^b&%fT zP^{g`6*vK;k0KbLcC4`r7xGHzFO6i=Xl!!M8<&pFfcQ&pO5=mh-mfj`LwMmXYRJj+>9jvKd*CL zE{&4sKICMNWeYz4RL2Gjc8O%8mKxuQhSIhhlLilMU8SQhMLD!&eOJUQ1Ap&9Joka^ z-iV^6gm{RrOw^4*e4hBtWIz=j4f$7 z(ivHOuH}){jqmGme#m%G<_>%p5_O2obmb-CW{1vC0!RKFf)w@p)LkMpb492K&X$=C zU%?~1ewxo^4&w`MUmk$2WvEIB~Jvc;76ms1HAI5N>_1+A6zQ!_}!nfhn^ck1R3t zh4jDC@XaAN7n_g7UPdz~nxVckM108~+RJK{Oz*HN+n7rn$*{Qm64yIe`jJWPq)Aw} zkkj=Y30Aquk+A{}KY}W?ak9xmmIO;5iL76^`L@~@GS^;X|J}P3yUEbH$kf4Z8S9q}3E!2lE%nJI!g#{x2}~_PxR)YZ;S1z4WgUR3BQ?ReVa=Umdf41r9GLTlvY>ugT9uoAon=DqYQ zyQ4-fK`t-2?}%b_?#A4mTS`fq*LZFUQpFm@soPAxV01PJV0zE*`fMRBBNNT{3Oq() z#9zKOjlb%_Mo*1mP048+qa2aW6q2i1-}gOg$ACmLK=#=Lt6F zKnGD3Mhr7ZpesAr5wsfN?kEb~NDgzyHv;7HIZ$@VQPfLNrbS-E%6x)35bY{FrLm#NOD)db-HrHyno?;sGzsdf8@1rEd-i}1ek@v zYir@CU3I?H<|+*P6r~pY+ig7cI{QH{d#O^v!rZ~8BE)%B@&-M@-laZFj)FiumPH8U zE=%=ADRZci`G=|nn)5nhI%oWS68>&H4%OR4vGj^Eo)nS*L2IO*c8X-LSaK~rXdfu? 
zM9YEWwLh_M_ba6VDkn-0rx{rD$$>d&wXZ!s)g6a52M9gqrmy~+W4lij@2-rRCCmlU ztY*>^i6DaA<*dv$n>G4WE(VfL@a7N2RewZF78>^dmSY$5o?`ft4B6LjsZ0N$>L-Az zlcT2gr|HQya5x?{bE0Yll zJLg*n2!LxKH0biHiF0Jg7cbqn&QWio_Ccb-vr|TTrkQ;*M$#IsA*Wu~{sYnP+B&|j zXr?rGj*&Q&`V-_6}7C8Xh}Dz6FM;wGI>P&}gSH{o3EFC6dVTHB^qjLl9TUb(ZY}NnD%LYV(Eux}x~+z_l#ukFI}?anuMA zqH1UB4@XK-U~9AG*X;vd9qo3;LN}=M62>!ZN#FTPeGFoRhTr}x*X{73hIHV92+g7s z&1Aizyixs5v^3?vc(ENW05T;Z^QCv$+BFfd(adu_qPa4oa_^DBJw@=RuGpseZ3CKk z;cfOCi7ZU8L*ZC}drF*GD54)1p89H;oG|mo*g^gL_4Iil&CY0QX!Ee{%sg)- zX&aB1%JW4X*F~P82s}5EtcA8!(zY&(c-^G{V*``6ROvXp%UhWx*z{~=!~cfCac%eK z2^%u#W2XLlxSouw?dpHz|5U%li~z)=-To^3FNR2XK;=$O9z?^|*c0M$*{$v-ut5Tz zVPddg9j2Qb!oHlxy24?RY)FQqM9b}f?hrp=Si9pBg{B9r9xC-{M|(9wUjC!YKbDy@ zCs9Yh_Bpj}n@8_!&4M?Ho$m@8`mqje z9po$uER-^5)^@*2n)0Z}y$A!4fd1ezhedl-0xso}%{ZDe;>kzGx}w|I@3^12L+!&` zfAn{o6f?%c)T^GJC+(9cRUX^EP%ay6horHTY>v}7VF!QnI|jx!Lg`S^5;Qr@$1zA z$TLrWqh=-)7M2q8I$AM$se~dIb(P`-rw&J3axMr|k**Iv?BPx){JM0y9y3Z7xi*qB zvkN(@2KFn~t-cS0LlZ1A$q2xp9)q)ErX7AzN1e-iCDkIQ9rV$?5RM$Kf@t>6Ri+Aj z7U~j&nnxl2)O)GBHw8ukHK`E0u*?a8d4S{t2?ihf^3d6Ff2o@P!pXkc{+S0(9V3T+ zq`U)pL4^OLDl$0=pH-_1w>8M7F6(@JBI89GVcV@|KS^7(dr9Ta(V_&`e-356#OKkc z8}XmVAZ8@ydbCRNQopi^Jyd7b~0-?7_!7ye>qp88X3%=|0ks< z{N49Qp(ZOwqVO}96AQs*OWBackKHB~6T+f`PDO(-LH8P_VyT@qRh_p&soBAm{(-74 z^Y$jtC<6gJigF*=TaTN#_o6^Bg3gD*!8|6LU@AC>Ay;Z@gnN=3$(GD*qXQ@(#3Ep9 zblm&rE4(r2`I`sVl{OgIG0hL;b)mTn$qA*G$H$wy{tdEI3XDNnSR9VPTTBjrmeM=j z-Czfk&Vj$=furz3xt(ma7Pj1{9pM8)5N15pR`zpo=wdgu(j%*qqVigo2iFC-7;6+y zkaHnPT%7HcII2ut>WJ4);W)XSc19&tO6RpKo8alO+mIGo--BNi4eeCMjeFuKx`D7- zXdGZW>QB3$-O1fr3W^kOIf~_@#gRN7&uj*!*7@v{xLoep(Pa@%ftx~IZX0qZmH`%5 zn?!a7FO~>Mj9diA4j06gcb)6@s~oXGK$}7JP?TiUUue6k`9guWGXJh_@u@QEOoU1l z_bjpn-|H1$1cytaF_=l~*Yx&y@YCt76B?6A<>Mq(1jxh@H6$1#7PmWYqVl>8Gi|tv z|3=|nuUwE04dk%TfMwf^VmBNOJFD2z(n*s)@TZto)n;D>M&b;o&#=Y8b0z!qktc$a zLNyfC?G6%E7~WvIvxQ6FPQe6`S-LP+L&<2v=WK8&t`FDt>|?(~n|L@=?J43kdEe{p z%kLwZsUGl)?KZ6w%IMfs-Jjy*)ZOcrDn&CuNn^P0_5P^hpRxWOHCF#eYgZmm<+k^? 
za>^w`lMJDgbD9a2%rcbIL83xs%xj(!!ZvjhIb}$7+sc?uMZ}RQlA!@bhT7Vh6CvA} zW5}@G@3Qr|=Y8M%=k=Elo4uZAuV<~_`u@JdiIPk|yN;71vbD#`3Zzq1I&b=Gb5gW5>3F-0wkebo-;aK#+&rY7zt`Bg|ZFipR z&1yej^OlBc!J?}&)!=2UVYTqM-@UJ@cg#)9byU>@^72-C49>mD%9<^aY&LQ+X~}OM zjidyt%Zlb~bB%LkW(@LMGM`tPp8sk-cTIU!XYt3t66L_z_A$sh2({$fO{EG4ry_w&!QKd_##e!9F#D}da=vyX(j6)K|UUkwf zej5omM79$lR|B!Ma%FMDzvS~Izkwv1O3n)y`Sk9_XPpk$O9g;i3NEIrEW+epX+!@% zk-~9WM+e-n4)n2)c!_7Mk+-DNkVl?PQz8 zztsr;Sz9XXZ;=q_p6nSMLwSmZKpC2q$aE>I&TZRJZYsoZk_XB6W7 zipm}rPJI(WEdQ7Biiab>VM(I;dr~d{9o2|z+1DZ*R1tH7s)vep&?``uq;-xNn*O;tlV4w?Sdq@(GRYm1PVoU=5fp^&7AC%^89G$hmGp0P1~EHJ;lUJI8le{we|)diQXh+_1$hS?9`v-&wp1JAl3ER^9A>hTc%JRCk_uvW3~LDki_Ii+bG{xBQ(-zVb^OfL+@Bl!Yt%j z);Vs4&HVj7w7$I_a|!Z}nM3w>I<`}}**nVKa5%ET>0X5Os1WUQlJCIk-0(FZzc8~V zoI9*7NcoJ3#XruI0}Z%ovsy+fhjiO&WtRU_L3?8o;J8Wl?Of~e)%!)pQA(D#q8x028JK}(3fPg49Z`zRm z3c$)pm$C~|9XT-}NMjUEch}AK!UZR@y`02}x`dz^fDu#Y8aOG7!5d%sr!PwQ9cGm_ zWuLu)@)D2%v1M5tBI&6nB{*18DBtjuhHb# z$MVjwU4pmQFIDNK1C^3np~*=t{$vKeHC?rq(t%(B^&Sfq+1MSRp*7n#i z=SFiTD>iUlG7sbDV8-%?`$}ixq<32%o+>+65uPnjTbdRHq?j$tbUlvK$CMR(>cOix7;jv z|2A>iiOFH*4_yqEDdBaj)WHlR+N7~M=lMk&=wHAya#V^(Akt@>m)JK0X3P`fZ@*pp ziC@!KywD<7L0|2>lRQsd5^`aRmpuA6xPL*gPO|O5t`z_C)5~we7#ok6Xpf$8S|u@v z!i&9ZsiY_4{PiLYEw=TLt!0SX97x+15doNdq-Np_sPB53oU(39U3;x6Vw%Gia#?GZ z&LpU^+CL3X(dBtJZ4k+@s@-Zy_dF(g4jOZ9#6efHUO{0b%{=5Jc3x3_^iIziLlfL&1oAW< znzRM)Y0}^+Q5bxx9*jkstoO9lW8k8U7P!}%A0N8;hBg=cibpAX4+TTQW+$s099gRK_It|Rv zXkX~9iMcC(`=f1y!Lo~}XvS#;y1hw}I=dF;z@9pXjyATXosRa zBjrAu!dUN~!DkHHVlpvgu>oIDyz?)uWOlotsN7WL%V|x`tdjWMSS2tHYrurvk}q!Z zWOehcjXu(n@@WKRxh#~RY7&NdE$)?oC92kJM50m@kQb=SUtHd~GiHV`Q)Z|5;)mbBTR65U}uD)j^HkRd+nKP*py+$6Jj#GT#`-FFC#Sh{L8|Mts|iU z2jH|suA8Ah>sRdfR4Q?O9J-(+q)u#zFYlK!uZt5Nq?U(&3{@{Cxc6SNNOXgKMhp*;W8EBD)ChKXnj)+c4TBHp69BQoRUB3Rn~i5 z7zgj+>n_iTUzvC?I{tp*^W9O}NsPkU{@3H0aVeWO`-hsam_tXw1)Cg42IwgA1`t=S zjm|Mju?;QtXifknT5`+dOV%9WcGp>YXB-POUJ42o!?$FfV~pxGaqIrj6Xi-j1KAmI z)usTB`?5Nc z8l?E|Nl;tPRU@s|Sp6sXu%sk@c5n)n+BUMU{_p(!l%RV!(>a^WbL@{B3uJW~cs+#0 
z0j%M9&`c6qCS7#--L*@7n!bwxE8QTcPA^3ox~x_?CfaL18FN`B6++@*~YLH7UXnHhuzEnHYJzBI&Z z7Zwu`pZ(gB_!*bku)kkh(EZbPY)93PHPlrYT;G`nDD0NUwT}dG$TZ{`v#t2L>rj1X z9h{C%ymg#ltY9WWi$o(Wxc;%6p=q9cb+leC5GY1w(9kQ_@*ux3JMqVhIV_<1(!oxZ zU3)+MZg83+N;CX3(Qz?hmiX0#Q-!~S-3x9vPa_t+P>EB{#uSSRwvP#E@{m**_UU-x z5C7(M6L<)x2~Z86e{nHh$A0O!E_(-w`EidGVI2d~0Axu|2_jTd7zi-hi&)&AMcJ4VE)8XFDa z?m{b}_&jl8l+##I;SL>MRSzC9U`XP{4`dloty7HKKcC1g2mQEfj(`3F>P2tag|Qo! zvM<_DeUsh2`5wl00qg88;>Cjx8n9wp-3h?Gh@9qRyBAsD z5it7w?>_a&E5usWc)Aqw{+FmRv7W$UAr#LT=7QeM5rKZE(q~)zuQDds_jG#ocaRc0 zmk^aH0FpKF;UX`kR3%xS$p5wW*3PP(5ZZF-9FZdIHTEH#^h28-6yUE?(d9%{M9A}( zcsd$?=-$h+-erLk8eUBNo(M!e@<(fSQ&1@(4hu;L(Mp7zvx~lsR{F|Tajc|CNquiBP? zWa&O}avVhV;<(LLa9YT5DQW+SL>Pg<+@bF^qD=7B#>L*-HWH=>I6BoycydvJYe5ie zUO03V&lyQ=189Id2G7L@fmyAl@@qFFO zs}lTZ1x4oe)vI!5qc5nGa|h`^Hxjv=CO?mE8 zw7lNav#~w8!%w5ND3qZ0&$|>SeeQeArg@$mJ2U7PeZrR)fYv_ ze?2rlF;{5HPZ^mhS;f@JSD;YN`9%cNqse=!kBDW9;|KFyYnQ$ia1)NIGo#?^;Qm9} JnVMGT{{=yo3~&Gd literal 0 HcmV?d00001 diff --git a/infrastructure/storage/chromadb/37fb3117-dc7d-45ff-9fb6-bb48a0497e05/length.bin b/infrastructure/storage/chromadb/37fb3117-dc7d-45ff-9fb6-bb48a0497e05/length.bin index 1548bb03774714eb88cca8941069a86328a8f523..6c7bf2de8e3c9cfbaaaabb6e7e569a469a1d67ed 100644 GIT binary patch literal 40000 zcmeI#%Zn6O6oBy>U1TMcAebn@Ho6Q+fi85^*qZo^amrCa+}MdBh$K>u#t5SHj3Nkv zoyhpeY8FZs5p3Vi_?U|C*)`}&!SsM}BSJ6y1GM|zBiu`!>ppzFX-;=d&&TiHs;*AE zvIW~GL{X(V{Z#uo9h%#DjwVK($Netz9B)6et^K~tS#5IXID1(1?yb(l9v&^|Ix#%D z>&ca)&$_1!j&wbaCiMK-{`>Rxz}fcr#wXi4ugfg|H(vdP)A7cBaDHdwK>KrK&bEX* z7AhTeuOppV+)+RHrPDEBJ6ipdIHI`U78JDPvF9XD)8{TH(?GN(Jj z9bYLO7rl;TR&hu2!tJQqj>ZAwA#?g`xMQ)>amnkbm5Mu}{=`|A5m}etjfc!>CEQU~ zIxc%1@n^*yl`*&DrR|8jOPxNMTfc=nmM9%pypCM)sFvnA9_MUFDP7okUFO=caK}=m zYzJDQu!{*XEOKHRZf=@{`kTGNX6 zN4amlvrj*=e-}?R-#;=Ze}p?$C>=Mvj*7A_(Nec#*m|Tp&Hj+N^?~d9N~Pnb*HKo^ zmw2t)aoBd0_nZA8bDV`eRw*5~ypCL1m&S9q9t?MUt8|Qd9kFu%kv^U2?9)-(k?%1cGB@^yJJu*2xz~{?-!Jj! zvz(4s(lKD}gJf=O4R?H}bo}FYDEl-TcRS>IF|^see$@KI9cz`2J6=a=dhzw6y0zQ! 
zIAJ|ncgF|Y_k}XY2f`gaO2=KVV@O$-@*KCLVLP&8y(ilpGS`0$cceYFKR8E?GDs$@t*N=5d$9=D(rF_3s_k8TE%Ng5Iev=(;d&ry(ggttdjt5>xX?F4c zXswy%bUd>i$mqY3HOIq>4v= z-0e7RJ?gpfkU9G)+_6#V7_%Mr=NwN6AbL8A)KL+l2}~biMsXW)0dWp(Zr)mP!-JFu z1uxQ~2SGzhjWOfeM0`x|Y_=TGg@6%*l;u)pdGV@Zm?*hh83ZS=;f7v&mAWH7Kc@Qko4NggzvD}xV~^8O`X#@kGIX)EFNbtT<&4@FGbjJ~ zJGu%Td!3GALw-lOzui&N9d#F#hneGN{*G=!$3CYc6*_{O3oVa>x+B=6Jj@(k@ON|< zI{tP#GGSi|wf6bAuRFqtld63m=a>GD9zw@{rz0rj?@MxVj^(jicO;E!y_wTH{*InP z$3IR-`fGkidBt3-8+QGZ#zU(6ic_IrFSbhJ1f+0Xg=67-sHc^uFk(JZyz%<)Zs$2UU9 zL8qfA+)taP+Z|o?^V0tts(ofIKKFNoLdPMeBNaNz=~Bz1MSCRM)cG)T`q1CeTj)6K zbVS1cxynPkV~y!(Q|II3eAnO6N9Z`>bfm-b&qv*`X3OJi9jWlX z4c|0c9XE7GFj?I{%p6_ucYH5&oN+p`k@+6MpEIlu^S!C*g4!1|w}0Y%{2+9kbvl~H z=69rfmRKEIv_~{Vtv7RW&F?W-=s2f4^w&8q2q1s}0tg_000IagfB*srAbw@6HEh5Sx-Up()3f>~%LH46CW1woX8XUWc4?=OA*+Q{c zC=`p2;rH~X_4?&(;VGny=WtmW9}jPZpe|j9{K`Q3@+05U6I(u|+c7NlE-!IQ`8}Gh zIdsiU9&NL2n+i^7aA63;7=ar)JQ&3o#^Hrv0tS5WV-f)bF@AKsJJ>}LdnjQa2l#)taEK$6QNb}zaEddW a;{unsLKWAjp^gS_aEm+KcWkqntp5S_d$HyK delta 400 zcmWm9rBXyu002q>7tt+dg-H|0R|ajm=Q)9W1I;l znPQq5W|?E21r}LinH5&~zh_uuoeehGVw)Xy*<+sr4mskO6HYnfoC_|wdW+N7U8jE; CPOtI+ diff --git a/openwebui/pipelines/.dockerignore b/openwebui/pipelines/.dockerignore new file mode 100644 index 0000000..a56310c --- /dev/null +++ b/openwebui/pipelines/.dockerignore @@ -0,0 +1,11 @@ +.venv +.env +.git +.gitignore +.github +Dockerfile +examples +docs +*.md +dev.sh +dev-docker.sh diff --git a/openwebui/pipelines/.github/FUNDING.yml b/openwebui/pipelines/.github/FUNDING.yml new file mode 100644 index 0000000..ef274fa --- /dev/null +++ b/openwebui/pipelines/.github/FUNDING.yml @@ -0,0 +1 @@ +github: tjbck diff --git a/openwebui/pipelines/.github/workflows/build-docker-image.yaml b/openwebui/pipelines/.github/workflows/build-docker-image.yaml new file mode 100644 index 0000000..7f2bd6e --- /dev/null +++ b/openwebui/pipelines/.github/workflows/build-docker-image.yaml @@ -0,0 +1,120 @@ +name: Build Docker Image + +on: + workflow_call: + inputs: + build_args: 
+ required: false + default: "" + type: string + cache_id: + required: true + type: string + extract_flavor: + required: false + default: "" + type: string + image_name: + required: true + type: string + image_tag: + required: false + default: "" + type: string + registry: + required: false + default: ghcr.io + type: string + +env: + FULL_IMAGE_NAME: ${{ inputs.registry }}/${{ inputs.image_name }} + +jobs: + build-image: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + strategy: + fail-fast: false + matrix: + platform: + - linux/amd64 + - linux/arm64 + + steps: + - name: Prepare + run: | + platform=${{ matrix.platform }} + echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV + + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ${{ inputs.registry }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata for Docker images + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.FULL_IMAGE_NAME }} + tags: | + type=ref,event=branch + type=ref,event=tag + type=sha,prefix=git- + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + ${{ inputs.image_tag }} + flavor: | + latest=${{ github.ref == 'refs/heads/main' }} + ${{ inputs.extract_flavor }} + + - name: Extract metadata for Docker cache + id: cache-meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.FULL_IMAGE_NAME }} + tags: | + type=ref,event=branch + flavor: | + prefix=cache-${{ inputs.cache_id }}-${{ matrix.platform }}- + + - name: Build Docker image + uses: docker/build-push-action@v5 + id: build + with: + context: . 
+ push: true + platforms: ${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} + outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=${{ steps.cache-meta.outputs.tags }} + cache-to: type=registry,ref=${{ steps.cache-meta.outputs.tags }},mode=max + build-args: | + BUILD_HASH=${{ github.sha }} + ${{ inputs.build_args }} + + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ inputs.cache_id }}-${{ env.PLATFORM_PAIR }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 diff --git a/openwebui/pipelines/.github/workflows/docker-build.yaml b/openwebui/pipelines/.github/workflows/docker-build.yaml new file mode 100644 index 0000000..62398b4 --- /dev/null +++ b/openwebui/pipelines/.github/workflows/docker-build.yaml @@ -0,0 +1,60 @@ +name: Create and publish Docker images with specific build args + +on: + workflow_dispatch: + push: + branches: + - main + - dev + +jobs: + build-main-image: + uses: ./.github/workflows/build-docker-image.yaml + with: + image_name: ${{ github.repository }} + cache_id: main + + build-cuda-image: + uses: ./.github/workflows/build-docker-image.yaml + with: + image_name: ${{ github.repository }} + cache_id: cuda + image_tag: type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=cuda + extract_flavor: suffix=-cuda,onlatest=true + build_args: | + USE_CUDA=true + + build-minimum-image: + uses: ./.github/workflows/build-docker-image.yaml + with: + image_name: ${{ github.repository }} + cache_id: minimum + image_tag: type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=minimum + extract_flavor: suffix=-minimum,onlatest=true + build_args: | + MINIMUM_BUILD=true + + merge-main-images: + uses: 
./.github/workflows/merge-docker-images.yaml + needs: [build-main-image] + with: + image_name: ${{ github.repository }} + cache_id: main + + merge-cuda-images: + uses: ./.github/workflows/merge-docker-images.yaml + needs: [build-cuda-image] + with: + image_name: ${{ github.repository }} + cache_id: cuda + extract_flavor: suffix=-cuda,onlatest=true + extract_tags: type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=cuda + + merge-minimum-images: + uses: ./.github/workflows/merge-docker-images.yaml + needs: [build-minimum-image] + with: + image_name: ${{ github.repository }} + cache_id: minimum + extract_flavor: suffix=-minimum,onlatest=true + extract_tags: type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=minimum diff --git a/openwebui/pipelines/.github/workflows/merge-docker-images.yaml b/openwebui/pipelines/.github/workflows/merge-docker-images.yaml new file mode 100644 index 0000000..512f42c --- /dev/null +++ b/openwebui/pipelines/.github/workflows/merge-docker-images.yaml @@ -0,0 +1,71 @@ +name: Merge Docker Images + +on: + workflow_call: + inputs: + cache_id: + required: true + type: string + extract_flavor: + required: false + default: "" + type: string + extract_tags: + required: false + default: "" + type: string + image_name: + required: true + type: string + registry: + required: false + default: ghcr.io + type: string + +env: + FULL_IMAGE_NAME: ${{ inputs.registry }}/${{ inputs.image_name }} + +jobs: + merge-images: + runs-on: ubuntu-latest + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + pattern: digests-${{ inputs.cache_id }}-* + path: /tmp/digests + merge-multiple: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ${{ inputs.registry }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata for Docker 
images + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.FULL_IMAGE_NAME }} + tags: | + type=ref,event=branch + type=ref,event=tag + type=sha,prefix=git- + ${{ inputs.extract_tags }} + flavor: | + latest=${{ github.ref == 'refs/heads/main' }} + ${{ inputs.extract_flavor }} + + - name: Create manifest list and push + working-directory: /tmp/digests + run: | + docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ + $(printf '${{ env.FULL_IMAGE_NAME }}@sha256:%s ' *) + + - name: Inspect image + run: | + docker buildx imagetools inspect ${{ env.FULL_IMAGE_NAME }}:${{ steps.meta.outputs.version }} diff --git a/openwebui/pipelines/.gitignore b/openwebui/pipelines/.gitignore new file mode 100644 index 0000000..d454a74 --- /dev/null +++ b/openwebui/pipelines/.gitignore @@ -0,0 +1,12 @@ +__pycache__ +.env + +/litellm + + +pipelines/* +!pipelines/.gitignore +.DS_Store + +.venv +venv/ \ No newline at end of file diff --git a/openwebui/pipelines/.webui_secret_key b/openwebui/pipelines/.webui_secret_key new file mode 100644 index 0000000..1a0f016 --- /dev/null +++ b/openwebui/pipelines/.webui_secret_key @@ -0,0 +1 @@ +o1jkf6Q9i+/33ijt \ No newline at end of file diff --git a/openwebui/pipelines/CONTRIBUTING.md b/openwebui/pipelines/CONTRIBUTING.md new file mode 100644 index 0000000..3fd6f03 --- /dev/null +++ b/openwebui/pipelines/CONTRIBUTING.md @@ -0,0 +1,50 @@ +## Contributing to Pipelines + +🚀 **Welcome, Contributors!** 🚀 + +We are thrilled to have you join the Pipelines community! Your contributions are essential to making Pipelines a powerful and versatile framework for extending OpenAI-compatible applications' capabilities. This document provides guidelines to ensure your contributions are smooth and effective. + +### 📌 Key Points + +- **Scope of Pipelines:** Remember that Pipelines is a framework designed to enhance OpenAI interactions, specifically through a plugin-like approach. 
Focus your contributions on making Pipelines more robust, flexible, and user-friendly within this context. +- **Open WebUI Integration:** Pipelines is primarily designed to work with Open WebUI. While contributions that expand compatibility with other platforms are welcome, prioritize functionalities that seamlessly integrate with Open WebUI's ecosystem. + +### 🚨 Reporting Issues + +Encountered a bug or have an idea for improvement? We encourage you to report it! Here's how: + +1. **Check Existing Issues:** Browse the [Issues tab](https://github.com/open-webui/pipelines/issues) to see if the issue or suggestion has already been reported. +2. **Open a New Issue:** If it's a new issue, feel free to open one. Follow the issue template for clear and concise reporting. Provide detailed descriptions, steps to reproduce, expected outcomes, and actual results. This helps us understand and resolve the issue efficiently. + +### 🧭 Scope of Support + +- **Python Fundamentals:** Pipelines leverages Python. Basic Python knowledge is essential for contributing effectively. + +## 💡 Contributing + +Ready to make a difference? Here's how you can contribute to Pipelines: + +### 🛠 Pull Requests + +We encourage pull requests to improve Pipelines! Here's the process: + +1. **Discuss Your Idea:** If your contribution involves significant changes, discuss it in the [Issues tab](https://github.com/open-webui/pipelines/issues) first. This ensures your idea aligns with the project's vision. +2. **Coding Standards:** Follow the project's coding standards and write clear, descriptive commit messages. +3. **Update Documentation:** If your contribution impacts documentation, update it accordingly. +4. **Submit Your Pull Request:** Submit your pull request and provide a clear summary of your changes. + +### 📚 Documentation + +Help make Pipelines more accessible by: + +- **Writing Tutorials:** Create guides for setting up, using, and customizing Pipelines. 
+- **Improving Documentation:** Enhance existing documentation for clarity, completeness, and accuracy. +- **Adding Examples:** Contribute pipelines examples that showcase different functionalities and use cases. + +### 🤔 Questions & Feedback + +Got questions or feedback? Join our [Discord community](https://discord.gg/5rJgQTnV4s) or open an issue. We're here to help! + +## 🙏 Thank You! + +Your contributions are invaluable to Pipelines' success! We are excited to see what you bring to the project. Together, we can create a powerful and versatile framework for extending OpenAI capabilities. 🌟 \ No newline at end of file diff --git a/openwebui/pipelines/Dockerfile b/openwebui/pipelines/Dockerfile new file mode 100644 index 0000000..9ee4aee --- /dev/null +++ b/openwebui/pipelines/Dockerfile @@ -0,0 +1,77 @@ +FROM python:3.11-slim-bookworm AS base + +# Use args +ARG MINIMUM_BUILD +ARG USE_CUDA +ARG USE_CUDA_VER +ARG PIPELINES_URLS +ARG PIPELINES_REQUIREMENTS_PATH + +## Basis ## +ENV ENV=prod \ + PORT=9099 \ + # pass build args to the build + MINIMUM_BUILD=${MINIMUM_BUILD} \ + USE_CUDA_DOCKER=${USE_CUDA} \ + USE_CUDA_DOCKER_VER=${USE_CUDA_VER} + +# Install GCC and build tools. +# These are kept in the final image to enable installing packages on the fly. +RUN apt-get update && \ + apt-get install -y gcc build-essential curl git && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +# Install Python dependencies +COPY ./requirements.txt . +COPY ./requirements-minimum.txt . 
+RUN pip3 install uv +RUN if [ "$MINIMUM_BUILD" != "true" ]; then \ + if [ "$USE_CUDA_DOCKER" = "true" ]; then \ + pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir; \ + else \ + pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir; \ + fi \ + fi +RUN if [ "$MINIMUM_BUILD" = "true" ]; then \ + uv pip install --system -r requirements-minimum.txt --no-cache-dir; \ + else \ + uv pip install --system -r requirements.txt --no-cache-dir; \ + fi + + +# Layer on for other components +FROM base AS app + +ENV PIPELINES_URLS=${PIPELINES_URLS} \ + PIPELINES_REQUIREMENTS_PATH=${PIPELINES_REQUIREMENTS_PATH} + +# Copy the application code +COPY . . + +# Fix write permissions for OpenShift / non-root users +RUN set -eux; \ + for d in /app /root /.local /.cache; do \ + mkdir -p "$d"; \ + done; \ + chgrp -R 0 /app /root /.local /.cache || true; \ + chmod -R g+rwX /app /root /.local /.cache || true; \ + find /app -type d -exec chmod g+s {} + || true; \ + find /root -type d -exec chmod g+s {} + || true; \ + find /.local -type d -exec chmod g+s {} + || true; \ + find /.cache -type d -exec chmod g+s {} + || true + +# Run a docker command if either PIPELINES_URLS or PIPELINES_REQUIREMENTS_PATH is not empty +RUN if [ -n "$PIPELINES_URLS" ] || [ -n "$PIPELINES_REQUIREMENTS_PATH" ]; then \ + echo "Running docker command with PIPELINES_URLS or PIPELINES_REQUIREMENTS_PATH"; \ + ./start.sh --mode setup; \ + fi + +# Expose the port +ENV HOST="0.0.0.0" +ENV PORT="9099" + +# if we already installed the requirements on build, we can skip this step on run +ENTRYPOINT [ "bash", "start.sh" ] diff --git a/openwebui/pipelines/LICENSE b/openwebui/pipelines/LICENSE new file mode 100644 index 0000000..e05cc0e --- /dev/null +++ b/openwebui/pipelines/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Timothy Jaeryang Baek + +Permission is hereby granted, free of charge, to 
any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/openwebui/pipelines/README.md b/openwebui/pipelines/README.md new file mode 100644 index 0000000..6d99b69 --- /dev/null +++ b/openwebui/pipelines/README.md @@ -0,0 +1,194 @@ +

+ Pipelines Logo +

+ +# Pipelines: UI-Agnostic OpenAI API Plugin Framework + +> [!TIP] +> **DO NOT USE PIPELINES!** +> +> If your goal is simply to add support for additional providers like Anthropic or basic filters, you likely don't need Pipelines . For those cases, Open WebUI Functions are a better fit—it's built-in, much more convenient, and easier to configure. Pipelines, however, comes into play when you're dealing with computationally heavy tasks (e.g., running large models or complex logic) that you want to offload from your main Open WebUI instance for better performance and scalability. + + +Welcome to **Pipelines**, an [Open WebUI](https://github.com/open-webui) initiative. Pipelines bring modular, customizable workflows to any UI client supporting OpenAI API specs – and much more! Easily extend functionalities, integrate unique logic, and create dynamic workflows with just a few lines of code. + +## 🚀 Why Choose Pipelines? + +- **Limitless Possibilities:** Easily add custom logic and integrate Python libraries, from AI agents to home automation APIs. +- **Seamless Integration:** Compatible with any UI/client supporting OpenAI API specs. (Only pipe-type pipelines are supported; filter types require clients with Pipelines support.) +- **Custom Hooks:** Build and integrate custom pipelines. + +### Examples of What You Can Achieve: + +- [**Function Calling Pipeline**](/examples/filters/function_calling_filter_pipeline.py): Easily handle function calls and enhance your applications with custom logic. +- [**Custom RAG Pipeline**](/examples/pipelines/rag/llamaindex_pipeline.py): Implement sophisticated Retrieval-Augmented Generation pipelines tailored to your needs. +- [**Message Monitoring Using Langfuse**](/examples/filters/langfuse_filter_pipeline.py): Monitor and analyze message interactions in real-time using Langfuse. 
+- [**Message Monitoring Using Opik**](/examples/filters/opik_filter_pipeline.py): Monitor and analyze message interactions using Opik, an open-source platform for debugging and evaluating LLM applications and RAG systems. +- [**Rate Limit Filter**](/examples/filters/rate_limit_filter_pipeline.py): Control the flow of requests to prevent exceeding rate limits. +- [**Real-Time Translation Filter with LibreTranslate**](/examples/filters/libretranslate_filter_pipeline.py): Seamlessly integrate real-time translations into your LLM interactions. +- [**Toxic Message Filter**](/examples/filters/detoxify_filter_pipeline.py): Implement filters to detect and handle toxic messages effectively. +- **And Much More!**: The sky is the limit for what you can accomplish with Pipelines and Python. [Check out our scaffolds](/examples/scaffolds) to get a head start on your projects and see how you can streamline your development process! + +## 🔧 How It Works + +

+ Pipelines Workflow +

+ +Integrating Pipelines with any OpenAI API-compatible UI client is simple. Launch your Pipelines instance and set the OpenAI URL on your client to the Pipelines URL. That's it! You're ready to leverage any Python library for your needs. + +## ⚡ Quick Start with Docker + +> [!WARNING] +> Pipelines are a plugin system with arbitrary code execution — **don't fetch random pipelines from sources you don't trust**. + +### Docker + +For a streamlined setup using Docker: + +1. **Run the Pipelines container:** + + ```sh + docker run -d -p 9099:9099 --add-host=host.docker.internal:host-gateway -v pipelines:/app/pipelines --name pipelines --restart always ghcr.io/open-webui/pipelines:main + ``` + +2. **Connect to Open WebUI:** + + - Navigate to the **Settings > Connections > OpenAI API** section in Open WebUI. + - Set the API URL to `http://localhost:9099` and the API key to `0p3n-w3bu!`. Your pipelines should now be active. + +> [!NOTE] +> If your Open WebUI is running in a Docker container, replace `localhost` with `host.docker.internal` in the API URL. + +3. **Manage Configurations:** + + - In the admin panel, go to **Admin Settings > Pipelines tab**. + - Select your desired pipeline and modify the valve values directly from the WebUI. + +> [!TIP] +> If you are unable to connect, it is most likely a Docker networking issue. We encourage you to troubleshoot on your own and share your methods and solutions in the discussions forum. 
+ +If you need to install a custom pipeline with additional dependencies: + +- **Run the following command:** + + ```sh + docker run -d -p 9099:9099 --add-host=host.docker.internal:host-gateway -e PIPELINES_URLS="https://github.com/open-webui/pipelines/blob/main/examples/filters/detoxify_filter_pipeline.py" -v pipelines:/app/pipelines --name pipelines --restart always ghcr.io/open-webui/pipelines:main + ``` + +Alternatively, you can directly install pipelines from the admin settings by copying and pasting the pipeline URL, provided it doesn't have additional dependencies. + +That's it! You're now ready to build customizable AI integrations effortlessly with Pipelines. Enjoy! + +### Docker Compose together with Open WebUI + +Using [Docker Compose](https://docs.docker.com/compose/) simplifies the management of multi-container Docker applications. + +Here is an example configuration file `docker-compose.yaml` for setting up Open WebUI together with Pipelines using Docker Compose: + +```yaml +services: + openwebui: + image: ghcr.io/open-webui/open-webui:main + ports: + - "3000:8080" + volumes: + - open-webui:/app/backend/data + + pipelines: + image: ghcr.io/open-webui/pipelines:main + volumes: + - pipelines:/app/pipelines + restart: always + environment: + - PIPELINES_API_KEY=0p3n-w3bu! + +volumes: + open-webui: {} + pipelines: {} +``` + +To start your services, run the following command: + +``` +docker compose up -d +``` + +You can then use `http://pipelines:9099` (the name is the same as the service's name defined in `docker-compose.yaml`) as an API URL to connect to Open WebUI. + +> [!NOTE] +> The `pipelines` service is accessible only by `openwebui` Docker service and thus provide additional layer of security. + +## 📦 Installation and Setup + +Get started with Pipelines in a few easy steps: + +1. **Ensure Python 3.11 is installed.** +2. **Clone the Pipelines repository:** + + ```sh + git clone https://github.com/open-webui/pipelines.git + cd pipelines + ``` + +3. 
**Install the required dependencies:** + + ```sh + pip install -r requirements.txt + ``` + +4. **Start the Pipelines server:** + + ```sh + sh ./start.sh + ``` + +Once the server is running, set the OpenAI URL on your client to the Pipelines URL. This unlocks the full capabilities of Pipelines, integrating any Python library and creating custom workflows tailored to your needs. + +### Advanced Docker Builds +If you create your own pipelines, you can install them when the Docker image is built. For example, +create a bash script with the snippet below to collect files from a path, add them as install URLs, +and build the Docker image with the new pipelines automatically installed. + +NOTE: The pipelines module will still attempt to install any package dependencies found at in your +file headers at start time, but they will not be downloaded again. + +```sh +# build in the specific pipelines +PIPELINE_DIR="pipelines-custom" +# assuming the above directory is in your source repo and not skipped by `.dockerignore`, it will get copied to the image +PIPELINE_PREFIX="file:///app" + +# retrieve all the sub files +export PIPELINES_URLS= +for file in "$PIPELINE_DIR"/*; do + if [[ -f "$file" ]]; then + if [[ "$file" == *.py ]]; then + if [ -z "$PIPELINES_URLS" ]; then + PIPELINES_URLS="$PIPELINE_PREFIX/$file" + else + PIPELINES_URLS="$PIPELINES_URLS;$PIPELINE_PREFIX/$file" + fi + fi + fi +done +echo "New Custom Install Pipes: $PIPELINES_URLS" + +docker build --build-arg PIPELINES_URLS=$PIPELINES_URLS --build-arg MINIMUM_BUILD=true -f Dockerfile . +``` + +## 📂 Directory Structure and Examples + +The `/pipelines` directory is the core of your setup. Add new modules, customize existing ones, and manage your workflows here. All the pipelines in the `/pipelines` directory will be **automatically loaded** when the server launches. + +You can change this directory from `/pipelines` to another location using the `PIPELINES_DIR` env variable. 
+ +### Integration Examples + +Find various integration examples in the `/examples` directory. These examples show how to integrate different functionalities, providing a foundation for building your own custom pipelines. + +## 🎉 Work in Progress + +We’re continuously evolving! We'd love to hear your feedback and understand which hooks and features would best suit your use case. Feel free to reach out and become a part of our Open WebUI community! + +Our vision is to push **Pipelines** to become the ultimate plugin framework for our AI interface, **Open WebUI**. Imagine **Open WebUI** as the WordPress of AI interfaces, with **Pipelines** being its diverse range of plugins. Join us on this exciting journey! 🌍 diff --git a/openwebui/pipelines/blueprints/function_calling_blueprint.py b/openwebui/pipelines/blueprints/function_calling_blueprint.py new file mode 100644 index 0000000..ad6a386 --- /dev/null +++ b/openwebui/pipelines/blueprints/function_calling_blueprint.py @@ -0,0 +1,189 @@ +from typing import List, Optional +from pydantic import BaseModel +from schemas import OpenAIChatMessage +import os +import requests +import json + +from utils.pipelines.main import ( + get_last_user_message, + add_or_update_system_message, + get_tools_specs, +) + +# System prompt for function calling +DEFAULT_SYSTEM_PROMPT = ( + """Tools: {} + +If a function tool doesn't match the query, return an empty string. Else, pick a +function tool, fill in the parameters from the function tool's schema, and +return it in the format {{ "name": \"functionName\", "parameters": {{ "key": +"value" }} }}. Only pick a function if the user asks. Only return the object. Do not return any other text." +""" + ) + +class Pipeline: + class Valves(BaseModel): + # List target pipeline ids (models) that this filter will be connected to. + # If you want to connect this filter to all pipelines, you can set pipelines to ["*"] + pipelines: List[str] = [] + + # Assign a priority level to the filter pipeline. 
+ # The priority level determines the order in which the filter pipelines are executed. + # The lower the number, the higher the priority. + priority: int = 0 + + # Valves for function calling + OPENAI_API_BASE_URL: str + OPENAI_API_KEY: str + TASK_MODEL: str + TEMPLATE: str + + def __init__(self, prompt: str | None = None) -> None: + # Pipeline filters are only compatible with Open WebUI + # You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API. + self.type = "filter" + + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "function_calling_blueprint" + self.name = "Function Calling Blueprint" + self.prompt = prompt or DEFAULT_SYSTEM_PROMPT + self.tools: object = None + + # Initialize valves + self.valves = self.Valves( + **{ + "pipelines": ["*"], # Connect to all pipelines + "OPENAI_API_BASE_URL": os.getenv( + "OPENAI_API_BASE_URL", "https://api.openai.com/v1" + ), + "OPENAI_API_KEY": os.getenv("OPENAI_API_KEY", "YOUR_OPENAI_API_KEY"), + "TASK_MODEL": os.getenv("TASK_MODEL", "gpt-3.5-turbo"), + "TEMPLATE": """Use the following context as your learned knowledge, inside XML tags. + + {{CONTEXT}} + + +When answer to user: +- If you don't know, just say that you don't know. +- If you don't know when you are not sure, ask for clarification. +Avoid mentioning that you obtained the information from the context. +And answer according to the language of the user's question.""", + } + ) + + async def on_startup(self): + # This function is called when the server is started. 
+ print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + # If title generation is requested, skip the function calling filter + if body.get("title", False): + return body + + print(f"pipe:{__name__}") + print(user) + + # Get the last user message + user_message = get_last_user_message(body["messages"]) + + # Get the tools specs + tools_specs = get_tools_specs(self.tools) + + prompt = self.prompt.format(json.dumps(tools_specs, indent=2)) + content = "History:\n" + "\n".join( + [ + f"{message['role']}: {message['content']}" + for message in body["messages"][::-1][:4] + ] + ) + f"Query: {user_message}" + + result = self.run_completion(prompt, content) + messages = self.call_function(result, body["messages"]) + + return {**body, "messages": messages} + + # Call the function + def call_function(self, result, messages: list[dict]) -> list[dict]: + if "name" not in result: + return messages + + function = getattr(self.tools, result["name"]) + function_result = None + try: + function_result = function(**result["parameters"]) + except Exception as e: + print(e) + + # Add the function result to the system prompt + if function_result: + system_prompt = self.valves.TEMPLATE.replace( + "{{CONTEXT}}", function_result + ) + + messages = add_or_update_system_message( + system_prompt, messages + ) + + # Return the updated messages + return messages + + return messages + + def run_completion(self, system_prompt: str, content: str) -> dict: + r = None + try: + # Call the OpenAI API to get the function response + r = requests.post( + url=f"{self.valves.OPENAI_API_BASE_URL}/chat/completions", + json={ + "model": self.valves.TASK_MODEL, + "messages": [ + { + "role": "system", + "content": system_prompt, + }, + { + "role": "user", + "content": content, + }, + ], + # TODO: dynamically add 
response_format? + # "response_format": {"type": "json_object"}, + }, + headers={ + "Authorization": f"Bearer {self.valves.OPENAI_API_KEY}", + "Content-Type": "application/json", + }, + stream=False, + ) + r.raise_for_status() + + response = r.json() + content = response["choices"][0]["message"]["content"] + + # Parse the function response + if content != "": + result = json.loads(content) + print(result) + return result + + except Exception as e: + print(f"Error: {e}") + + if r: + try: + print(r.json()) + except: + pass + + return {} diff --git a/openwebui/pipelines/config.py b/openwebui/pipelines/config.py new file mode 100644 index 0000000..28b1031 --- /dev/null +++ b/openwebui/pipelines/config.py @@ -0,0 +1,24 @@ +import os +import logging +#################################### +# Load .env file +#################################### + +try: + from dotenv import load_dotenv, find_dotenv + + load_dotenv(find_dotenv("./.env")) +except ImportError: + print("dotenv not installed, skipping...") + +# Define log levels dictionary +LOG_LEVELS = { + 'DEBUG': logging.DEBUG, + 'INFO': logging.INFO, + 'WARNING': logging.WARNING, + 'ERROR': logging.ERROR, + 'CRITICAL': logging.CRITICAL +} + +API_KEY = os.getenv("PIPELINES_API_KEY", "0p3n-w3bu!") +PIPELINES_DIR = os.getenv("PIPELINES_DIR", "./pipelines") diff --git a/openwebui/pipelines/dev-docker.sh b/openwebui/pipelines/dev-docker.sh new file mode 100644 index 0000000..c9d256a --- /dev/null +++ b/openwebui/pipelines/dev-docker.sh @@ -0,0 +1,9 @@ +# Removes any existing Open WebUI and Pipelines containers/ volumes - uncomment if you need a fresh start +# docker rm --force pipelines +# docker rm --force open-webui +# docker volume rm pipelines +# docker volume rm open-webui + +# Runs the containers with Ollama image for Open WebUI and the Pipelines endpoint in place +docker run -d -p 9099:9099 --add-host=host.docker.internal:host-gateway -v pipelines:/app/pipelines --name pipelines --restart always --env-file .env 
ghcr.io/open-webui/pipelines:latest +docker run -d -p 3000:8080 -p 11434:11434 --add-host=host.docker.internal:host-gateway -v ~/.ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always -e OPENAI_API_BASE_URL=http://host.docker.internal:9099 -e OPENAI_API_KEY=0p3n-w3bu! -e OLLAMA_HOST=0.0.0.0 ghcr.io/open-webui/open-webui:ollama \ No newline at end of file diff --git a/openwebui/pipelines/dev.sh b/openwebui/pipelines/dev.sh new file mode 100644 index 0000000..715aeca --- /dev/null +++ b/openwebui/pipelines/dev.sh @@ -0,0 +1,2 @@ +PORT="${PORT:-9099}" +uvicorn main:app --port $PORT --host 0.0.0.0 --forwarded-allow-ips '*' --reload \ No newline at end of file diff --git a/openwebui/pipelines/docker-compose.yaml b/openwebui/pipelines/docker-compose.yaml new file mode 100644 index 0000000..be9925b --- /dev/null +++ b/openwebui/pipelines/docker-compose.yaml @@ -0,0 +1,19 @@ +services: + openwebui: + image: ghcr.io/open-webui/open-webui:main + ports: + - "3000:8080" + volumes: + - open-webui:/app/backend/data + + pipelines: + image: ghcr.io/open-webui/pipelines:main + volumes: + - pipelines:/app/pipelines + restart: always + environment: + - PIPELINES_API_KEY=0p3n-w3bu! + +volumes: + open-webui: {} + pipelines: {} \ No newline at end of file diff --git a/openwebui/pipelines/docs/CODE_OF_CONDUCT.md b/openwebui/pipelines/docs/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..1f27d32 --- /dev/null +++ b/openwebui/pipelines/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. 
+ +We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contribute to a positive environment for our community include: + +- Demonstrating empathy and kindness toward other people +- Being respectful of differing opinions, viewpoints, and experiences +- Giving and gracefully accepting constructive feedback +- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience +- Focusing on what is best not just for us as individuals, but for the overall community + +Examples of unacceptable behavior include: + +- The use of sexualized language or imagery, and sexual attention or advances of any kind +- Trolling, insulting or derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or email address, without their explicit permission +- **Spamming of any kind** +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies within all community spaces and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, spamming, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at hello@openwebui.com. 
All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Temporary Ban + +**Community Impact**: Any violation of community standards, including but not limited to inappropriate language, unprofessional behavior, harassment, or spamming. + +**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. + +### 2. Permanent Ban + +**Community Impact**: Repeated or severe violations of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the community. +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. 
\ No newline at end of file diff --git a/openwebui/pipelines/docs/SECURITY.md b/openwebui/pipelines/docs/SECURITY.md new file mode 100644 index 0000000..c6ececf --- /dev/null +++ b/openwebui/pipelines/docs/SECURITY.md @@ -0,0 +1,32 @@ +# Security Policy + +Our primary goal is to ensure the protection and confidentiality of sensitive data stored by users on Pipelines. Additionally, we aim to maintain a secure and trusted environment for executing Pipelines, which effectively function as a plugin system with arbitrary code execution capabilities. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| main | :white_check_mark: | +| others | :x: | + +## Secure Pipelines Execution + +To mitigate risks associated with the Pipelines plugin system, we recommend the following best practices: + +1. **Trusted Sources**: Only fetch and execute Pipelines from trusted sources. Do not retrieve or run Pipelines from untrusted or unknown origins. + +2. **Fixed Versions**: Instead of pulling the latest version of a Pipeline, consider using a fixed, audited version to ensure stability and security. + +3. **Sandboxing**: Pipelines are executed in a sandboxed environment to limit their access to system resources and prevent potential harm. + +4. **Code Review**: All Pipelines undergo a thorough code review process before being approved for execution on our platform. + +5. **Monitoring**: We continuously monitor the execution of Pipelines for any suspicious or malicious activities. + +## Reporting a Vulnerability + +If you discover a security issue within our system, please notify us immediately via a pull request or contact us on discord. We take all security reports seriously and will respond promptly. + +## Product Security + +We regularly audit our internal processes and system's architecture for vulnerabilities using a combination of automated and manual testing techniques. 
We are committed to implementing Static Application Security Testing (SAST) and Software Composition Analysis (SCA) scans in our project to further enhance our security posture. \ No newline at end of file diff --git a/openwebui/pipelines/docs/images/header.png b/openwebui/pipelines/docs/images/header.png new file mode 100644 index 0000000000000000000000000000000000000000..a63b0be899d4519d0aa9d6f530b582fa05ecc1d7 GIT binary patch literal 28696 zcmeHwcT`hr^KMX%g2xuH(i>BF0&U^@&m*XajsJN-m*4L{4?tj9tN-)J z=6{`8fbyU3fEcVS1ro7h6$oSnE5I{WJO~0=i7NmIE72AhVI>DaAS;;}0AYnBKvqZs zz`+Vh;22?rB&?7G4h~jG0>=m|Bw>XlaB#3f5`Ya>lmuXe6(s=}VTC08_mTvT9L#z2 z{5LDCv!6>>3vsSOZ^?1(M2o~K$m@Syj?6y=<3s-vj0OKG7@Pf5Fuw3F?OOPs+I7l5 zwd<9ofQ?zP3NXS7Rv?fS4+20~i7S9Uu0&f1WF-dyZ&=C9kQKfHaIiuWI7V0@2`eOl zgM$^40Bo>A5`Ym_NCGgz3Q1TY2>=EwB;l74R!G7MN%#eX6(!-nh$QTuncSiBRKw!S z?9$`m*h>?Eaa-Q}k$WP3CicZWSNT8Ic<%ALWgP3VS}02CK2t)jwyc5A*$VBKslapJUu+|CF_LW&SCS?-u^2#C}KcpX&I^a(=n( zuOBPc`P&vN*!kNQE1vbYEh1Lp&fmsZiOPT5;{R063Q=CZ_h{<;$B!RJo6ZH4xgLM?qH7TR5_aIS1{|FL7oTmu7-!p+UG?0IJ8Aoo)u zZcJ^`S;Fvh*ZEpu+qP|Ob#dFMAK4)l!w7q|S0$H0$E(&2z zE>w9B)hsmM7~JwTsP;)8)astX`La73#nh7`1~-eCnZekGbHojy>^bI8cxGnug9k{#M$wr}5Vg^!t47l%5Z zY0k;XarN*xF#SlOW)?P39ilp6Z9q|kip@Gg1Blp-d9})CU<>Hkh2e(8xgc4CW&WY7 z!=ndb2eHXU9!PS1g5Uhi#EAJ3Z~d(~Mk%aTBMIri3}7m(M!;k)@~`lvMbXC?3}!^f zC~NSHP2u?ljCt*RW$?6HZ)Je@Doa}jhoTK`2Z@J2a3;3z9~Yy9(?$JF~p4 zj6|DXlK~K2iP+KanMjsv@JM<<+ndO>?bGAkmD@6-Q(lP^r)Wfp{Gr8}(zzbej_e_= zz^UC9{+SsW<`SV(daNHWo3CFQa-BVAD{7HX3p(rQXyF((eR@^z{=#V?X4b5js`sGT zGBnx8mx&T1LEuw!rcUN+s*1sgf)H^FaT{^RjZiZejIP)kJ^oR}Q7!ByqOX~eSELYF z_n@=g;Pxq|lW@GHQ)EJMaq%*SS}c|coNjYm;QXhDN7TM&q^Emc_FIJc{E#(IdruU% zYQ05Se-EW=k)io8|8c;P9yIQobY#coQ~O%9|NQe$Lu{rEY0-x{Iq>g~j6JLIcmh>A z^8I`8KF#oqlvsHKm(6vxQv$8du;7M6ej50pKMBH=^J1y?n#hIUHWgmRXp1jQNhf?U zl*OioXXZ>Gt!`0h;vrg3qbE>u!_k{0%)zD^!=Fk8V{q@?-Q5MRCaXL7@W^J1H}A?U z%ERXUH#WPtxXf!ep_x0V)Ab1=f@_TVDbUBN${VztXzB}$b_+pDxJv4>nOOV{35WVyc{frKrSS6c;{ z2;!cpyu3W6CxwoLjMbs-Ej#x>vn?zvMq((4-ePx~r^oCG+k48@3qNLOXaCED?gfQ~ 
z*ds1Y!-8LEBYHOQV;JQk@%8rvTE^pB05UU8o{PK@ENitE=$^wos9Z zqK|Udx;2LU6gr)b+bA7LYi&Ix+>)Jl`}XZvhfL@zqRh5`o%qAIkIBAgt1BuV%_2@m ziMy|fB$4XkII$1+3U$KN5SMSBE|jYuXJ2+r5*RMuypDP_jODkA`#}`deQhMHX%2wb+SUN4SY0JdM2me?)M#-i(yJZG49`%%{E`=Z9C^X#3@Ren z-(RKZNRdCxZ%rL=|F(xmTr6Fo_Z+H&5kx2pnBmdGhsL_|b;qGaqqXLB#o{97%h$um zu>m)>f`@K$O!LPlT~qz#zJmHu9b_^Ymn1-V$6^I9`ifb-FC7C8?}|j$1VU^LDI8P= zO!j|n3qPW%nITnG(iOZJ6@g>3hsG+!IBVK$mM9dueS2 z)2OE7&=dZLzcMew48j*?eb8@`lauMbi7+7yjtq@R-U1DNZdZ$gmMa`MP&G9|a)D_| z?Am3bOWg@@037k7hZzYu^>a_t9yicLwkwir(xQdwJj2` z0~n*?qT|w&zCHzUP0JZdj|cq6;gIOY^%w4E{a_1(fqpjXb(lCyNv zWO-@I1N%zZtW9&rFM$6u;5_z=o#rD1rHkoN|EGS+04*;CLuFF z&VhVp4uHGtA&`jl5NYn7DeWfFy`p=d(c$z))lr=#x=#C#A07pY{tJ`fO7jSd)A%{% zN--oJgdz(awvt|HLPA2P3T^>_{`aRf32&{LC4m)mq;2o z+S+~){+!*17Snie+{nnvRq^+LT*U8RwyhhUNI?!B9BE27)mfN2X7iTb_;j-t8fVv( zrk1E9Ou>R6={MGav$i$Ub+ofH}Tk7rTmiqbg zXVDx9&A=D5;+XEZ=G;`MW|Wi>Iu~EVdK!3Wk&My7(r7e;YR)Edf<%rH<|iOPp8=)$ znLFQe?E;`Aw9==DzOviRcEq!Uz_X?m7x#AurzQJTZ`!nJgnnePX$e2@;ll@oaAZcp z`1WSnm!-O1Es)mm&VjA!x6f9dwQ%RIu|;j!2T!J=(xpngdOJF-kpp+{-t9O8eNVhJ zld@4qRW3p_C~5kwd5`&;hjLaM6-Alm`&`}y;FhSHKd-x29*Pkd zRF$tW#Ta>*o`iU$&#J=eQjtGjF4;oVA<}dcK~z-rnA> z=5bK6i~aW2pEzM@jO^sIBS(((vAjwB@gX!4$>}cniOTbyKxBRSP7HD#jvQ7$EjE6J zBwpV7eg5)uyU>!N+w5&8iSzMvd~EahEIYNkjm>3jU%v62^Pm%J4zM+=MA#E;ACkeC zGV~Ks=_d#3%F^1YsA;%fekyEXm$Ufi^feTykpJPk6|cM`EE=X33`qe*DTfgd zCBlw;Q`IgN5R-axU}w7bH)V1wzR25H8jlwu((|!N`{n{qO*Gr#WGV=hfSJ`HBe0|`|kIxH_I-`0WtgA7A=Z_pM z#?3AS5utbqB9z#UP1t%Bg9@+oDXdp^sP@9gOjV7}lj*f#6&2l#Uaccd#bb+Gng?oh zJQo&S-_f`Wc@?u9jP@3ej*e7|XWBeMw~&w6+S|ih;CZVVUWNqr?wcJJ+H<+?7r)g< zR3^h-A03f*sOtH!LkTUb-R=+{hBG>E2R%hp>ua39yj7CCyGc{_+}B^M0BGNlRIhWUhPwYMe zjFAYQc9i-w;ZSdlv03p0S~_JJ{Oi?up4vZ0AF&d~VzF%?Hjb&%G5{&D5(W`n$gjPn z<>cd#r;t|(K9_4_J!43!&#)OrpZfOSbyB#+PTFpj-WOO?U%X|hxxEC}?)h*FL9ElI zA6xu<)gr1T#5h#8-Y4}oH)X8n)tx7;&2++l4jVu11LYX&Fxc%aj6e(`PL2XMe;(cV zD}YuD{poMV|G;z8lV^cGo}LGb4U4@zGlCc0VY;dV0WS;;M8KItz2nL5ravmHs`{TA z)A~-qr@Y=B7lu+x5yE#1P~&+Ac`<1N&&{8RsCwnf9FUjCav~C*R2{|~2*3%#!>+V! 
z;bVQA)(>@vX($h0AWPUmvu(z^+}lG{<6e{`V@qH9`V`KX@QW5vLG-j#Ek;-__!}yT zC4Q?!3@wo26oUN|(Tz?A4Y?)T>)k@jq{nceCQPHV05|$&pac|~$FWsJmu-nHH{K{J zD|cGmxGd^!HZgf6N?g+0%$b^ewK7>n@`4NLWrBNkvdTBq)e5_jO9u7OH_W(Ow{8{o zt4e7TLYQ7-vM{a?ePJ_?3pzQuT^f+|jT(U)eD;KXJv&eo7J6lqgtpo13n*0KcG*Cp zB8QmZL`cO_U6Xlt0`?nF*au}YnY}I|#J&W9D#z92vH)d3pm)vK0V^=O6s!ig04>qy z0kpk?LDxHc_;B0%kQyKI@ta;xq+Y-7f& zw}XN(tF@(6gn3`aFlrIM#KPme*fnTL6+|F|a*FHsJNS7Ulmf~L>--dcBq&6bu9CDY z0HaV>E((HZVkpIG(33`*-Q%!p=RvxM??^gY8T%vk(LJ+j68*>Y=A&pr3|+D7sl;TQ zf^aIdV@`YiYg*sa(NR~X$uIZ5u$!01VdC1MDH)nU_r{d2wl}M)BNqanwvUauEw0aY zMMPE0Wa&7XZJA8I!-`X=xw;1uf5G>piZM#A0q{n|V`kl+F$W!Q=XdcO?x=oR#H`kR z!_C=^DBqlo&`i~!CcybesH_AGws!vcd_TABxf;o1+k8XeOJPsjv&?nywIMpb-_17G zjSFRd1>{UfN$zp?Jy3ZFh2w{??e+69XM?X?k?EA~+K*kMnzwGBYUaEB`}Zf@N{2oX zSNGeSfWgQHTP2Qo43I)KgBe$ARK{BTl*GCNs*)SK14B2dD4)_$>FgndhqqqP=z3B3 zLaX+Xm}cOiq2!s#f%*RVOVVA3(Xxe+-@bhdU3z{jO~VK+TJ&|pFI%;A^AZTu)YKGb zE)m8!waX7=ELTu)bk_0vRb=}`i2Ljcr~df-5*H=F7fw$dTKLKIGCzktI^9#`T+AN< z=btZn0MfhKnuHfWyHzYuf{i`}2G-?eWoPZIcXGlFAo6Z;NSsZK>!4?O6S*2NdsWAg zJi|lwE=ppg!7X>MpC>yzJ1bPzOQjTQq@lZGsF566YB^S6gGQsVR{7@N;EE*`t0((jg{7gJ z59u?@)*_x3j*ieC5_kEt(#1s}wNl#Dw#=X^YO~H#{}pcgNOy=ALB#Cr?F$gWGXNoN z9Q`UQPB|qd<;lS<7&t%arRdI`#w}6mkBcYx!!;di{fsc;jeK9ukO&Dre*BTA`_D1` zzHXMhT-zh9!#p~vE~8DXI3DN#!zM6AzJ}S~bu&xG9POO$Ae4w08>NV2uy}e|y>nz- z4w+lw9UDwm$zIkmKk+{<|!?iv*89zgWe*M@mcZMFs~~Q0i5Ij`N8i z^2_8|61*Bm-P$Eo99Ho?yt8^6!s4&!1gBN*u8O$S^f(H2usTExo{?}udDYu_CUrYT zAT!@D?VQn%og?20>`G%TuA`?DHdoi~AvcbtZH6-qu8DTLq^YJywTa;A0{C(3pOufB zVg`H?k_TT&PSzow`#e8Z+n!zlqT@c-D_5{3)C4y&`kwH|)Sj#0tC_$f%Z>+W;u!it zs=$trx*Mz4TBa{9jOfT-)wj384uw#wYiA7ar~-x>*_M8LB=z(SH~w3QSi!11ncgnN zok7>HA2z}ii6c-$BANE1a#fvvK`bM)2=s}H;U$~8x$T6PX!z!R(#`KUQd}FUH*Wqs zE@b3NQN7P5x#~9`78XK@%%&7`N0YS7Jk`-24?w)r)e-yd3k_{YED+ZJTBoiEFHE7e z!dOb34g8FyQjj{VWrbT@O~l&oOF8UU`|NkJ_R@g*>NbIO1oljyZT_C*lwlDQiIt8!fzy!6CZD(Wb~Svaac9})}l0wp7FJ9dFwHyfRE>AByo8EeT;0*%5bLUPR0?toSlEfBv zDJuGfhH4gRL2q@drB@1Bxk#NXaF2{Qny 
z6`qhF_Uwr5;7b<^5|Cj>FGFX$luwrzj#jew!Rlp5bd#;w-I6kM^?qdYBy9y}&(xIA znsWxPrB`=awtpK%3%pH5H6Kf(B0MXmIPM)YJ1Ms_i&&wu6KH)%cD@c9RAQ3Ix)UAz*Ub)RRA9*{7e#sOsw-?Y~;!aN-&q#gA05>o!4(`$jW&lSLz ztBPi;gjyZVHWVI{^>O7-k#_zdV3q37XHqo@#3Cy>Y3#>#ts_g|EhJoxvn@Ku0Sc!R25Fc`iyGdVs~ zRUI*A?KBSCTp%Sh5c}}qLtE^C{DA|x>0{9i??vjyi9Sguohmd3S_=|3v$S+E%2v<>VkP3(Ob^*^ZM-JEevxKvLYw7JIG8=Ax+tX&GZ$rH>8^RNs;8s)gtA%{7F|Qc)7XIV#O)Mthe*2=NWd))NOa{*p*zT)azw7 zH*p$zA|RytsjjjN^}!Z`0+Q)pt7f}TJf2c&m-tOJjy%g6$H!Hb0zI)uDT3t}A{7PX zqHTnlfTzQ2)6O?;(^f5}v2qpg1yzN%2W14y!>hbV!Pxk+!iq4*_-^m(kEY`c$`(Ot zv)1SD9!L{f)XbYy8y_F0+7t?ePs^qcocRYPvG6$4*>cE3_IPuQ#7^|*F^ zOBNH|c5%wQ_$%^dRaKgaYu_5-?A}XGpi)4%Rdxp|f2*l=_tw1K^Zq-~p=*Y?M7Dz0 z5%qiC1Yr{v#|c8=;|+ebowA_P8|#c+LU>LCO1R0Ykl9Ho-{1mTKfqt`S@bp`u&=5Kt< z@^bMUMZ4`oh!}_tcuM1GB~k3u`|HRyXU??wca|450IA_R3JL1q$B936@T8sn2Fn< z-ut+(yB0{lcN@M)4w~A%U2HTN=pzr74@|2AadKi@NRkB%*1lFE^bRNz=|iSo2e_bm zO|O2$eFl<#goBI6z_YRVpW_P$vZEe8+)10;n3QqY!E&_WYhTO<)8p|Rxjg~)){{)V zS#cko8oX8>@yruA#QK!5xKDa zPW!wG`>U^<%(rAFZD!urE3YrDY%mQ~7^(_-NeT8g43*g=uK6CbF9JiY*k^(nQ3+>q z&-g0+cle(isI*Flm1U%)oXC9zD)+HHy7>hh^;kn#(jm1P;K7hwS71ilYUQ1&>`s`d zf5(2#;AiMolhk!IYcam zk7d-HV_sSHLU_CU`4fPv8~-@p7g;I5Z(9FYap(4kE1-<7tE&w@7(KvZ%CecOEbF-B(-l6)DY*JhK*RNkYt^i2|bUcz5539)eZw508v2q&Q%8;t&NN= zjPY)KzN|$?u6ieX=i4}gK;%TPvQ;x*)~4EkIZ*Sojkf1oDa38o0+vQ0fga3_j)zY0 zt`Mro7X}3SPC3{QJ;dU|oOQld$V`WJowy1|sEghx#*)^QkdUx|=q{lkY@kBAa)NHM z;jdtzxs_W%TsX<&kxtqyN{)3tD6Ma#-5jiC(0{cnP_83DZVWm+*598tmF5u^rp4D1 z?zD@=x%69qZf^rkg|W+E$43(_v>WuehSMeR*g%~qQ1d%IK{d$f;^|p38T4zz@9*;G z)g{qWf`ftDCz`>YCoF|z_=|^gIyg!>+x5%a_*iFPMkBi!s@;K=wM*{x-;V%EyD7Dm z@f0-L^MSS(yDYWWFx|M?z}`zkHDRD#k=v!KDRaZ9jH8b!3Ba`RA*`U#D+mjGz6J~; z0h&XIxKV7UFMl|6seb&-Qre`Qo!!TgV|EDVRTW3KvQpiv(n04WMFM&abe3{Cr8`g+ zJ4{JE`B>~vth(o-7q?8HL@gtEFLvESJm_qUZLw-`j&wJE4X7U zhLI+cruUN&u=kMYf!=URY4Pd|JUE4AEiEsG}DHfCrGn}QdhGT9#;tzlpfw$<*KMvk~?9h5YT<4bDFv*8_mU84~{oovB7Cwa3Tzgj;ORz7R!wG z9chxjaP3!Vlwe}vf`RxvGci9=sBw+IM0OS9UaCG2R!u`^pgBVoi2VbE@TKHK)$(z1 zam!%JAnZ9@cfJ|R%Nf60 
z;wp+4LqLL2=D>u#$Rt>Bl8EOtpi-gnKF{HCv;7R;9h!jDjuIup%{;GzO||azzuBG= zp$*#6CMefQ8l*Sp($y-!87N_3&}ek5^I4s2Hvai@o*cyM;kqdU0rq~68i^h-7$gf? zq}s*CW?C5Mu`|1e3gCPZ{$(J_yTzf3O)WMI8GG!42Fod47Qc$QmuglkBaY2tMx$Cd z(?TYH@o_ExHzEs;A3xrfXPRtT`l+eu`(`#6$-wn-nxyaON3mWp9O(xVDR+}v!k>hw zo$7mtbWjVNy0GsuXoK-hK(l=s;4)@rx}o zajXP8qlVL?rVugkwKa>Z@eUjy0b4V7tcRoEG9>lAl9P5|ioxx4nW%^(M=m$7Qy0_l z|B$+(TcU(%Z! z8xhl5Hl(n6hHnIl;LMukGbS0=2S8SGj8j>>KXj65AW34r7LVKx);5DSCj@c> z)bE!K*ZveZA|>D%c`xxhxpG5rE9i*~UV5BVR#S;s>OW9O1w%+Zl>yWT$ihpn#F!s{ zPax^e^S--7Nd}8qnrjYkq6OG#hRz<+dFsq*-EJ%<2LT8-CzMF-!*HZR-%k!~bV;l$ z73#nyhA$Yw^1@m9;U=$_%q0o_6Sg_R7oLrdBPe_BparX>G5e7={Ir3LLc5<@_tytz zy8`X+pnCBuWrv| zU#a)VwaSF2DVpdai7HyX$S9aSX=?A8a~#-MpLGznhSQz5!8;7lhNmWK20& z00J}gv$?)HYuHv^Uf%zpE~dFC2{p^**{lX0W7U(Al2TKOaT(i6n*(}Xn>)Vx0VG3~ zR0(Dio}aCqg`Ey{=pL_qMKeD}tVurcxur1XrNgLyw7;Om( z@%;{^9czRyRx$|_{NoD3BH;%B8Nk*tPvPK3Z_miS=kjI7I$jx728dTKwPs%a6do1n z0LDi6r`%y@3Xg1bbZ{t~W=0FO`;s$uTHP~^)n#y$aO2M4Wb0u2ED@4+>*(>VsfGfB zeo+oAWiktm``&lzu_$gWU%Z-ho~s1zLB3;GosH}a}I_u)7#XUG}rdVS(-G% z*^vhVCFs2p6?temisU51hx*(-Ck8YaY$tI5hiWoIg{V2xwcS3M@$+W~QW3S|20t7u zL%lnE9Ju@Lm&nTrPd|S?2MyK(g~RF#`LIQekQooO99iuxF&W;04kv&BznU^jC6&j- z3~40DhBpGr>Lw_tTIi13tktEZvH^8y8q9Eji7W(MCqL8RD0^-sGhe+{*bkt- zBmsJvxMmxo-^wCg{l5Rc$xV5uQxQqFghtpGyz@a*uPAQhZP~^fqmc)*ooOIV6#0^T zvvLI(u%tq#QGvTsvx8TG?6B1Tan*Fir^d#-Hc}WMYD&=qh$s4tS+=6G>MB-IQITl9 z*E9js6Yk?v^ng+&l!*|#A?Igz-WQBF6il#ilc_%TME8k-$^g6ew^B7|cTEyq9Zv(( zYlv4jjH>d>qVH?hml5l*yL>!odlg-^x(-j4gI0%_NH&+#y}Ge%xvPIbgI#JzUgOI_ z#K>Uv#-LWErnuFdK3!%X&FMzGp67%&`P9`*feMO2TB~LgtcX&R7$Z^B#G-Js<0|Yl zBmyu122p2G(fjeKWPt_+hpO3+L*!kUvuY}-J%OI5nVm9tT3Pp}PoHwl>#F~nyra{# z&uK2u1Mw1lmUy&qc0DPeqa2%Vc9;A23$7mFxzjFaHYPqk7YzOFDVbz5hb(|=d94b( zTOrfwZ)%VR)$WqRKI;~H;M=4EWge8k`menpLsP%)C`1PLKzfOVTuPrNvdH!b@QE4va$zZPA+qrt9S~k3X z zfkKdV{p!`dZ+4>^x2drL?zOAHE8Ek3Lj6RLAxOM5^;* zi^@1E;pCc8HW$;8yorNXU7%R6@Ef&ceNXi^JGLZ&ub#2(1t~aqiOr;E@|EnD={$Z( zw8#fP`rU65iD{`DS^w8qO;0m4xT>@QK)~pD$^cbHRyNLpx79q}+!73ixK03CIu~@Z 
zdb-PQ6kLEANRsA|bQGkhP_7Jt1Cv>6+St3YiM@k>$%45n=^fV4Y$AeFM|OBS(H~&^_qX=H2H!rcyU@AGql^;|3p(>H}{( z0wrpkzj&V(fir$8i!-MM-jG{26y1i>nXItGwYD>T_1vfrvE#KAf}6lDaQJjMd)(3Y z&Y-u`+xch-WXfuWKr7!^+xL{<>(~D(zTqd@4zz>d!zbdKSwJYU|I;dadu_F%3IUa% z9^42-0DAQim4_587{+>c_M^k;g3AgS%YQ#4o!3j$+OI$1T{DMO&}RrVf&sDthQiWM z1y|6(`30yfL8w)!+^gp@Oyu0RWLJRF;&c2IK;fw|uMc-Aqg92}chCZ8CFCK>~-VV`i@at}|LtI%op%gx%puGI|5t-UY=H}+#cb`5XIQ;}R2C8oM^#s+emc;7A z4jP3tkUBb^Rg~Ypk7@#%WD4e;%<}z(;ZzCaq6F947j!wSGMd7%ExMIVMFf%)RDb%> z!Qk2v9V0(;jZjdS3iissrE&tP<>Jp|Law>No*h0HYAGCM24=qi`$nTL9DYj?SXy)N z4!VI-bemSyuH1jXspIU&v)yAmuMVTK(Mw^DiIvPhm(y5lO?6E%Iz4mRNXb86zWW-_fRW)zxeqZbtQ9Zy(fDKvv)a}rB5 z3E>Hb_tK2FU~i!(AL|Nq&G7@Xph>qNdIP**8Ghj?<<11;M%jSJu?NXPmoH1}*kBr+ z#K=(hv$qyHbyoL|26fNUC+}xh=y+At4;*P%a6eOUu^6dfOKfC*2rR6L<3^Dl@nv{I zbU(h*`aO-x>RzUu+;?$)-do<~-$x>rq<+7D`QXYec)zU!`G0u}-e;YK1s(3hBLq@F z0|cD$nsG!`|EO;&Th)mc$OIJ7nk{ONs=cfBruHtHYRuYVm%Ft`5o)*g-ZN%fGcigKn<9wR z3PH?Ydf(sQ_~ZB2bI$YMa}FoRC)ellzTSJht{4M7b&7k8_Xr3GC^R)xUJ(!w*AWm9 z9^WCsKN0*j^#K2I_q~RNHvs_|&EH=_f~*_{{6j+TSL!bbYQ~sR_&@AD&V-LO9eL7>oZg4e`ia3c#pF^6vFgCOyqb* zN@Y{!VVc^$Jo(-C_can7QeQ+!Pl^SIMJ{a{wh=;VMef-uiFpzuqm6pQm36*k*Lz&i zjfZtWAbG(3t+|W2@uLgO-0?DYs?OO&zS;E3rP{&J8Bp%N^C+uwlW@~0#DR~y`l z!#CF$;Ty02k=~z2z0T`#c9#y2jTCK}u_JWPH5%1Nqp!nu)=N4m*t(dK0oo5*$&NJh z2wGUc$T{VQ_|1FtT>bZY%G*Jqe{N9`R=9ttK%r2VjiFcP|4FuJd6uY>Gz^y!HdoKT z3I+3fy9jiUY5n^eMIK=l;aBwIb->l6@YO3zh!4s%UsP>IvIjSs+0%1#wZA3O7>YFu z^L;xauo#H0Zs%uT_Yk>yDhwK?%g;cAP}+7`h@Y32-Qx-u|MTxW-w?PE)U9~~ zOh4>pe`bpi@{SOgkUPRUK7&@Nlrm&nSvsJfiGot`@_>12ZGSv?Qxx=Qvzm0^PL@c*D%ghp zU)7(K5)Ge@^z+~-x@{u-orB2aQiUKc+_fKT5a#?vCfPA*5@mME67z5H693P_G*br%W{Bc? 
z83e1g>DrFQWXFRCnzm)k^)mitVHW2C=VzeP6iy zpKIU(3)XE?f8dEQdTc=JXn8f`ab+rV*hk|;WJcV6$CmoQ;kEyY6wkkea?MR`YE5>Y zU!V5m-#mS7!nP1gKxadaGL`!CVg0}D=x+VLuEg(2+Q0PuZ}3A4Lgm5;yReY!(`4oQ zD4G57(38gFe&JA&uH#~1UHy<9BiWjxZ&9pTunb|3%Xom@K?Hr1O`co$?$lD~#V#t{ z%6+&;asAQwm&~Z$`aO3bdfLDHD7UlvAgxsHX(@;b%}T!^J-B0cgSCTu=N;u-!UeJnK*#QhI4b(F(gxorQ50i9rwG<=Ar?!gBAABFs~OE)p}ZJ)a?DBjX(s z-B7u5)R~>_(Jj5?i?G3++Tp-@q$>71O7lYRl)1>piWlxHQLwLJ3#cMUmcoT1L zIkp~4=BECv@gCU{5ByaiG}LZ*v+SL_d6@&lK38ECpVnSGyYFljXK|G!mXC@kuS}gfn^Giz!S}khx(YtkavP zRm(r?k-af{Yi)FE`p!364r(@pvv2EIgTCwEV?HM!5d2Cj z#m!E^zPFtW;HXvvBg%3X4E?Y{dESX$wll4RxWN;tFz7DUOP-OqTXc$BW}2x$j#Jk< zo0oKmv_yESoduDD4s&!EjCagLakLZC#O{Fv? zlfTABlS%8Vmk}7uq0O2&9|ur>d?80z^8V`&Rr>)7Hn9HYw+HHgiawbyN%vlqGP&R5 zi7+#U7B(v{cB;TCsb~>_uutd5f#+G4Z7Lr>BFFH-&e}c(Sd_~)V^)$2T92_YR>8?x zV){BSSx~$g9Bu&1tdb*1TJF%QnYQ0+E4C4`0w)a#u2?Yo!O9wkoO#J#&6=3L9bEt4;4<)-6Bo#cUue}T z%N%`ypS~a0VTbNoGz;roU#%3rE;ZNowV?CC)j!M?66w(~=-`gsnekIxC!MGDFk(o* z`zXy#cd+Izh!yd;#%{4pji3^aZM#(H^Oqq?!0zjV^n5JY*zJaoFavFi^pit@?I#01 zC5K%VwUUW09mj*GiMS!zsn$W?sBu`7TYJ`6R7esPZ+} zRZ-kg0V?YO(yK_iyU=rteCINOfK1SE?SrlGT8?Xlchf~EqIPA3Mo?$%n_lS)+bc%h z`S#5|aRI36%!CldE~d>0vM~K^UN;FJuB~dxylT=ZtDxwBQuLcC^#WBBBk4!^8NPJa zH7>2AB^U_&*tSN|GbHiYj}Hz3Q!Lf&=XP`GB8tiy<9Ea(#K69v+!4_Y-3rj}Hkxs% zgjK98-XkJ7_UqQLi&mgS8FN3)LR;p#KcA&>+gWx*+>e+UjuDvCqP2n4dk&2_KE59h zAC9o;Coi}q{V4fUq5Q;a%1|ANC2e6r{XTk^+Yf(|_fG_Fz1$$PTXbBmaratHvZn1( z0(ZDhVFUv(DfXdY>I-}IZ=iukP8&a%4Id>0$I9^|==E#Z^XL3qbkRg7nIMsEA@44S zAmN}n*jqk=`kpR@-M5OcSZR>4$D>yYfT#KF`ch77cligKTyr0StQAkCTCiAZ<~2D1 zM3ik-*C5THRH>9uRow1#c|JKE$Bsk)oQ%b(x^d)biB+h#(7{iT=?Z-d>{^s%!bEZ` zgvjo4`PiqcTPSg~yfiQC=&7Zss)9Nhxa|X=>rc?ttawcJVc8qWVlt%9%qYd(FhuEK zXdi{8aSq2<<7HQs%F?#q+dh}Bpj{-L1jsiBA53ap_# zRBl~nK-cVQ@%Q3dTRNO4Y}R9j#(cl$YM}?|$cTXWt#oXkjGvsfk`whQ$28Cyo090n$$pN_?+3V1j#p;)LEM` zjP=g&T?B3Ag0cnZDFK`8mPhY}`o!DqCaT3*pAFn9Xwy@_PVDSvZpbqpKIp*k^G()p$1Yn4CGbT8m?i@fBcP_T&m!@4`i*;0C;efh~p~|?^u?iTO zoe(Tq)jCXyYeYLRQ!KT%>4}kk~)=(qGc_Af(;_Nl* 
zJm?3W)o4@29W0|^t@dDZZD>Mt`d)4gwc$QbEw!gz{g;j$EZKiZX4MIZrv^-0iBpUCnp&pAlkJ^sOTtUFQ%bX!KHbS$+E=|h*g<5I4jj*JkFEmZ5xlMDg$W?F{H&kEx zfK77YkK&dXZH<)hS$yZ?(GBs14Ov4|+j1(kZk2xNyx0^WOj1*Q!n6*XRX#x|ErUp?tn%6J2&Ed`E9cuXS ziST;B^l8GSLl<>?(*D~Gf8y~1S?x@P%>=X-ag4dX@r-n$Yf?x;$Q_YHGkLZG-xtjM zB}Z@{g$zVdgE+(5z&}j&nAt{hT)!u~+vTXZY&zg(-bQ&#dC@#_eFi;x8-cikdH<6M zN4q6GSm26cs;+@2-vHba-bkJ-53Uhl#u5szoCd-e0Se-A~6GDT{zz%a25_=kBXG zdCTf_K_QM`dW>0PHkg!Ewf63IIjUGpj?dUrssZftfecsZx|F)6$Kr-)m|W*$VRi-@ zO3^h!=B!CN#P+*k@|}RHLLJS@_S>efFKCFEh zqF zM|J~dT8~%?n%(_QNay>2Od0cW*0MQQo`VsOnrd8jz8Aq%15CrX2AaL;$8O#m2*au( zB>7Yq>oBtVwT06QJki|#53jG{eW@aA1}J3Eyh#?s&IPafoF+n^^ufE!R{b9ghF``3 z%F@yVd*+)>?N@dr;{ise<`M%cL|gVC8W!D2nCZ*yEfVg$(1_q~*~}=OjRckGgJ(naCeQDgL3ctaES<0F?R^V-0(ItBM8sXXjaS8jv0wPbzX zP}Do#+1zLYrG&W@}+A}jx7oNrSM>xT#cdMp)4wXeMP&gvAcU) zyjLXA>z1Kk`o%rWr4zS+OI#jSX%qf-fW9} z7yruYx`RDmM#)Z&i?dI%Afn6Id^^dJ!%I*r87fV7h=$qPXbTWMRJkS z^YCU3gRfL)(S)=UygbT;S|4nCo^aIB&_?(bEGUt?F6saNY4kOK&sg8?YS(5$ux1Kq zMa{_*`2axkXZs$Fhm*`ABJN7GqmfLRpx_^< zFE35HLT-u>an=jEpA^Ks^BJz>2+P6{BG29-TXqIeZWR;#LY8(a205rMYmUNV_Q5A( zNf@q@&JVFi<)N(D3f7!hca-j6qmA~4d5MDiQ&=4(c3;V+6|L0aLvzN1U|Cqd7z9Z! 
z$nk;^$x@}+(0@w28d4dU-gQ&0zP2k67wTyY!BkqAd8)`4ciPT?5&Si>B6Lg(o0lH* zdOjDF6M?k4h;1p7U1*aYz_#fjxh|r3dyN^Dm~Jo;(XAlgqP+ZGA>F1=i7LNy?|3r( zUA1-hM05UQ)23HyUHxW7wcWLc1YkBI0p|Q|TSiJ@hrmDlfailxpl5fqRLPKjv!)>z z0yKhxj<0V5VGHdJD*<0;d=jGRTp|OuEz|pTS5Qhe96Q)snv5-ODVtTytqwH>ugmBq z45Kr@p@J~A*2Zl#o9@t@g#KCqj3U3^ zD5OREW05r_3J~Z+`9khnqL50xg2I+vqB>3@<9Qd^^n*%rTSKLokl5p6ulrydgB|_B zH8tU^B=y&IOJ8%B`b6R$^PuDC6dRQSVO&?p1rgGxBwtHA9@!)pOdB!Zw0(E42p^QI ziVWfG(iLomn#xeWM{&6q%xy{S<+}e%3E94h^l+GvpGZ1g zRs`*YO@RWBcvd1CD(?IHLYT5oIZ72YHUQt$L&b%YEC^Xy_ltk!<(U;?3rJzpU!0>u zxl5ktb$$~J4Rjmr)paD7vhLROd{QqQEV(wXoEd&6_}DUC6>+c~+TTtk7}PcZ2I!99 zGuG(rCaKzjW^HC8e5r%845ORBWEeoHCO^drFb;fws ztmW7TO%%joRr6FN6;J9{q6BntmbRj69giHHz{of4VZI|)ivB??OzskGF>NnX=>hL0tWilgCzD z!E6Hgk^2Q86&s*UC74}CEMiy6)F0r`KQtI~Mz@f79nu_Isn6ri~@$&IKIKh|)G-=)K8LO_s5TjsA7#>f)yqJ9UYIf_o#E0m;i8wUhY zY)^O<%Vo^ngf2Ht3)zU+YWM(SUeHPDI|TZ8+go<@KFRmq13~YXu*|CNnN+wED*OmRRd?ct4lLIB=nHsFPCT;k!mXVdSexI`xVwyqJ5TUnE^H{)?ZNU- zj(}`75C3+g#)9|nuJ%!c#H$WV6v+DFNQUQj(ms`7o((X&wUTc!-EvlcERg7*6NNm+ zSjV0}7!)IF1=!T3HUvNM;Mp%&9zGRUh^x=qVP{A<-}LaC#4<97qoRa6 z*k$TvImlxrT?fxT2h&yW7hFFWd|dPN(9d*oxLXx5B}G!)KI7yNjMq)-D`$wBST56U z4fVv2PS!s5UF+5@G~*LZU!&}G@dsJWN|N~r^|o1gdxq~$FeHt(2!v*b+o)ei{a66n z5Z+QS9BA=mr)fNQ1~Gp8QDVsZozTUsk1J`Wlee@#Nxhrcu6lb?mybt|8~xB+a9GgX z+m7IB{iN4ArS2R{lh4XrXe2o%03zM{jhc4C3%lZBByLZ}PxaVsu=`l+TIJmuTI8AG z;3FQ@Y(p>O<;M}tH7zNS5q1v)-jcOw=r8;sk7lzNL1I5{V z|Iq_}8O8btp;&!v>K2^5ZNd~U=8xs0zwdEKG5mB)!@}CrC?d}i1x{R4TCrM_&t{2S z2uMN>g-D&MvUQbafFZd}3p1_=1TZ)E#6=>1^sOuzFnQ~j^+`Qy_WtwnO|Lz2hlSm= zi&^b+5`4?L04}kBbP18s_su53?Q&B@wn_9aB<={RzfJA7ZCP-+CZB(qx1Z|+O={mZ ze*3jtR;zR!Atc|Zd*ccl*UQHS+MxXBY_ScAb7@h9UVCFtDH_{tDIkfzcqt-vR;+6x zmquDbB@ozC(Jo+*1Gx^AQDF-d(p9`>4gI7PtWI}v_hOUU&T)O-536Z8Mf z0#{!C*X{pb9C&4cx$^?p-IhB+n|6IYldd7ZEA4b!mqUXfI2SwZm8$Em(+{MRU8gJg zoVuTu{FlUcubN7Wo%Wi|&ZoEJxLE>v332LKZ_*Pc-!!^T)#>W$j$XpXB^F#$!^8qh zB$#|r7Q|JoVV9nfCX8P$Dq_!&Y!~K`wRN`EmqI}PAT!#NTc<0ExtHm!MNX!)0l)|5 z5?VWb4TeAIK2*82F8Oiw7JqcY&uiXLEAfXGDSh+0-og(^YI3w+35R;jJ?BR=x}+zh 
zoCD^{>^Y(Vk6pwVdUx1~%-*JwpfD8S-{U=>5 zB%EjO*s4C(CL1I$B~6k)`BGgY_I}IOD@H&4>FC@uHXK!7wHZk)YjVtN9EYwIlmzHkDV-2E>?hQnT4Yj)!~s6yr7uq?$)1fX)kU@RkL)j22G= zWuQ_pjwb(LRGZNvJpr#bFYEAnvs?G`emD3%>nsuf(~kSjJOMxCnDpgN%mbSOEswWL zbWt%C1>NR++mEY@a1;1JQIhdR?|#ZA0-1RFjr#4}yEz^~;=jaK*KI}-R&5NI{671E z=uSV!nwyA(9e*bsERf}=_2WY)DPF!$5es9kGc6oxm}oGxypx{5C^7J3@<2D3AZ``h z#UuW%6VkeJD9>8S*O-)KZ#L6uH$56vg!>-AW*u2%=s0)gjVJoD!F4LQ&VJvlRal#572rPbh#Fd_*WjL!jmRJE!XN8tM1d zVv51gD#JH(v%UD0l7Ck^lKtFzyf;a8kab`yuQ0fGE5Gee!|`5gx9{e@yx)T0Ra@YX zrj1AAv(C%@Hxq2g{ErT1cY&eLW}?KLH!EGOb&%`=OvN~Ry#0uViwC#-cN~)!V~2N- zRzfhTsIahM?YGwoeDZ_-CU@w~ot{qR(Jk{JuAQS+8?%3+Sm~76!_6f8x5h-m54Zs%RZrtR7*P|%Z zak`P7rd`i&%W>=)Pz)TDr?Ku^3(`GbRaZ#4hTY)8N=`#oZ=p8uR(+hPc)h+@904%D zxgCZzL-{Ak&(Ou;u?0@;JerdM&%^P@DR^ja)$IMv20K_N{-h7+w)~cJr=_`t8@T^aZ zt8-1`ESFq(s!8TtQPDQmd_h3F%U=OKLsym#_@okm_gXuTa#2jvMsNA%eMkKFI#zP| zv6luSJ?k|c82DR=#z9){7(`HZ3KDvb?DA zUXv5sN08ps8-S6CHhUM=R1pB*{ZRVzEBhGycUU>!Z(Wo1p2|GE8+bFKBo!~Nqm})O z${wwo)I+(7douc8$AkBSjv8)`dae<25*j+$ac(Uu3YsZXqmQ8j{RTom8t|@dS;sB! 
zVC(`v_J5y3;r9r`Tq4TCY4g|LhrINaUY7yziX6 zY`Zeb*P7&c^s&J+?LQTQ;iE#!dZH{BMUvxdO)@uUKDX1f(jxoq)|AAOd$`AZsD(h0 z3{I@G24I~B{2wFzN$QHI>qh++irxHYpCnZ7R@^&p@%x<2iT#czj}weX&2Tt$zi*`g29n)j0x7ELLdYs5=R zv+g)ejeP0u8R&9C9lklp3Ik*RD!3rY;_HJ=9^0@nD{x7$cpE2>0PkQmQ}D{Eg7g$& zW>QYs{DW66#mQ+iyxN~n*Os7bdlZ(u$K_F^if7+Pwt5c8dXn?7RZhhrCv)5^UPDh% ztC5s?q|EPMFmkqZxXv{s9=Xmp@s^9i)HI#nT9o233wY2_!c)GsA`RtIObB zd=NLY@ioai`QxN0n+w{^Y)={A$1h`t$>=^H^~6IK96!e5j_~Gt`LNCEV>e5lC0p3# zA&6y>!2p%P2{bmJ0Y3N_QRMg`RtkK82d4-^bVq!1jdv*J4tgzC9&4nC@ud4^z4;6N zm@H8L81#j9zs>$@gs>*wwO7H5{zWy>HTRsw^$gMO;kM6T6zA>HrErdN!(@j4qMI!H z%HH&#ZT7W}P(RtO>u7^CCp+t_Qu8*f?r?|K8iP3#I7sSj%G~c8|KU|!Iqg2h^YK%E zgWkFHEpx{avkI-00a{9jW&D`z4$u2}T@J4IL|Mwk_IEc$q_pWZI#WF_nd zp8@npYo>_3OZS~jCx4yrB*MUYdlO?C8&J&monOe*BkW?qNhmbs-2YdcUCv(T#h%Zg zjsK{Q73R2fD{R00=K8O;KGp0(m*x}-$fAl;C$sU+X`mrKO8grlN)aFvJj*ef2q>J& z1mre7#goSwp%)p9q_FzK6XoG%*FgRbq8s16(t!W)tSmulQ^_W%OrxHVh09|mK8$M+tQnA(Me zTY_IQlb*lIGRDWHRf`!-E4(A;x?`!UuTQyK{qRuBL@54JBh__(2L9XKZ^ajJxgUD5 z*D>9sH&(%i2{Jxf!t8pK!e(mqQ(C6b=8uh=oEDHDkY%ezC%%=l-nXcZv{RuChKvF* z8K4VE-2g$Yac*tvE0uqiw>yqE;J*VL7B@;}oak*sL0U#esP2pXasAv5i$kc{Cew$m z(|MQ{#8~+@BxCQPclj?;h}OYB5JYSjIB*!hCoxQ>Jd?0WH8FfLxCb0TMo;bloHSG_ zEm$7yGC^ME#0hDoYzjfY>b>KYEBpSoeAZ~$Hz-xeGUL&Gi4yyC}?c<|iqO-dL z&Uq!J%k}q3pk2t51*de!+%7q6y4r31PaO$I3?36KoOu$W&xAZ2oTKi^(*~_fa$@nR z7v9*oo1F+;K4ZWYlWR)1)zFpa+lyIed5*A|tRmp5ImMta}QDgALcb(S|%*#~mys+mSc#GXMa)<#_h)?>sJ zjrTx>Ml6zK0^y{ zVc+04DV}%%_`=<-TlT`}(H^P(hojY2^bArMup zUk-?Bfq6W-Yw{U((&joZXiszU|7Vl%x~@*XV1u$RQF{7q#hU%IEGcy9__WRXKgUK# zZCB2x+k)BEbeXAkxfm!p@0=_;h^Z3;;nO;x09tIA{Iwu^_bZbuS3rNz`j$fLv=r-P z+xipT(i+-aXqnPMi4vncK3$Vc_k9(#z$wUBc%U_{_m>^kcu+<`chx9C-e8S&=3ggp zZN@0M1Svnoe035Evd<`0>>HH)((J^3N8>5fv0%f z*@(~ocUuS0-!12UIlE~d>wywPWP<$Z2#db_$HMOQUse?D~+ zISqfKuyuNt6NbC^0Tb4*Tt3RGbYu94#eWH0^!}n$0X#7Afs}Q&S^Em&i=M2syV?=sG2Z0;2{GC6z!!Ke zltEn8%&rb|WlH$M#9AO7UnaO8&U+5Bq<$J~rAnl>Qq)}k2nDsa@-|m?YOlv~_bl;Y zxpUsX+=~z?6OqBEjAu2pfvn3?T+w2|_%ewYFLTijDk1hIAv(N@;4?5i8B|e?$GZZ# 
zKS*9L;j&++&u50}mWc#3v*iA+#mhMQZ`jj?$e+w|bY1Ow?915seEi}z#{lyWVxr-> zsG8DFA;CvL=eGnd)Q8}T6i#{E8%PG`Q>^rQ^zo!1`+-XKcTbBNM5~<0{TNQo$Tt?; zZlic3U*k7U3T( zpL=5o>ZFM;u>bC$v`9})a8-{WX8Xq=3Qj{iZMtfc|Ed{Vb*^#_F8 zRwMA)z%#tzTQ%ICVgJ!zXtj}T`buSnG^W=?n0{2Ld%z^6$h~Yzcq)K7ZTM%vYfjBq z!~ezfI33~YoPgq;$IEKecoV_rAQN2<-TWtp-&z+_IY&uhGAK=wI=V-Ao79|4FPlC3 zCs5(pav^P?=43G5XENV%nTRo~@Iz11*Qy}hs2d->PeKM!*y7!&uh$(b7=3adrjr3oE+wkl#+T)p)^mE3Gg|&&<|; zi4KLDUe*r_w6t(r$Lsv~LU*fHP{_(w>}6N^sKQ2*8Sed?=G~HDZe4#ExWH9E6TNloqpA+U0wY%Siu=QT9;UWVELOGy=N-Zyi z46hq<-6>OwWNJc++tWYlBsBm*%_I1DztFaX5AiEIflPo~fZvnOHrDx?+Fc{$hUwzf zmGrKW4OvK8u!XnMBhztVgPYgc=*p{6-*D%JG*EFF)8bL@E$J{`vhnuqf*k!KkS5l@ zC`?GIJI$>KK(*F3e$w{NvjLy#HR45$k0Z@r49cy2M?^#wUGVDV<7hG$`<0Hi&(}ot zGpPkCztewS=UEO;;;i9S7ecn89Hxp3mI{ z7Dbqote)2wRg|Tqcd||IllsmryC&=Jx?4koU$1cJD-+XahH8oLS64H|nmc8NPL9=Q zn=n=~5n(=K?URIX!zYd%xf|ZY=%jmcI=^EMzp_>Q(#^v@kFKT-998RzUpqIkHG(xH zqn`p;!)PuE5neU^hI9@0p<0_ZCs4B%L9)|#V9*Ebv)m}9%Q1-)7q`MxWOeVnt3e*K zqCF!pn{XM<#u1-ccbaEj_ZHG_xNFlQyJL9 z#tH~%)q~I1*cNBsUA7@|bLf=wd26oA1c$WaOVW0sz5cQ+q~VJPQkP6u)2Y^9`yKgP z>a(EM%e_kh!=>noa3AU1PJpc?;YKNLT4Z(D=Z~l$yk)kvVsi8naJwzfPDBvHULs04 z80q81-0rz1S|TdE^#d~_;`(Zg2EDNo%WjU9DUPcej5xH@!etBtShhqzFLHL+RS*hECL02zqNa|gGz<@=nX=^zFV z#o_c6bdnWdqNL>^rF7@vEW`gf+HI*@`xQHnT!+tarGXUpn!6jZj96dYm9l(?CmD!d6mo%5l zG}!d1^mU@!{*p7r%)@wRGt&Vt*OdmZV0=0BYVPCvzqy}fRauidjE!%@yvuJ_2XUM4 z%%y_+qwyx}>wsuwte>$^#*F0yY(%V87wn~2yWQzlSRVhe;pf|ulq2Ib?(Pg_&-BD^LC!g=lvs8>q{?PdGAC|R~HE|=f>6fyX2SoG$xqq&v#0j zh;AXjESF!f1G%3KP-x3l_q1xMKMHsL(cLhP`&0(buZm+TG~ddtcKo4xU$E&*#(2rf zVPYnCPf_&<*@-CMA0KsVquT1YtEVoVCzQ>}}hQ~M-Di3%n7+c$YzH>=XY83Y-PfB!;)MFZ1%$s=i%|3>J4*4R@ zx73s{y)E$&2XUzxMhrivQnu)8mbMFW!W-0OwW@`=tt4+YCh{?nmFlCWcD~dzftsb- zvu3-XhR#YTF|@j|(&Ye9?IMqGoK&+If|yQQ?seZZ!7)BW(j!*sAJZI1?1YVQtyYL5 zo=RS$clNpIFIcHaGbBtZ{n|vtr@HzNl!aD_FG?&|u?E|h zc~6+OD|Db3N|7P!@(JrW~{V!SPBjZ?{yHsnZW;%MgD) zi+^EL6}8()e(`VtAoGDS&)lCz9|Ig#dzRd()jvvcVX{nYSdJ{00X;x{afb^=fXdQn zf(w4=^?$i126jwT{i7_cps{FPue-?|mIX<8b{`h&d@)g780uP3p!Xsf>-tz@Mn*zZ 
zze~1hImIDoAmF7g*?Miru8Y-(G9Yhe^k-Ec+_F7?2F_?_-NF7&6+~a{utRBqN2q!x zni^9I7rMNTx105_VEDjpP_kr1_D|h1%9L!ICvwM8v3}9Vm!+iYH=}N#Ikytirp?m5 z$0F9f7$JmHIzNmWnze|m{-(1FuvW2ji)@gYG=sEG6G^Eog^T@ql}V~wIq#F)A59!n3dcVB;P@Mt}%_tjpC zfytWP@{arWmBW7;=tbW1eP9`AD#ku0uB2^9s*RM7-?_1L8r?U(ydn?@zeFD*V4__tI=VC%M zHVAA$Mw*3L|GXb{aR&5>LDP81?3SGc9}0JV*>lw4N-wMFet@={Jhuv%V|$^j)k4v_0B7M@R>JbYb0Vc@L~d0< zCj7}_3rf>YwtABl!tXE7VxCu!)2TvU*lm^_*l}BAN$6H0Qdoo8_5M9~b$q z`L3TYYChZ5k55#4NI+lMt)8CGvJZV|4!hnGqpxb!skFKMR8S2RJZKvJO@IuSK}vu% zRhw|Tnwi;}{S`>&!Mad)@6YLxS2FW!zJUl%seTDq)FbSNU%V?j&;~BZs5+?i@Eq;L zYH1*3pmuselv?tu>+gF*E)E6(cIe1L@T4k@`a%%_f;bZ_5i$aWvTzx zaV5xxvbrms&e4Y1$|aXhKg=;FWqSsZ)TZ=grJN_Dx_hTKnzCrCL}=Z!`KN?=7|u9r zFS6dlGxd2!=}d~4UEAAcezK#pP?Ncz;_d*nTgODgb)soT5F}64Vn|#viEQqhuaV8% zy=cs*hPvefaZ95Kd7&gDbM#DfKsjju!$Sa_;`oMJOm)Mvnd#bb@1gY*H^`rV8z0LBDDGn63-kQ>LKX9)0A|_Nh)w5$ zgeUt!><#8NNFtfytSt5I9Y@MSZ}C?}VZ*wobL;c3iq?WGDt#HVEX`Drl0^DfLxcac zz7cs#>sV)uimlgo2*PHE2SAow5J_CFP9fm&db@U{5_)#g_(~6%Z+a(j_X)6mjd55i zERBP>z89N@-r1SK3PFD~RBZ&#>RXs6-hQ-D7WaLcm(1d3{*z}WW;UjJ&;QU*u@9T| zVXXaH9fksj+?U8F74_gT3NlPDJoK0Cnxfbl%9nd5OIYXJGgdyS%ECotp$o9T`es-p z0@Y8v@hr`iXq3y(U1Hqg>uD=7$@t2gD58Xpgb#X6?56k*w0}3xB_2K#AOfT`nd`c3 zPP6}4F|S8|U)o|EVtlh2+v*@-)l#)Cmo4F@weEm#$Mcor+eK?`%DZ`=Bog>1;sW4? 
z#&5j)a|k`}4jF@cnJQ!5JBMp+T2=DiydsnAq{;X+Jz>e0pDc)% zf1$kwThg%&rSqV$o(L=ypmcHwKkx(CciITs7EPoj>LFh$LNW1l_N2`~B^876a@S~p zcG{IsaT2KlVCieV&onGoPnpr+(_r|{(hMASE`#)F^}WB=Cg6wfw6w7JFNy#I@X(tQ zd<{vL%~02sI!D60vo60h=Pa%YxNe()Lql68gmNP*9;Sno08LA9bf9FKdCOk9>1mp` zrG6dogZRQ>?G6sk+p~t{0SqvXl$9Z+9nB}KKQgu4JyKqD_%cA`>gNJ+67+&noq5m1WtJ$G$E zrxo@8pfh)zC#uZ-7+Ac6za^Dg1KFuaJc@@i@drZ%fZuRw7KMK&zAyg{*yo5`+VF?6 z|63UUz2#@=8cg=>zk7vhS${zSld}%Y8lhS~nWf^-g=Ih&3oCgYGoor6!A%bn_m1Nw zc{Q_i(iD-ieQBC;xP<*M{B5Yu_Z7}oeR>H=T}5(#yC`5jCl&37-CW>2pxv|j&`lg) zkhlVVbf=jzKu7zAZ^k$U#&!@vGw|+BUVw}65r%HX-ah+MBZZ3|aGz3)Cn+HM$HIJ+ z#R>TWzVF^SEd~2y^@kp?+j9x)ajnk>%q>(7@#_j%;qNOK)elI;cV@P_FF|~*I(z@& z?$5va;{>0YGOh_2bn5KSfJ%l9iYYjCiLg29NQ}~>izi33L!W9AReug}lnV&J)cAMt zx85sZF3%7?JSev9ehkeEB&%_U^QM|h@|2q2xX!?9kmkfS-3K3qM~X7P(h`<=W%6(= zfO9ugoBk9$HEUXA+$0+kgZ#ZcT@<5?JOzyh*+O<(7T%UjjQ;+_(*D zEd7#?s?YQ9xmaqSHo;#z(vT!E_cYLwsw@cX^=mVbqEs&{Z+MF z2U#m;2|h+9c%1LbwObsjLk`J{Yt+Y2%t}aE1K-7shXVGy;IK+=gD~$=ag>R07zlFn zo%BeClfu?wA|cjW(s;GpdN6?Ha*TpNk||o~!|o;r!9eFCxeDVNZwDu^L6ywLK;a;n zLxsL05COS!-AASs@cO|Wtq-O~(WJZ+SIMs*pfmIG#*h!t1x$`UF{LInNK5i2Rv^cj zNe%ZT72x%wiyM$EyMpv4s*Ha0DKklf@BXd^7lgv}FJKAj;}?o!=5@ z>-f`8O8pxHd8K*2aO{ZF=J=i0^_NMZC-{EfB$a_klBe2BIDCz3JaDG5B|!NBc&8** zW}$4H9*pq{C4j(aYj7nYME~83WZfSp{}Xa$Q4Iv&ICod0s>vwW_asgfLvyCc{22OtdlE(p2mmog4k zdlrYKZj1FSJ<9+%$cqT~G3=H8{Gvv#JXKS zG!KcJe2Pg|7Yk!(yOVDCUMSRh^-oll=W$-DejZR-^)|RByq(C(#JIfedJYd3Q1FH8 zq8hx%hV?UnAAKiouwlhEmE%A|Y?thQjs^Wuqk=+5kEK(Ws_9pg8JEy2#1Uk6rkL$w zc0?jk9$mEvMUSi}jRU1{`Zr=XFsDnsjK1@RqRib-+KShQi))oY1>j`A`4f>B0<`Z* zTub9QG-z$ac3@4}`;lyyt0S*lM-Y2%x|2KsTiMHLq1Ip<;aUHcOUdf!@#?YPwfIqH zd*#I;9>pZIkY{?kQ~~=@c%vb*A;BvNm8O1Zu$AU)^Yv2tsvxe#`5U?AyLzGYIIm`_ z_I^K}Bw*KwewVAN>;9#GCr7JaNfpOeHAuG=7^Vx?^iRWGKFjqI$9E>4XW$&!9OXy! 
zLq&wKgcwamPUu5R4eGW6fhXIl$-E^;rV3D2gYIVupf)>4nI3q`m%SK)pR)7ia(&af>%^Yhqip(rZr}N==4-bA)miEWT5z^;i;zsFnC8fxKWcNB45na0hveblINUU`lv2_1~; z%Ua3m#?8sf_g;cT^xk{aD5FIkJ%U6xV)PPibjE0d;l14V^WUCrd$)Jx z{l0JETe0HeYUg#H$9Ww4{yQ#LG!LY8{MJM^dN!pRj@vI%GG&2e*v$Jh4=~j90bjrS z)Y?|*9`Dx0*-k^TTv=OtLG|3z>09Hqr?ZzOnw*0an(q0E+G7h@wLWzXl^6B424s&d zofnI=HQo%NL94UMWpHzX6z7pmxhs_Gdz#vZRnM{Xlv>ep2fqv`s+qRVcPI=$KmKl{ zn-pQ5rkWdh5PCGx&tI>w80sA1h5?z+1(YIUgVkYvCfR%1o!ouPlXOV+#R&O#O#;cq z+s*BD?UgoBj4yx$=~TOpPT+TE8QYxI{u=FwE>i)8VJ{Kz8Ja9EcUSR=jYF%UcOkf+_uxD-ZC9*#zMZ{0}Il}V+v?=JHJ)%HWj z7gP5r-w>N5EqzrF?Et?^YqBZJFw(Q{e%k4Nn&GNA#b7!B^QdP|zT=2*&W5@B&2&m6 zT26NE@Td~Cd$oPjd=UrxqEA420#U!eo){xJvIH3F~ofIAZ%llBa!u&Xjl%rG7Ae1_h@#KPOtC0>sJ3zHQiQy z6rWi&HUOgV$Qlu4p0$hQO<=-NSr?{YXq(=b_yr zN1DpFxA55KpHZ7Rjj*J9-DVZiATo2`m*}5!>>`&bWDhb$!zccUDP}G-93$j)Bkw-j z2WEEgoZrcuh+nE%u0FKdbf7G_w8M^U`z$1nlSYB7XFqg!sl;&usN_r2svLPm7RF7y zxmD(Q@al6`1%q!-i)&r(ouRucy3t&dXwfEB+nYyrK%~mJ`ZDp|1v4y*c;!VJZ6{A3 zL!U6@5Fxo03{1~pyqw1l%vN>2;W@9{4nG~hY@sxHv*EOZmC!oTqJhFA8p8{=!N^|4 zGH!@cwNa3+u>(%y{y@8{=7+OF9iy~1r}G^z<4K*K9n&0v^374c=e)v~QO+{;>Sx=| zf~uHdAl%QhRZ?|PGtz@llxvKHc4%QO3h(e2n(DpaeVn|9ic%HoU!e?TC4DIk}BFkq>*QgUx1_?1?u*HXWHQ*jGA%4>STz-Q-`V%kfQ zEx~4?o{!&smAW&IL~uTK@SDPdn(5F%8qKcztP_2W9AVC%I)8KLJXWL62|ow~Yr8i0 zn<;6V(=We&6HQHQdKuGp(&~jZp!GW{lj!()U~}JLvmMvaqcGbksVg{R)=GDIWb7cO znHUubRqWuaAC~0Szx`6NssGF~w#(90Fj5%EehVRKF;wf}$s=TaL#}=6X}HtBlrD~Z z@aH{eQ|*{(vbl$<3<5+>EWrx1P&-ANk~{p>RLfeUVzD5NCoFU4>j!i7Bl&zs>Xwj6TE$ zh6oUPWs(PJ7{Xb84tB9N$b7T2I#frn?xNlE@0jsZd{^^s>0?6g_khUUEvCfJOqt3& zn~mRLM=Hp+L|Y9d?k;O}%kQ{w19PF@tA#q6SByQb`~nfJwvi2;pd_c+7^i^oUD(HT z79bg9;Qo;5omL+7GYx+QeLy(r6sw>`c}5PgceTAhk%4Z>fD1Yj-SG0w-4v*>M-1>)6@t%`+R^PTHen|CPzcaI95#$`}^dLXd=1Wbq zSK*t93STRo5V!RCt>d+~PwRQl^R||%iqtS+O1DQpevRI^_=(FAjoyAHUPMgtMzu)$ z9x>IZr1k5(ZW>~6>xQ~RAAgCwhawNgXQxhwnW>E|oq936y76+6@-lbF7tjL+I}1W+ zQL#27o#yiWB2V)iH+D+`xT?%O6u_)Z=h_0RfNpX;zKKd=byA z7<~OwFU$WvGI;ADs*zjHy&$}B?~nCK{fYh9)~b;prZ;1E%OQQw&G4<@_Or%c%~bAp zlMX)Z$y0I0Ltn=1~TliDJe-K&(ezj 
zBt9SYPW{3w#IpK`(yZ}*Xa}tzBRB7~`N_Hbe)!RWbNpF7P|w#vxgoOT5YJA0c@Dl=c{m#N`|HsIhoS^_*+BzH#MRU`^>7r5TP|s zFnRDYUQ|bGx*v29lzUGh-;-T1Y0dN50~$+z{C1HxGgljw2cUP>os-_u8}92Yv3jG( z8opXGGPe4pKG0bT7}x~1CzPshxuows^t?xGokXd%6m^IOd#)9~inQNjd}{k^%!}%| z#Luq8Gsf*SX8v!Fzn}hP7J!d@CtpKxL6{q6o~}BR$=scJbZX&M>_7GSbvy^Z_Yu)k zYS*RPVtq~wC6N=opZS_=Fg`6=prRv@B3kG}Ma znn^x;e=R}e_tpyOFCIo~HQ1unx_wfY?e;~b;m=02V@1&-Vq#+NzE*BspAMfAVmkZ2 zues)yR%nX(tu-S4B4T;;^Vl#VDL$4l?tWt7(ePAfksfzGWB7OpUzEId)6ROj3a>!D z0=(Z&t+nPl@xdU4{*W3D`nI?LtRi+qA9Bcfj?o{|!o}wvH7YD_h>lUn9FAPLHkCxY zknUa+S^N7%fC(7rK}0OIi=2yEf-`yDLaVGfigez6_aS;hSsm0liXQSk#Y+zG)-jSa zFTH3Y5j67DYKO*5cQh*&T1!Y;-CAUasW&PzcI><*a(&aJfEhzH+4FoCpmSOF5aLsP`1vdpqMFCo+m{BBDBpY9vY3Jo+Wy2k$;ojSAygsD)F0vk-g+Vz zLHwrSl7imP=Zrzxqi12laab&0kgl{!7#l1^+{$E65i5C1pr- z`vdT0?TjNEo0bwpb2VfqUc{)RHbpNTo32@Bw{j&4j3=Zg5 zX$(|T?OMj4SQ`rn<+~l0Zt5zjijfIQEq>%sC0*6Y!DMnoF#sA|Qb0}DWwD(4_xr76 zuCx#nIZ_o>(slWpw-^)vMKLYl0tgxG5$dueE#Kr22`yB}|LB?v91$oado7g*l#Gza z*Mfdi62hvGVI~cwmtsuHt;cn$@oO`I=X*H?Z_KA2LmytdK5Xxa$VJX-)z8>BbC9Pe zxvG->U1@0HRLuuun*Z7%RYb(IwH_KzW@uUpx(2Gm~53{&2E$}>godN z_;s#U>>wb(jk1Z-Os%5_wEfp+5QeAT&Qnwtm-N-#R1$T(UkP{C7tUpx)l?Wt^UG`o z_V|snfjsB`7*awX(95M7U*izo5szrTQsDCh>Q+_jC!jPYqfgxtm7e{C?EL^n+^w4h!}L?bXJMD_o@G>NtG9{{=T zW=$wkS5q4OBA#&dADhkkbC#DZGmS#3Zv{PT2DUmpX3SFnIWsU?aDaiNN^!C8eF!Z8 z@soYQ8WAw~l>DU@vVFg<68lFF0ro5%xNAN?ZLw;Q06e(2T-Rp`(w?#X9u-(d4HTo3 zt)jX$?+*y`is`h3h2=Q-qokUCXxzG-Ki|`ge{_@PV6H6uv~suE!F%fR9M$kxh#n^T z8yLdBEr6FAaYLH`wqzW|{9pcC2+hAtxc|o}Ny7DHhX5(n{LP`}-9EW^(Td&HEB{It zL@MBusGIdVnS=iBByvE1-%Yncu)?l2pGTGAYpE5WX1?ds98t;c6Or$R<>c%Duv+b= z%-(M~`oojdBlR3#=ZRV}#Flxm&d zc4Gb$BZczvvafnz&&tWqtUhu2rrjT8*gOQNWNHtqx)29uF@qCLq=N0znke{-v_GU3 zPAkfOkTkjSX8rq*q5HtxB?hX{PucxuFN;l+z1WwIV!iuWOY0Xlg{26^_!0c~e!xV4 z;Df^@I+WYOTbjszBWxc(&)G%4yQu_Ht@9iUlqb_x+4v^YTAgrb(pTx527D85fGvnr2lj1pS?iGg z6?4)C|4F~|yG?-#v~Nt4(Y{Q@gxBa{*5*<3ygk;axZ6FwyaeHAXz-=c-fY6(+B(?7ty8yg8zxmxsmK zTQT4AEiTv2@Y;F}xGGw9;k6Yc`1^)2dqN9#5IcUav#~Kf?cBeAtk34h_Y9PY$ua?T 
zGPcB-ueUDLs~PHnGxafWSvsSccw8Yt(3a;s+kX_1Y*fHmVa(TW=~0Vsw(9>q21{6S zHn25u(Nrk8=Anf`6po`~q*3%v()hiUPO}n4{biWwfTo8_(kqkQ>f6E)R?$qxfiCn= zAi}!M`N6e#cj9|qdcCn2C2FbP_%JUsd6uWVu1%O+VjrAU#@BA_=I=zI*}nV$&>y<1 zLaljoJc#C`{>>z#^J$lLt0jw zrCsY@fdaS)d5W$VgR<2Q2K{}KK3+Sf^S=_ju|sWd>*M!UL^fss$REW3%sOd#)tftl zy>b$i93vgu1Ng5>LVj?*evMfP2@3rY9~6Y2^z43)gUowEP?lDk@f+2kTW6qHL2$N!)R|Gtb=x@dVC2Ml8Q1Nx(7Tw0-nOMLs%)YO~i8zs)2_qf@ zP|Z9Y*_BMT8aEAc$(DTo_N>=z`u<%DqtzwI+U$)sV@7d;8u5%-a`y~E-umwl#PB`~K~c-WSJ-aqOC>zK8*ze#qolc<5O zIr`-qEHZS}J4X~C#e&_c$Sc_2_`j*3GO?7B{<#Y8Zc(4R(AAM~ylc+fs8Pk&U+us7 z2grI)sBR)Jd_K?+nCYu`mRY$eO2*h3CLA0UjmyLxL=B%lly`f%1;5+LAG95JnaMT& zJYNj9#S4|IaNCK*u~PnG3G%J23Ezo7o(xzk9^kyubJHE^;hxcvQ9B7(gsh1n{7@Q( z6X|{VOArt4-lKa~x`@BOm8Ac!E!eAjc}*uc?3Vu6s{#L2>>MP2_TbjIIqYR{!RTuv z5qKD5Abmpc-T^bLcnIAZeM7!B^smKv4;a*P0QqyTW15X0KuPADW*iwHt;xadA&E4X z2WR8tNgJHwgD-Gr+WPf&IHE;Ta1lf;4*G=)Epix62plOUC#*wUf(CkziW1Vc6b?i2jCob7KE& zqmvoq*6La+EvYYM-;UZQU(iniBj>76w7&jA!$n8lnSc`Zwg2$#vyXfC<@h|5cg|yo zSw}ZRkCYc9paw3^Hp56auD+Kq)CRt>G}t^a5M;c4nrLo`6+DCKqlDAYRaRcJBy4dN zlbXmuI57f9D4DA1Q`S>&#Mo@XVDq$bk1D)IXV?z>V%Y4^jy|fq`}M{)3j%x)F+n@4 ztVdC#y}+Y?%=cIvtB2tEl=sBJ!weadhPEtaA#ou2KmsO_hd)3U6mokiGMxTZb-fo9 zPiy2gxc+I+UoQ~V%sf}~Qp>2s_do*UBom4*UxM5VJk)@OWkgCrlDv}V9Bw2{ADq=S zkx;x&L!G$|#3?^DYG2xbPgH*HlIU?-BK+bUY4$9a8$Vs$%BfqJNu)<=%bc;P*`~|} z>WW?VeLQR)q?Va7Q3oL_=SEN&GUWS^^*dNaH7LTR+_E^udxf2%6}JQ-7pps3a<=Ug zA2UV6PVLwBG>^JYngsA?ogOsR(cFv4B|W_y3xDh}XX7>#wa0?Dx<16rFUifgId(c< zBi*_Cn3tPwO15)nQscn{PmwZJgxiCOuLDq>hTX#(>9r&9jcBmVH)37ICXEPag@{1{ zv8fO(-9{Li6$%S!oKG?=UdFheFr3u(4EOOjmf{8`dN%`K6AZ9bCey%%3Y^SH`AUKf zJi$p$>8A(hh`gyi+f{p!@iF=rDGy0X+4VJ}?+ZR6=}JX4g1@Q+{>sqazGUSW808P& zbJ~4W=WKo{i!rPuawP_@(sIHwh#=ozC}!J?#+Pz47*vZ@xa%>s6BwylOnj&8tft z;WU0WAt$E5(AamHdm z`>~*De6d6~1~8e^k7r$m|Gle%scSm7nm`lBoO3^aRL;)#Vc89!3a1I6&f8^0cKr5y zi~%!+%M!4o`WKZwm~BXm3Ppkm8n21sY0%NlQbSVye}8v0M62tk3wMP|Zk2zxxNJK@ z>bvZ3_R3v4LE8QO>_685WNnm}m7n>4SoqLgGLykh00*5ie}-%*av74~lG*RLxv`ev zFr>iO)9$_BNi-%1fY4WS>F_EX{4p6`X(?@og;31e;KuhHoNcxS-3#QHD42FD66-!? 
zp3)+)mh{h!rE?Hgt$ibt?l#%eF>q46%rc=>D7Gs3e67L2dIY@-OoqJTN~N(H?bhUm zcs+3Y^QNirU)DE*!;!39g6HO&+W5_oMVi3UBwn2%vI~u>h`@y|OIb`>+9D06Wyj8; zAM^e4K;)kr2l(Tp^WrKuN-m@zCJ+2i|H_b0U+T9X>FZypv+~TF{F>A&XNR*P z-bU>$H0Hkh!vqEP9H2O_Is-&8PbE+`K0-}xm^J@Zm+AzJ^ ze-1_bb7*nREgHkq^@9mb9oZn0BkXO1q%ATsJRz@rvwsO4#HgVGO!`9tZ_QR2S13E+h zLf}Pri=OJlhir=EU|(GW`w#Z63HxdKnnI3J3VaY9HwJ=iEU{5_B&_0}A%frl(!KZ( ztrXSDjMgww!T&zh_k;xoYT4LrQ}{nv7XJUXWBC4GrPlxb z^Z+o_x&BWKb*HtxVn7?SApx*Qu7T%iw83APy&o4}UBl2_u4&`x$8&%#VCtHf?s^VX zqF4JgV7eGKbdwg*Uz4gqZu zFK$`x(v0GsG|);zBY!boP2l$xGT3^y%Gej(y}=7?6E8FTCV}AWGK-ZK+p&NntO5v3 zmjab#Z8AmGt&9#WLNaIT`B;YoYXC6*kE4yDKH&6Az%PX+4Lm-C>qcS9+HwOhxQc*0 z8owX5-&jM?A8>*l&p71JzGNx`G!{e>R-M%kt9sg8U+Um4J{a4>H;R1iqn)X2xIEUNQAlC3W`W}KQOZ$xFGw+hdC*> z4yob-*OLszio4_^f^u2hh9=5|TF1!CJUc+ELgac}i9W9H*jzdQY+|6eYu9|L{`n%0hp8GmNtQ%kXUz z$K|2DS|0A;mj!W06_7e>3&}BJ=06kt-Xj7yzTx%(WnKVNdIHS+!U6ukoISKc6L>R- zq?RoVS}uZb;`E$l2DSIA_%zEapN8(dYrz+ps9ad&yW(&V(VkC3fqG6L-6w64ojN3n zyxUvx>YO!JvpmDB+gp_#b^q!ZfzLIhiFBIxLamw&IEA)w1oWHzlC8=*IPLa&E%!9k zd~+;~*6STJ(Hg3FnISmN)uH<$vT<$CsHJLn^eawz8`YIx~6e3wZjW} z-tA4yR%4Io8C@R*6_mK)lRnY*|UBn?_m2Fa#c%`%7=(xt$ zmJ0f?M~0EtCre-63#Sd4B(A7?_osid%8IUmkzvLzZoX z-}|}pgkfDu(4PTK6IS@fc(9T!2560SeXzznd;gs~*38(n{kKg$BWTb3BQGSW zS~@3b%ft&`pdjY(JJS|BZiTLW%rwlr51r5UQGwytvUddQfp8E*b(<0V>v7(^Y3Qz7 zLFIx_E^%}ZJtQ!fQ$qd!tHrwf_P8(BD&|7yd4niO@%W=1;=B^a55yX3VpED^*d~p-ozcyB9$C z6}KOith7z@xF)l=m>I6L|GPHj58OafIW6CZ1vWaZ5DLMyyU95louZI}tE{sG?rc=! 
z8m^iVi>fAmv$-zI1L{s>`*S5WdNR5rs^whv`+6bJ0#Hx$rki2bS{NhFJNrc$V^KFf|L{B8Wx zVK??Qp{ay}cs(`MPSJ&csGVTYV4UUXSq$FV)nIbp z=O=~6g)TZqqP4SU(0+^Gg&+SDD&+qHo`Yfvhkf62h_HRg*4$d-?9Z1knjQ^YgwU>$Q$(cnl5aS=zmL@{ zL7Yg%tck@|5VAV%6rE&vWswhka51PUBPZpixZ_^_PheO*^$3dW6H1l*+mnD@qB)tB zSMF!Ff7Qet&4@{j34qGbG(L=?ea5FY!d3L03A9n3!h%#jQH0cvxJ3-#2LC*j`+6bc zn84C0P~YXfkr!$|$8EOF2AL9_;{*Lm{+t8@SKwOR_A6n1tylJVrtUkpasQ$g8JOA3 za#SqZX&8QMI5owLnm@j;BCiWfSoy10+{j78T}p1sawtke0S;(%<`pC4qsiq~G$U^- zLWs$_g$p=?-U@{Be9FDEwF1mr?u;L$hom5J-MYqZ zM`n!tw0Z$!$4j?};{*$`_SS3(pK}c_O;$&HgH)0C&pV!`2|}6_Z`5U95?d&Prc)O> zr?1N){NVbI^g=7{ALQl@3QheP@9jtKM_BGm*Llq{QtdB>h`{gM@mvf^u-6Xsnq&s; z6YzO<84+;N2Y;bei>$skVPm|-#N+zsxtvIah|06eDx+w%6}8&Q zuvNXM^A_|fQk}AJXpgVCPRo0?oMooP7Gfb;y-JiN*ES^{Ct(#A{oAz%*si8KhSbZt z2YP;K06cWXtVdXgkmgisXu?^tf1){1SQZ?4MSz^?UtAvD4Bx}+Ph2QFja=4gyM1?? zbR%p>(R=wcKKtw>dN^;!yi(;Rfb+MN3^>&|co6Cvy?3LL0_ZUVc}Ddlb^_hHBptd; z+i8Ct5<_f@p7xgY|7oyaM!R;A+q8^Vw9TKUWGo-_q?=FIrG~AhBb9km&YXAF1joME z&#ikKS~j~L*0kF9yFc4vL8{BT&!AM4$DE)bzlsgUIRZ28gWr3AkQYLF?!hL2FB=T4WMk>jm9-Ljd|um*PBnqy6R=T(xet|Y+ctM3akfI>jkR~5VmIBb_9Y>9-yVnM?N;I?if*P zVz3MxJId4ujDLZdq8+jiP!8&!!m*ryeTT3L9@>#KRq*CTM49p!dwpYkfnwz1_%ZZL z?OYh6<&&ve zOj~%Ne@3P^;|3{=nMb z>#z)*^dWr*&uNiDlrW*K#{jQtI+?*<)2>>6kB})KmFT_!CWTe-MT(WupKJoBi5Gq2 z+h4AfBtr|_R44GuPF_;es8Zz}$;Ya9x#U_Vx4THA9`!~faSS%>#M}B6#`TbsWf57{ z*}B=wU95L)RQKlC?q2;|>wYseYhj&n0pHzen33IS#6_#dKcX?b-B5Xnfqh~StNk$z zpx}IYzj8nUYFhnNoF7&U0X(_=a(vww>0=dd$FbmuHuHx`%USM|zvYt)!W3`VoR>TR z6b#o&fQt^m!GblR`|;tfRaAYi<)~Y1$M{W3nJ2V!rdaY0i^;lPib3>=o_l-Dp3E*`ngjB4=aOsEJh+U0)YuTIQpL`JGsAebn z3AOmMqeh3k@z8_!RUFVZ`i=u7UH0v;Kj&E0t%B}@HS{ujjOYR% zdms?IAbhKDC4a6^np~5`*LAV`arFattbXN;niB~KIG4JL&#pT$g2v2w40iI#E-&2a zggHY9np2OLb1rNn7InsIr@Sw2>$9n(^WhSSfEi@@7p*QLHPz-ds%h8@Oz}ifeltKD zk1q&f+p@_EKbz zrZUEOjW6>K_4|#|0@kg6376+=IEXtdUjILi5*7dyX!bq>fDlTDO7T|^i|^>(%3$+U z{J6OX@)x+$P~!;#2YL~^3uUp=;PK$=)%GnJS1|=vv*1=Nn?BQC#ymGpUNfDss|N^# 
z!1(zM2)G#T0;9>!d#SqVRu!?zXjJUt-A=xdoxKwXtWp+9BA=AH$0F1@7&df#5oFcsMYO=%P?V8MwZ=jQluUdeXuLYhFtiwSGG5_*`QTV7na9jty5J zjCM6IsH#lcuayJR3#FK0O`t;W4dfaq0wwWZcd5KfU~E4~Y8k|W7X+|0SXSu8wX*20 zlx_QS@AbGZYZn&k5Y^Vw^(AHb@M;)+SLyA}lB#ST9qWzA@UzD*Y*AZJVhv zuyn{r6pR;VCGUR2mmqkLEH}<^pK+V0-Apw@zG;|%WhwgCHnv_1x$_o{Edem zO)i^CLlFu1R*+8FM8#nH7*B*BC;7H=ZJ(*Jsod>dyK+RY^!6TH?h4Z*5S(@Vr5)Sp zdUT=(SlWU=WgWyUcUTtuEw`40{r&}7<863;Hg!T;<~P0HG`zY>Sz6yO%xAnHz+W?& zLwwrpKM^B{eZdf2hSX$^6|6R@GCF?$2cG_NI^K}DP^bpyK2!X_V^S!pEX#fNH&O@q zZ20l%Mw6lgZX3+HEcl7E-`&=O+)^jDr&`w?Cn2PsW$mclPO5yR3=T}nDD6;rH&MI6 zWA$v}1q4TB@S8uU^r+|Tc68#F@w;1D9livr$vRsxJ8rWv&AZb}M7Aj%Z^J}ikctE; z5SW;@pUNDYnJ|rOFD?B7bob)HBM}=uT3hRh0R-QEd91CX z{(!UR-t6eeEKIT>T?}bjB#TI%R%`n99H`Gk;S`zx-PZZwQ*lAl0Je|6yd@s3@Is@^ zIH9iBgHJ?iuhGSy{qJ*1xx}8^ffD-0z_KpEwoXNCFG`N|Q??;{{Ih+B@W{fu79NJZ zO*JYOHEdO#U^kz6Q_z=|Kau!TOFM}x)ssH{V55sArW+5VMol*-*<)Vys%xSaZs+db zJy(dxpL~j!sG(3E4A~0xawretS65N1HOh0NhH}omiV^ri5MouWwYHiL&MnS(7cFUR z5im)}|LYe!z8b+8Tf5}yLdaHSKuv}vugrz4Aycq-+gIEB%FMt57Dj7guG2s1&?7&S zPxCE*oOTAKD_~cb$v(HzYr1XCqs{>M+|oEy)0t{V z^t4@5*mFnkrNY0b#?VR179wFKgxVd>H45 zitoQ@Da~Q3bv6g`H5*BIVsJ5cgeW1nGS5x-UG_*ilwNYxI1-O``qotk`m2<&y|z+A zC(qO`Wp|2kH1|O-O`^Dt9`+M&6K|vT>d?)rvKqN#J=GtJ%l zs@#BUD}Jfp@_FP&vgkBWBR~-Noc{fiAX>^idm$gsaa&4rC%)iK?Q(Sf72yZSB9+0z zj^`1X&W^`-0+W6Ank&)YotY1%p3+R)c~I@z-dgRWHJdv&m#tULjfNK25*nfQmz_JSiu{;cU+m8LQ{+Xj6GxR56&KSg~<&8=jm)N2EHC#^1ncEs0PwEVWFvmC)R z7{P-a0ibNwya!&^0fdNTCigoK#Me|-KxrykO8XqBuhpK~C0xf7KnOs+Mz5QWX1PMM zY;2ZQYFx|d$BqEm)@~Cj`w%c5nyg`rPd^*0T>DfJvgg|g6cQ`3u0ZXr_$GxoEBT!n z!+Kl2UazNi(?Hn=@u51K4fqLEJ`9On%(sE0;o#Nr)^%SvNz8zVi(=dYQ5-FAh@2(e zGu5}Le*``u-zhD8b9=Z#?pAmwumvR}sPog}9XEld zYz%QnH z`Du4Li#o6@IGRl2ZJ5?c=7Jr$-sv!DBAy)e4s)%P-KkAWma(im_~gnb2ha=pMwL-s z&8zC=Wo-@wcEqA_8bDhf1Rb{1z3&DaEYZ=NH%G;XueX}CATD=6=438qtiB>?O?Xc? 
zL{$0gb_md#CSaJSUiFE{!Ds9*jHuXnW6qIpH1ai*3*x2sq;WY+_Ud70qqXD=r=RQR z?L=cCTzP^<`jm0>O#G(E(EDhcZAwh7)Uj=ue)=ZReetRJe4G*lItSUg3H{*h3m(QC zWO6Hx2p@R)ZAncY8peAAJ)mzN&;j^`Cd5;gL6ldYYJwl|`5EbIj#lkd&)8#)@d-U@ z;o7T<3N!<(Pn=i6X(Uc-YDn%lVg@aH>w?H;a~HI3`7V7I8z+}mh&3zC@VCEc5-Qd= z30rLnwDekA?(y@g%U6%$s3`h6;FY0|5~|TW)MH&o~@dQTZK;^zptrFPws8hYcmlj_!1Xf>k1=vEdZuc}@|o6_B!FJV8>^(n9ls#JueZOK$?s0HEou z;UX`1sXt0IC+R4IJDHH3$KsX0e5f3!E9(MAWfJ{Nvvv-6qgB0(s&=IyffbxEbit&3 zX4jEuHO>g36)u18oT>8>Le*P*kGLZaN&HD^F?#ry;Lz>*PKhoFt&|C6kJmw7RZT9xp_UqzGwSiRQFa2`R-4Qvoz}ne!=_!caH&nC!T$7 z1|;kw}lYzRY~Ga9vH1g zlZ_RBfXefN9NcDM8S6#_?gc^T%U5O#c}KcTn$hQd;5KTqa2)EC-|p6&ibwMWWSYu= zq#y?qgz?aRPRfJ$DINC+Vn^-Dc{-6;^p@ghJ$`w4uuI0ylPpa1ZbRJD0|`iR++HmK zZ?or}5gvWd)xFn%l<^`4Y!M3ngv-u<`{bh-9zB6u8}H}9zK4}{{tM^x*CL3`^5SpC zN9d3ATcz9|X)vRvUs%=O-MFzzuSkTOu3QbcePabFG_HOeKRp;=Z z7W$g4!x3Qnd9>)o&~jG`1d*4^;Y*lhLet0=m?`<|M3MZAHLy>tRZmkb5^?Q}ly-$Zpi zu*0_>#ZdsGs%V&YLt#u}nj9K>$j?2`K+iwc=Zcm>IhHqE@FA&t*@Hqeb=34>9paU2 zH-2ljqrL(2ag;=lx93Uo!rX+g)=D%$(YWTM6j;h^>0urIp{rjnkeJhZII>c2C+(4) z_N&W{K?i@plb}k1d?(82QQJeX$=sHrWw%=4Lmqdy3tBlyw6i zbU_fk?(S|V)FEzI=6oVb_^1-urGI2>3&R;ertOxJ?F<5g8OIq}uLlE#2f-}-YAh+UY! 
zr<_Os;aBeVzGbaF*(r+NAXc+V=w|n;5tz6j)xz!AV`5(=__F6p^{=90my>sJ+U#)# z#%NI`j~}z)Rg(6#CVUMK-InQf&7xnvuS{yRlsli=2qG0wSpf7n&Ms#S?YNzd@k)O; z$pY``ACv(iz;4}MjS*x&FcuO=p_;G(V0T}E{Qjz`B37!h^WIS1gpH|h{sF5o2fges zHBp?kbrI3rhpLu+e#^|-Lh{7vo#t!y29vb(Y138_(DC@76zIZchTzZUR-O%n>>Kmk zu!^#)4M&xB{T-keSwTeZvnlIv6M~s&TyqT)cc(YYsVBkgeH~n*XHqo|B&6b*1}cbJ z1T!bLKmuCs@8?eIzH<{&LK?hKdki^=-HVgEXfa*wORFv-jPm7TIgnq0Vcu{2{o+8q za&BR>c&u*?OZ_ES1X|UJ)Krx1b+YQq7dD}%|5BP=#0fLIi&v=fu5yV!+YrV}xWZ*f zFf*AWJe<~6J^i%*r^N5}GRSfh47vj}bj*_IE!`tKpA!v9{__*C0!Lb)Mf>+|CCSeB z+|?9n{`yvBHP&uqyIsF922U?-*y&LL`Z%BEN;K=Zcc&SDrwJukoWgQ?rAgGf&A9>f zxkL=O2I+9YB=aGcTpOPSPsb07uZ|))pe=fuqu)X+)Cs&BH;Tu}0&|w>ZJX0+4KYh1 z_8m=O!w!|aJ>jJ>bz1J^v&~|%YE`-l^U9>>4zH#(6jl75q1ES0Fbm*&{>^qnTG-yUza6X zsmJX*neLNs;++%+!O5QGUusjZV}BT+RKMu(70}2R$Jp)7>7Q+J6Pb4jY` zK-rS&nnW{Vbl#rS!0a_&K#zyvl2ZUgI{hA{y+12px5g_syWGz*MEO8yeo3~Iyj7%% zxPd&((D>DU)feFH4);9r0szGB+0&d+cel8g&3Yeryo#WKJu~$!m(o{AFGAO@Lni-> zmyB6{ou?_w(%7<2srOH=)Z~hdzv-cU<12K^|DZ^J+FRAEG95|naFG$SYt_ocg`Z7e zyE&&RwW>2^TB%y>Vp4(9kjhHV8uXqi6fye1>Sjh-F4TRu7yIpA&?B7Isdouee1(7d zp&F#c=!lW1SO>Q@+`hMfou@=hw4-6JV}r(h{4kC4WsGyKb&8?;kX&)5>$A%X^2z|a zc+vUtVyk;~r2dX(bGvKsG0QpR%rM+ruh$@Xub$=m?+k^3g)PZ77p6j^gS18MDVpyo z<|t?8!K2xj2n43=JOo?BKIP z(Ezb!4)13B)eto=qAezojUguV%9@L$^8LVTw4dvf*j&^rL^L*TS4L#BxN2~yMVrDF4 zgapVS;v%?U8@bHl)=dzF>UjgiesytNP8JJ4q-!&2pWTT~*8ltN%G2*GBj$3Lzh{hO z0XN@X;Bs~NgMY1;tL%oZADMtErq??h*oBbmn?!vDG(umOQKAtn;BH%>A(hJcUVA{V z7hJ|IN%V3zVDal8!62OzxqTA~$JVt5ofX!5rqcWn{ag6&savMaQw$F-96aZ2FpUN~ z$#J4`<;5yMlM-On4uQ)|F&!@!%<*KoK3G3od57@K*9J-h(%2C7dGSd~LO;3HmAi=B zy=~ku+#X#KsBQQ6ZYV(ky$&%_~>Bq%Ii)V(KGa-h=G%!o#>i_ z=GhOpx(NzjmJ9nRTA)kDw;4wFYX+Ird8m%=qe5{8a>zAItey9w$bPBGC2@xyif;Sy^9xZ2S(8(I4i(k(#qSr zjK%)hH6r;UcmqJ$Jn%T%z@F&ax63SC2p~Vwc1BP3s>;Wj3iOsqALH06^`7FtDqs)F zTNKaq8&!k~fGZ6%8iat*^)O*=c}Sf7pM3P29uJ?@-&`R79uLoPpZNr&p^G|0!Kr;x zuP$k?P*u9vR+oW`$hXvc;-)c_{& z*zM+pbF7$SisAnv>8it;df&ISw1`N9g3{e3h@u#jpycT8j?vvAB3&X93Zr4L!Dt2y z7$FQ8T?0lVUB7)lzyHpzYv)|&Jm)>{b3b+8zVhHBmDNkxF{fSItXPvk+W^n 
z)NnRY2%BuES-!+h{4d~i0f2>sE5>|O>81xd?T$zy+|+2B+D;x*Jszep4_ zwG7#7*@tE8di}yoXLL+&>mn+@fXr;Khnr-4`{g&R**RF9 zaus+8ex)v|V_s+5j^$M536H$T7juW@f7zwBH=8*+Pse<@$w~)mBhSS4mHqpf6)^$h zUIpK{KfYVkgNdar__Mu5G#p)XCb?4_`PR!RY5YzWZ?D(P0|%O}UhB_0EbHDHZ?I#R z>tzbu+4_8=fU;6+v{>^yS#ESm%=&68PdkCY#GdmvcN2Kyvm=ia`!d`-AR4rLn}KzO zQ1yt~WZ(R;q`@fX!PQZ}-s89kNY`}Uygv@Zn_zV13l=iDDQGxtzcaXPTU?q1jCU;U z<_AAZdW_{|`tEUEvFXr9$swIwa=$7x_m#{27W*}>wDy!r7a`X_F4M(`84#}JRxNb- zf0=4Z-a=7fyl@sXyZcn2@=_3V57MdzyQp*t<>c`W^qxTY-~aHHO))_t@eyjWR!WYuKW&6TfvAVK}w(w0s8e6i_t&;;EvM@!@J3>>=^U|=B zbR53OZ?u)vU_QfdDob1b2RDm$to9_SP^>0e5@hy)g|@W5db*fQzi0nRe}`07Mv5tD z-Sk(&YIk6&aO#G4-`9WF4=dNBlizi{9?v-|C0buPu* zGtV+;9moX#7HDSdPJxeM*owDgG!|68J!ZTb@X=SD3Cg^&ypvVD9y#N_-B=X6<2diJ zy*5#zCUxuEcZ%76otJjlCJUuwuwu{rqAYZq9Ap*r3m26KyTK%$3J*HI2M*~=Um-3@ zwLtt$tc#)}6a9<#>)ctF#Buxl=c|_xi+tQ0{^D%-Jy{&*AD-s)`8pIR?S#Dt`F+3_ zEGA|wzr2)&%{0UIgYPgXXz5{<$w_WE>jOjW5ZoqM^|SZgkMuWfk`JXL5{+o>=4-2l zn^hWQ;$C*UgItUV!}Ylg^&Vs`M?+aSW2dC%4i?8Darq7{ zw00*0tXk3guHiB^$VQ3Up^+>>!D7|`wCL(g^nCC-WlD}F^a&;Kx`d;URO*3M+`Xr8 zj$~%(6WM+(qMT`Z#gwXmF(xjQ1s`QF|-&+AVIJ zREc(NnIRphd0R`N9JT6zF`AM49Xvve?af z9?R>a?~50|Q`~zcrcxtRCShyGRX7o2%b!sX?d`VX5wbYJZXEE7^OQ{flIObP=)$NQ zs5y8Xu|C+2xaLQ8%VfwtFw8?N<7y*$vOO_$lz z_c+9yyg~3EC9DnsKh7fd<Ulk7WQ=qBWKXlQPLp>d`Cyj35i zUF;9lnfbET<&D0aA2Y1Z#JfV7#f_Ts))iUz2jAo%_AJD={xs3A3iLK*K0?Rmo;SxO zd*McJBr-2PHN9Lp4lwL&fgI_76?Os_Ak|ju#RNYlm}$P~?`~zOzw|$1UNfA>3^quw zKZPeDr^{IGIqy>AiA>RU_WE>|`&>=`PS69ka@-#iT3k8qzv|~a$meONuWerk4v1aP z+gDb}kl;e|0GBA#Wu<)x2kW{Ap)JRg$fN7n&?Q#TdJe38SArGcgE!yj(geEUG2PU< ztHgnqCFroP|Cds-H|FBi@jGT z5&bXo3tU=g?$DF3N|)mK2{uj7wLk3l;>9i<5XXFNW$brIVyjpNHS-&G6-E;Di&MKQ z3PgM+tQIo9O1+jR82FfDii1FUzPLO!3OJh-Hc9OQ%Qeh#vowSc^GbupJa{0z>B=#A zr`x5H<8S!W7-(ar*capj){B3jIJm-m)&$d7xy%FIB-nkSv*&*I_fhOq!Lbi9 zQ%(ZiwNEu;MD+t1{o-D#Ktt8CgM)2h=efrXyrwIyR(GBWP+dfc&(*`Qwj+fcQkaj- zlr`klUX_uvZ~H8+FZ>-UVRL0>SHDT!(UE;IWY*pur)LE|7Z{g8HGE_1>*HL!U>iT# zZ*~YEjJ(MAbcB$3q?OTp<%{r7MOn+8=XK0(8^B(80%VA%803~h3FLxDbddj3wxj$A 
z_SSJv{}oKre3{#R<(CsYLAnJFK2zUPx;q433`bnlbLu*cjmLLnPkB1z!)Zpds#U8**iZF`s9u-l$Vzt5 z&uEC=QmYn8!(u+frO2t&JXQskz|vo+`4@jQ&qm@hdF^A9PTnWNK-~>&pDwCz9b|1` za%()!)R7yM5;k(IuRSR>+i1vZ1gZs8X1-L@3FQ$%<~tiAy&r;B-eF`xPfVbDW};KH zTiiicjS%7~WQATOKvrBB*b%Hs|H{jumUiA9agy5P0>_C__gxFX{WMtWH@o_6)iAb# zt8riOvS$d1_67T*<1J^9z<2)liD{l#vtK+#>NXIoq-0X|z^a{(gNEi~j23A(pmlcH zswgE$%~3v==2m>J;e438DVE{8UU2863TM^(79*Qo=)=x||BWMwdiBt#{`#Nv z4=YCYY|e9{FNt~L$KmICG`9AA;j?JLb2^^;3pM}k6jUV8UD-VZXH>DsRv!TtXb{uneGw-iSVpn=chpo;eZsYpq-y1PN z``ynAd7hRZ_>2EH;J?VjnKJ)(+oGrGSVm*AE)gwm#80ghSQ_vUFWe5)*}h}s1hr== zzK-s7V3O_=HNA+IFiZmImWWfYsLL4@Hez9uAU+BZ%p32ta#u0)Z_Uo-3ephGeObvz?Km8 zQ^zFg=(>-F(Bt?Mw;}zzoRy@Ye0=xH6weX`5{sbu8WxNj#fS#1tzG8sT|oBjOw+UY zRd#GVSm@M~?VGKD>7L~;%=qyL;v(CWOp7%w{mW80Z~0KI1#IpQh%OOYOxF{ClTlK9^IiWH zBPV3!aC2|aqZ}Puwy>>ZDGP*8nM20trVZjIO?3}xC{Q0}LSDpPtsRZwTbxW?9 zDEgZ-me8)nJ0!P}25xRHVha~`bl#yBf;v<`2?FjZ<&u#;_{!B5$H5a%sij=`gG2wx z=^5|+%ST!@FL>iQ%m@gf5_Glqq(0{;emWqX-i#ej_{Vgg@ZH5;%M3F&f@|hwXjEQ% zyLUZQ+;?wC2r6#7*Jc?}K6F@hL{*egt?vx)Sy7O5{2bMP$TLEpLK}6eqajdRTPyRa z@`QwRLFf~UU7N>!@NC)4HA8S*Xjk&+t|g`Ohtr{9si0w_5tc#cTu6q$<27jbs9x|DP!KN1wsI9%zHc2&*K?JPI;AKnXi?A9 zx#-;9gFJ-RcE{o|SHG3%_M9A>8&nwU=GP{#qRLnbiJzsh;|G21C_v`U!5iB#NE-##CRjbLid}94`a19245NoQj zAXi2=PGjmpl~j#Gw5j-&w4X5}`G>d+%7#C$1FWw;*6C+Rv%KRRFNJk$JSXtWQyLl? 
zSw-BPsE~=HPK3WJAE3Fp*iQ8=Jp#?ZTCTqDT=jGZekspsYLSwXZvOqPQ!vr8@vjJ= zEeF0hhWw;<$LPpMb8v9{ym>^!J2dwLQv`T{qzo5|B7jqQ&fljqVO7JMB2BcFel2S` zkT0M7hUUYBe~j%L*$zGo`N;=mONAicH!82jYH#ux4yO>OTpZkwcgtd9W8VeaJvgKF zdf?0bz4I->;SMJ}bjkWxTP9>fgA*BAH*~fbqJ*-!3-il3!HJL!ddg8I=j0Q+)6>vk zRG50^$ASmrmT?EhDk@!>I5Q}8a1?~C?dmVjd@&j0el#mOq=A>9BWY^gP_teS@*gvIVGMIaRHk$*fNtk zpNh4#^tpWR1BE$=T(9-goL;W42x<#`24f_&>OO z7P$RS8p9r#F6Ot=n`VWUzJ|(c4GryYP<~#F)C$9!LUXWS(T-_G3xSB9I6Yb!Zue|6xhkUo2RMVLZ-JSDr_ni zY%&57y#@yK@St;I1uJEORB81O8&h2=>n5OI`_eyB6wIx5Jcxfv`!&q)c1lk`zP{vd zM#Of`gd;+)a`054%n0Toxgk8HC}N68gaMK2x#F3SRs(mUy}U*M38| zFleq_liH{ZJp6AlSbBa)XOdXb3D2jFbo`u!(tll0!M1eWUr_u+z}CbQ1fS;nlhrQy zoqu0x0~qZf;F^|QWeG()qXxPCUpQ_E+77;N!lExB8#cg%UCRvkdR>4SG=RMh?a>?m zWoCPOJp;KkqTZe1${zSHF39Bu@-e;!+7oyjxce8dyMn-Z% zIQ^y!Y-d=%&|$Ks!N&cZ6tx}nWPoqktWit zf8U5m^Xu9~n^qEZ^EYie1R0Yr_j#QC_yF1Bbqc`HQ0rndA4NYg58AIoCC5P>NkP;~d7_?e{kFL`W z@m9@9lCTWs2T*2@!PwoNRz9VIIiejo++>#3yW68RLX^f4mH*$Sqk@+~*qh&BIW|-v zU!Zy=Hp>Ceb#y(qP2`Cd6(S_H@%5-{vprwMf1&{$@-a4YGJ0{Ss3cfB4cec|b*;gp zZNZyje9|Hm`?sI{?=)&Kqqg(Q#y@qZmpyP?L;Q%lY$YdrjAN=Lom{g8L4(`?wMIko4_;Uv*Rpz5q??sv2s9bvJ03Y0_Cw zyiTxMZL>=#pwZ#l)2(+(xkp!Rf&Wr;+8kS~ctbLy6&0q*1F8Xl6@}393*Xc9WCQa- zNz;PKc*EC32}7Pr#Q7QC$SuAYml-G7b;v(pA+oyTVvxvyr0LY+JmdHKf(XR1H~o{_ zW5AxCxYb7~pAf4|&1gIJz-xo4CpWmAhJzz2`+p8ac0?p}WjIRDs{}ZNH!~yo)fhFM zE_fUap{(%BwZ#`mM)qK2ryjzq4Qo9b^M!HEM^4^rrI@6r!<#8ewbi0eEa|)Eh}mt; zdPO!ojP4xtn>@UmP;JT&jb2Qifqz(lRfy$W%zTmsd-wbR)r?AaIgLZYu$-*w6L;9T zL!DrRv`~y-Un^5iO;W^AedFg1v5p>1G@zEF!t8)!V-ycvEFzibiX<2&2& z3AdxGsQktMsInH>p%qJz0v#Qsj2-Ftl^a4)igsvNhxbKwCbrttfBI`l%3@_;tgyZ|@^K2vdEcAQvFpMiT8WgSoasgsF3M9i< zn%B>oRxDU$+5-xfzbxI`DO#$NeJ7teQ0EF=FLkO8$V8r4$*e;djtb^){hoK_Ox}^w zwNMF6cwbnxP%wF7{r%e7D-UlEhtCUspQPh_Z0!9dgosw`E*ebDvFxDvR!rq6LGb(B z#W$>P$FnFEFD7eNN3OSAsbu#bW^9-xjS@M_*yf9|B=vX86 zMOKhoX&?rEn*iG~(ym=Jd0AEh4=D;kBR_;0r1}6Kv{)hlkN=(O<0AB0J9I)1=a-8=Czl%7_$g#DNAHRa~^yn(ZQ@<&HhH5hF6&1b)<`ta6Gtw530QQE1$X^64j);2(>46@$fqzBF` 
z@CA2V5MVhtw6l2rcP-q-SG89nsDHX!{G|Mjf5Il#1Cw$ZVqi%QfcS!~$EM?GowLvW zZH7gC%y?1_X}Q1`jV04{)wA`j@I9G)w=@2nzu{==jmoc@F$VqrmaqqaSdQ_>xaI(!YKU9m$tpeU~lm%GQ^{UUe=GzqdoQp#0ZKWu^Eqegw2e%rtEW+_evdK z?v;O|qh%iH3cyiVFSBZQc1F(4K&Wz6j&f&VegWz%(obmO5(LQxo-=_-O&wisDdS^V zH2+4`d|gUr^?j@dGEDczjYmr+KijK;SbJa&$5h#SaYVz>pNikE%kFcgC_EhZ!WsTy zaP#7)GL8I7T7y>C>6)T>=^uJ(`W-J8lr!|snSxPvYzdTcYK`S7-WBz1qaMjUqD3BuLPnKDn3}ttP z3av$L`e%7sfarINY>H0&S}c2a^#)F--av^~AZjqmV*%3K-csN=Rjr!)Sa1g-DUIzr zF< z%zN`BOZy?Xl_BalRauFda}8c{g7=|I4@uNL5UmddF&)2&`{P+axdfDP07+P|SpztkcQbhGqT? zM}lB2OVjm=Lf!^1E#HhaCF4VhO%Fhufzm?jIWaioo2ab${k-?l4JUVOy{m9^i<2qp$@g8bFT`lj`~GPr7|j1ftUU)@ z8)1a9A61s24_5+#-tVnwgA0_`HJ+E9;OvzsY8yE{AG!`bOkxP= z^j&(+gxp?~<#SbA)0%psmCcME*S<;Xo>GzZ4p=>=h-5gO4x3V+{yZbD`{*@)(<}ex zkPssmk%g@p2w!ytJ>;?4&NxPp)6MMkmoaoFh!g1rj8b)JW0a^Rq9dp`Yc-}Z&ZZcf zo1~~9bgFK=&9FLKdk`BJ8!1ktA9dgN8^bGEg^vT6xnw5aCO@C=(t8J~Xai57mbHh> zpWVyS^cl{qfYF|1N@C3W2b|iz6TQ{v=6= z-=PK6r4pJpQ8zE;0UB?n5Hb?16%&m#r2XY+r!jHVR|TZ_3P5nifJ&yt;#YMWMT$m7 z)~wJRTW-yJkxcLGzIvtUP;bEx>`s@RjSZh#pMzalOEvlSkPIk$orRDP*fz zClmdjEfhVrw=vB@6w#VODl7Z)A;&GkU2qD}E9ayhT2FEAT<%{!H`QLp<%`DLl3u)m3i2Oj$leX1l`JL;YbvF~~6!}!R zb*fRc@65N59UHii7eyLAiMH=DRlvj1X8h(?PmO|Rloy7t6qe$+o&0)#2)NC#=p~qE zLbW-{m{UKVbzUzW=?fWsaS!!%Fjsu-q`c9$y3%Z9$*o?8x4>{G5m@ll)NGoIeotZt63{Zfm+2Oa>f%3GVYs-j}S>l=m4VT>94Jg7G%o-1T4=q{!ui!05OtWBNxqqAHB)YZWts@dk^QQv zoxWSEtiGGa8mop;wF1(68LOwi&*cy&ChFM}N5G8U6A#2j$G@H6&5U7dH#s8xqN@Sg zSTQX_hS9n~G+oO4)?Bl0-W0P0k6JDp zletW;LXg~)f1j3Ro9tftS&Ny6krFtl%XkUG?wV|U^ZF)_MdLy?2-Z9_fTuU8@ZE!| zHx*z@%1&0JvetYEKk`2J>k??a-m_}GSpKY*Cq*5geOmk^=x~ePZlRJ$gU@hw0f%uD zsa1nwyBqx{9U7o*tctA1ye7;?eah8mK@fJ>?FzqFa|MCV&UD=MQAr7>&8qJC>7OsZ zSr*@dQaWO=jR71SF*eKJp4;VRG5niq`bZxTifE5XVvKO})jZqVd|?%P6R^`0*R+5G z;N~b%={TIElcedBXmWuVk}H!rA;4Q1!hlM0)Qz8CN;qlbtxGPw)#kTCVNjVu-J0l``A9wZj=g^?7d0)dQM z=ia>ziP;JyFZLb748II6yS0s;5w%_(RroD+YA{^xX`y@lw!u1w`Y5>QGJ-TVKKWJ2 zVEWkMEmgT8$N@H3a4{hO2wt~tZb#f~dbx6Y1+6Jp>bH;$K-1j!fS9G+fyJMT2{#9W 
z9Z$~(2@QO&7k7*33v%Mi=K~(3r$*c9n0PoP%Dz~R=s(IhM^g?~=Q{g1{rShjS%l zx%TI+bW&D#7n|1#af_KWpk3H7s-?om+zn-Pa53~eOV0Os$MO|N!Vjy9Kj&uGYXAIy z&{2^Xy?b%Cb9{b~Mm>6+vvJxo(>^R^KAtJn%*R9@vn^S%nwj0cv?E7(;D>}C_@%&K z3xy1re$+btZde{<=D$z;R zefSvRdKYZ7vGM;1F$44v%pW{@~|7v4*z!as8X`ewCCxC6H~; z`~>3}x(Jq&s=i^q($Ur$iENP^sZ7vLA1rQp&+wI5#sZY;F zAeP0>vq*NGt6{Um4+Dp+6|FZoo3t4vEC?TG2M9$>9QM^LZ|RvRUo|=JiWx{x^$Mn( zczXNJ*F79|)b!p|@h%n0z)8{g3ahnmy#nQ%NJL+N)POh)+3#F`6 z{R20c%L1f@Y)i9)lYm3sHaIH;?u)fyv64eW(!Bx^=vagox(17DSVA$zyRj+4oreQ7 zy3eAlAzI%6S_7z-;DxjCtAduZbnLqEW7E~EZX~|xsz-}rkN1Y>m!W`;3j-mr5#r@A zy5#Tx@HpUSOLLF(4kpRFd;IqM&Px;1tWPUI7uN~<`&y}EKD~h)c^1T$Y2wE?B74Td zY_dC7OB+FK0phs5WjTNij-DF40gs(snf38TWD6wow%1) zgQ+c2@Z@~x!4RT)HLL(>Hh!jLaXbZzVFCOqDY4wWq|7>)mzcZk==KZv$MCC!ubk&y z0Qh+klGV3Zd(y$rZxrDdJS6ikFo2=T)t6f1w;nsmH=Tot^XjoP)7`_hB(TN7jq%|+ z|LfWv6mYuHp8qGuPbZn}z~K??lvllS+A1x_OjQ`^(BS`I>6Sy%vRqR^x6i^;{cl_ZCC{ zp{q~|I&Pxbn`0_z!Y#-k`ApI_mGsj&O9W+A8ampAbq8_isn7%<~}w zWcq9fZ0}RLecaMMAHdQ)-=?4Po~w05 zZsw(IKPDSHa>RCe7hn!t2(g_rp*Wd?97($9z+8Qx7u|2pq_N1|A$Xu>U6-_u(p1YJ|HAT z+6p2sDRy(T`tRhQH;w@V#}nIm(GHUpGnm;XjRwKiK25figj^FVBis29*vxj9!TSmH$3VY>QdI z@PK469TDQ&UoL2W;2akw93faY9)=I|2=Mop^KX-2P#U^^j&1oU+U+?lldt&@x7Rqk6czd%Mc*d%XF&}G3nuczF;Vbs~UF>Oa znPk*tww~k7W7u$CL#c%8XR%m_nC);u!XG;RAFnP(7t0#4Qct9$h^;>sMi#H|F)Dh1zc$X6P+G6W1I0~qTq4Pl0KLwEMX1Eie$Cy=3y@ub|%R!O<2OUtr63@#u$nAzqe?Lbm>;Uhs&?qb3RSG zE*u0`jDP)}QS!XnMO#Y*E#Tw;Qj_ zm|v(YC|Alyj_rI(d@)Wz>r`>$g)0HW!>rJ*Myge1QR*7wkF^V2U)@0J=Q;BCePvjw zrwmjxkoF8tl>flr+_4=5t|*UKz-EVi!BM18Sm!-#pSMj*-qd8mglMzHSrac8VCh)s z(XxN7Bo%u-THmrBK<`)UlBQ;??_v~E-cuLut^Thm{x`-Iv}Q~abK(i=Y%XX*=d=?& z!1xNrFgS_i9k`fEPkncOiU~2j$RQuY2bVY+te09}xXkQHO5c-R)PYH#Tz8tLha&qm zLhfxIR}++3)^z(rbag23a3@&IT`iZMc$}R0oUCD@jz>gJj)FD94}DQ z80W?8=Q^s}fE#&Js#`iTkTW5^*G@Jp*|gmrc8|>B-s$HYbSqrfNsyNPl_yaH4erdu zwDDdIyS{@^D(Jet?}LEkQ?(?Brw_$XLFZJ5&-qvPdn4REx}U}-BcQ+cYrxUwS53R; zK9|ydhb@^k0eTxLx*tA}l@4EaHuY>TmML)sOOi@22G`%@T3RKsym*MoL*H|SU2?tZ zC4!a7jfTiP(a8w+E7x2{7#{)9?axTN@Wj;H)l_( 
zUq+2m_pP*wBVVo_k0{!~Sl_?*TbrRDJMIiwJxX9&erEyx*4eFUo@apJ;PRSnzq>b> zwPe$Q!+wc!Wh=LAK?gg<#;>Sq7={`h?m~b2zHXg97G)7+)~oJ&j%uU=UK-lw*}EoH zLHMvOlOZ=z>j{sSGe)R?stjl|%wiZ518yX*3Ihmla#Wb*G z#f)g{wW)MW!%#s*puC&@aln2I|fwrg>+l~~)?oEiF#$cTj79wij8*e`Be zZb^H%G(Xw)W}?&pFTxjCwUC#5a!MQ~h>stY2QdR;X$5LXrIfIJ_YjS4D>usfnZS`K zt}3;CpBlVarI%y^g-^e>#s3I_b(~F@f9?8F64g#_)U-4KO5;sg%BcSs2MGm-?VXQ7>{$N7?Y-Ky5KD9PAoTsn0C(*{!b)?AEB9ZKj2umS#QS zpC{IDRrTKp`uvM}%bvtdJL;0hn2bknOF+Oxf+c+fn~D*83%j9W!yYmWei!r{b?jbb zN!BB2k8W<3xrQFMyTgsa#V3@YgUhd|;hBoQChNyg-6)HmQ5rwrfAX@?dM4?AK6jBv z6sTeXplkdcM|4Ty!cn&8VhuOuV#Gn4R~aU5S6x8RR>cScnPxlz!#hgy_fnaRR+4S)ztVmn3iRC9$q1K zDdIY2;ko24=I|MGjZ`3^TFPu8|I}btIfM7dx>UV(c-u5&pHuDnR`p-d&Ql4wGl6IO zGYKZbR84}TiovVa^1_zsGvv25%eoQb8mek)Ub-zGbt}6rlX7>@O`g+!cp}9`9K^Pl z_`edm2sV`Ro$TJsTvUQ{LZw{U-=0*hQj0g0QX~&2v41=JBpnaNUmMq$Px2kZRHt=1 zxzmMHBF9TU?f%vIBmrY^$TOiY8J7C<1X8R6PWY9#IQGBA$t zk#yl!BO+s^^&6d=Zkz`Xtm^f2p5fcZljr8eYf0 z@A_cbFtf$F4zU07f`ZPI49AdSeXvg$k{_XtX+BwQsQQA=0#w;xnj*jyP5;x!+pi-) z2&cX_W{GB2%098OgKI^C&{LO2p1{NU9y3HEgQqbY^`El#FJH;=XO56Q?GpQ86}B~6 zF%vPzp=SVkz4k`sa5Jz5;A{i0NLyq98RR0^R~NMRV!xW))Ru6|qcaM&kUPth1wKv*Xi_Wnt>wJ^K>Q_k@)uL@df{19>0;poNbVTmEz{I!$ zc!toh_T9Xw(l6`l+RYS}YOdza>&+k~OTKSvHAmO=!e$+w1P8a%pH4r7oq0p3r5D}E z<9i<0f*KyGWu@g#^6M4f&YU>w^>h+MFAdoGFHh7KE{9A6_qZHK%wG0Q*#U37H%|%+ zHcy7q$^+iMK(C7^jay{DEtxTUE+ zEB#fJKBIcyyBXA;Q9X3(S4A4>$M51JcU!IQ$V@@Gc_PEMcN{qS0NZ+LyA-FvT5EA) zSc$F~Qv|yoKYk>+tXF&9TzL;0(te$+j&3Rn%oe*p`KJ-YRed3Bo1xYlZRfscWwqk=(j02v^jiHyX+Q6FqV{1?!tEC$ zK1r>oM;~`uOVr;@fVz#dBm>xf-gJ(9bWIZ3pJzk2HL4u?wTjoUYX})VzS~mjNT${u zA}qA$vf7nM@qmnDxumjPCDRU}|;l+WuKA0n* zrbjEmMx!8b)Wc%C1a{FBt+xD` z3tTEo`Z_V=g9(5HM&EHq03OSIs8AY(j31Hw?QP3mh>6op_ZHxO@7CKjx6wUNHFmFm zibo;T_c*|4zea#q3cv-WKdNQ+%0H%%kd`h#>*mTbLp64o#msda2y}bOtL`gDUy#__GUhEfBeWnTV$(~fOpXgA z&;{i_9}5GP#oyo%E$?W)z!B!Gn`Kv;9{+l_ssOW^sSi$+wh^G)8)u%u$3_%=;ogn% zp8>850PrVpq8VrtkTv*Hq^x7#JjB?54I#Vf>99bXu^L1@cSS3ZwPqV#_Eml@x z196|#FAZ=#&u-e!o~iQrar0PE#Ao~&vl#Vp%48p^8E|I2bo!R~qS{0ME86f#6PL7% 
zESbMjzgI6gbIZz17ngN|POWi%$kzix*iRR^P=EI)i`A>}wbZ7_1LFzPj>xqhk4M*4I zL$(%Qa(?5>XG7U9NNP8hLgp8-qnO`{<=Q1=(XA(B-mtF3awEvwc!sJ!;+{_6_pKKw7E5*np_y4%fp*3``^Z4$#f zaT|kkK*|<~$9{*ysXmOW3rtSAI^IDWKmn=3D0QQz8M&G-=_mtl>~tk{Rf)Uf(#{#Hx+QCtd~{A?iwf-w!np|m)0X34&O#US!$gN#FvT6$|@R{?N(WP=Pp=1_i|o=2SYbW)mTACou!(k)k}50t7N7K*xL@J z5Hs}a^CFvaNF-Td_5atu^U!)KYSo8`xqqJ6hfBn4+N@e;v;8OsI&NWhzGi6Dvb)x1 zkzbKZZtC%edGaL1ARXl;#9(>eak4r8TW90m#;qv4IS9DwNbwQB`>{)m%H8DFT%EpV zRRsDi&>8VEZ_#9Tgxzj>&j#9NXY1T+iATLn(20=Sds-MNJi8VH6fb?N1#}RPcA| zI@e=ebZyWcQgN*};8n=z`w4I19L%h>;A|_sm$fT}t&X{>q)|e8-x$AIpFXsqjgFq*v^Cbpj};crhyqGFCM!_W{DgAoCg*Nr z@IumITKmh24Ul^(X)A-FXsbVVQd;yXWu`@_Y=7dW#_sl2BAfKAB5Xa{=wQs2eP$ir zOaBH_mG9myDLu>2qK)9Metko1`*V@}0=oTv?EMuDl>4DQ4Wqu!Gcu+8M^TI=#Wi*B z2&uC2_{Ocs+^)7EcllTwoh}Er3)9#pR zN)h=$ZuAk8L_3}*S4ed0wc?@tP@V+iiv^~5Yx2itXUVKQ)`M8!6^P+*RWnx1d@{6( zdKp$$SZn^M`;SyvVYE1tMgdm)EM!a;b_xIuc{<)1WW@7-e6!RzAy&3~jlxq~$Y4wq zgp~hK1||&-hW+QO$zKIyOrZ zorBZ>mvXE}`3dEh;9E+claTQp=i+X5~-6P>9oS% zXDq8>Yl~`6v{)Ex(_b0(1?BJe=14la_4ew`!mHbE_=sD@?tS|7Y4dL_?Np^&efxx|1I{hUCBdmFs%qJeH~t5Wpw@sABHyWaHQS*oNz)vf(f4-y81vABtl19;J3? zeKu-xh8yjNBsMgJ81^*SO+A{du!50brmAfTDoi(oI#72vY&WI>%#DBadPxeCRgv^0 z?2k;+aj{jDhjl9Zs+!C{l{;mH=Mh*VK$u4X|IM4XDr^$E1}c%rg;tE~oJ5HxU);*! 
zVHR`~>2xe`>6i@?j%XP2xSL_y3Gc6ya-NS@y;Mk{+qiKF9_T%9&#h9e@58L_j#<`t ztvew9(eQjjMzr;Nv2(R4F`X&SlQ5&IIzwY1KYVU3rwhM@%R^pRbn`<_N8}H6_qlf$ z-=o3Bey`qpZKD^X^$6JfA564gKAhbC+X);NYFwtfk-hT$D}NWrY%;dt9a8Mn8-L0d z@$W6Npt(`o_n{)XKL#V@atgEOOvrU%T@?pE{tMkt z^dw$xXIRHhH{!`lRjXPxT#UVwEE>7Q>sU}4q6xBp(if>%%f%26T5r#{!2eChV{#My zH6s<*_s5F%omQShR!!W~R5&7xf1K3qN*>(Hh^tRwt6T7#4IAG~e9R@zFSN9qR21A- z7B9OIVT}Vnw|TKQ11+N*@b?X2dclUZhWGggcX%39FJ}31+T2ReUZm^3cxWX24*qi` zHA_#746(wI`f&91S3Tau53A{iNA$!J<-Y#O)otRZ;>=pRGf)(vq5RH|b(Cp3V{|3D znb9`ShmS8q*byEJIquUvKg2AG>=bM6JlPIo%17VpO<12Bv??Kukzvb(f=s|{i)w{o zT$sWH6Y7N&@{vC&R#fMO37eaC0Jl>EzpIGa=^A@--J3S5)v`caQ%jM_qj;P1gX3!F z%x+v;)3Ct?C4?7i2_^UU^us>n5$CWf^<-QdriDChb52<2TN&dtXa7bx!$ zD|WK-e&mz} z&gOmH!c;qFQfU5^1xRX_9CrO`q4)4$hx1ZL^j|N__z<(t3VmKJ3|CmN55y<8a|G@c zEt5b|NiW1iL=0p3xbqNTP|%^1N~7cP_Q)MgGcpcm3$laSa%l&3rp;d7+# zBrzNxrF(qSS+$hu&O&dc56JnV%ni(uEO2D;X94snjrV2xHDYvH$rB*NcKlgWS}J9f zcr~F!#FZuR`v*z;72cR5z*}i&$Y525 z;5xRpEPV&hE{1AP_9k7q?zLz8k{qg}#kW^B^|MHu%vzDepXc*ZChl{~IPtkN>JPUj z)ignLe@t*_lB=Qq%<2)nj zm2DU@>lb(hg_2o94t!j@twVSsXvET3;KhHnf zf8uxRh>E?}64CTAOR)0y_gmiA%WX6S!wqW;#;n1@T3WmC07i2;&nLq0EcN~*k3VR8 zI1W^#g`+g?Y@K9%EBU)tsW?oVi5Uu~;gELCUmwf-La@8JiP+a_Z(Mro+h*{p#s9qjO*j+w zVa#;>Zo}@tD4{y${`2I&nfWUb=_A)y8=tHb4`A~KlRuYJ%028{Do{j!3pYV%+D=1NpWaV_!p#2=SKin0Zguc}`iH-(Mq zGgTMMS{15_C-EX#I}>IrIDi!1Bjg8Vdi1zA?W@(s#Bt0_LUu@z$d#?WD-eHTUcJNN zO0A?-i|Wb`l*iXTN-xE2*B`cUd#TiydGr%YS9R{Y3I~k62@9WqbJVA7tH=SeZoUfM zaqt)R(EV-7Fk@ci7*wzo(c+)ejdKfPlkr@1jd)b-L_0aX?ns{8e;bChe7L`HC3Y@7 zP7vDz4@;>D<3k;foQ{p~xG9=RgR59$PZR`lC}1HO&$`tjT|3Nn%!Tz^qy=fPLG#AV z2VJ>S&v$zBi{V;0eK)u%Z$??XBvG5GzCzx9qOPql9Tqf~9m^$z4(`NE+%p<3(W6lZ zCv}LQcIyJ5D4@_>-9bzjFt#2Otn7G(d4-NBS+C|$W;=2%xeD0u0M#Lu@?VRnaz4KB^ac2w0p%iIAFef zDN_~_i(-D-NkR|QAEt<2w|8&gUJpSv+VaSsEL--iA<%u(|9J<;Fj?~YlFVNyWtrg%7 zBM8@Bx)AGZHx=l`SK||M0Cr;-<|Bp0I|?RY)9=aSeA(#wPv_+)18r<3Iu-f*$)x0i z`6c`HbL)&1lO&5!>oWRx$fB^+gw0d0Js98dZf zp49YlL^pTre$opmMWj-NAS0Op^(>5_!KF9bP+KFfcHf+2ccD)5T>DgDEt9RyEvs 
za77iy0q9AJ#y`PMSlDQQ(mlT@5kFHvZ_{5`96lbNj`9V%#MH9=r$Wlz5xLQ{Qh^0zp7xDX%^GcEz5 zj&UzGHo4c?)(ZPpqJLqR>>@)K6xO3j(jAX9-wlO|<~N^eX>e0e0vc-#)+0eJc)H(Q zn$xtWWZyEMbRf#CJDBNzXEgSO3C1=z_#@LHYR_C$-%UvAs?x7NrB=$+_Af%DIO4SA*Hay}lo6SAus2}1%O2T* zJiU<`BjUZb z{6i~wj-;#w?=FA?vW10e|FbqN!sZ#!SGG^&iOJ08i#U=0gNNcvfUWjKRC191ju(o- zo_NU z6MHU!tNR}w5JOl*XNEv&X(_-BD&+xH3X&g;ZkZrI8*-~wlkLjToy;~g0Uy=W)w@o@ z8o;4DbC#B8yQZ9dHZy|Kt0Ec>Mwo0Uymhs(Kjd<*&Yi%5!x3ZY2JN6+y3*U zhz@Cb!^t>{;Ip7E0Ul6*kq~RqU$(vgaCxkwfy#&K^fEwI>T5pT<2|ul{X=DivUnp$ z&Z4Rc(Yqdk*1$)I(^|sgo(wCc6h%S|e=wK;9nTGehSP03QetN>0jSS=R2hNq5A{{*i9h@u=qTK7=0Np2tbrr?u^f$pqM2k?{Z~RYtO%p#T-lI^guYs);u} zzK4Fmy=~d~(uEZNA{64RfmAz#h`6|ca7OfJOE#y&-TPpG?Z)oWMoc(B-V&YkC#tGj zV|mWd;PZF|q1zvdi$!E*nM7tEhj(ec&prhZsj z+(?bSLT3q5x+p}w42*c54dj`;7m%x4>zlIKVFBjd0UCG2oG-uR&sKHaBD6{8JiRv0 zBAS|qJj@Gl?o@FW;BSM&bYhf-nLAidfXd!r7Lm>@*E838BPW`V%jJVGMD`2N1(407 z?-Qeebi?b)j>@YdB03nr9Y%}MTE@kHt_t3XwHi-jmHW=^?D~ET*kv`ad2{*#klw=i zs}~0@&nDDx&Bp&2b`fK2i1->~v0(=wzHR@(>7LQZSW+sBhX7UoTUdVrq1c(H1&Wd7 z!w5h}{}x9;ng8AfUJMNX|K0!39smwU1t6O_XU+K`z%nUA&o~pG*$3lP?|MZx@d6cQE}&I-RH1@u>Ck*&%``cmTPW=fj!%L z`0JlX9@UPpn>qJ_BShC>FHwlC={#MzVv0 zR8(%WAtWl=Nk02|Yo^MZiqz#&ZYS*#W$VM>i6`za1-w2Ay1`9aTxl}+4j=<07>RVJ zWGM74b9n`{b+=g~rd7C_()% z9z|b%Yh@qgbaDBcz{P^?UWp0nir(UjGKK&iFy0sNSa9bNofsZYUi2||JSP;ugun^ z{RS+_Jk}Uh`w+~wyGc%y5asrsa&B`z6zC(t@^Re0ZKSa#q--U(IxT#ETO$X}B9k1+ zv$Q}0eO&cv!@6gsGE;p0a25?G3W-$d^{#23sPVH2NsnP68< z%iib_72tjl@^(a3QlmeT18?tg@kVfahwJ9Kc#u(K@C?r5$I2Z&(Wi{a_;*QUE$C+# zNY;GY(AfZdVsB4TgddjRD;?A6OGmeR!!makt)T&900XEvu0POwIOnmBGFSpinDLx^ zZ@kx>AI^5_rcm2ObZvc&ZnKpO&m0W}a$ibkQrX6;jbSXTrxy~=W}`GOH85+oW(vQk z3C$#i?-XG2wt^rE7vt6~=Fkj3hbmf}2&kQcsM#O7&?#3NN7@S&mZ0fi@qG6^Aed98 zOuIGUIBO`Em-ZD9;`x_)=xuBRF2k3ev~-wdV9u3(LVGo=Zg-Oc;T1cuobYS5b0h}5 zE41A)U%f#taqWE1w(6Ab!#RH#G(27@w|56z!vH%8F5Xe|EAj;U(K{o zH}z+ymso(kRp?H0+SF!G*y&=lBRiTrCObK?lJ0EJv<;PpPW85KC@v%`ykiFixJyfF z7!*n+eEtX=6X}fZK7^2TPgDWq$r-n+M^-I#~L~UB2m!m!*UF zTBG7}-u}5cnW}JR2IIbFI!?qT^rUogK1@&EeB*Vwk7o7qZ^+)ak~o0v`qfT9Xaw~N 
zZHNeyZ${0&3tGB(qjg`c4&~HS(?uL#@z2N{0c9=Z$oF literal 0 HcmV?d00001 diff --git a/openwebui/pipelines/examples/filters/conversation_turn_limit_filter.py b/openwebui/pipelines/examples/filters/conversation_turn_limit_filter.py new file mode 100644 index 0000000..bb31939 --- /dev/null +++ b/openwebui/pipelines/examples/filters/conversation_turn_limit_filter.py @@ -0,0 +1,64 @@ +import os +from typing import List, Optional +from pydantic import BaseModel +from schemas import OpenAIChatMessage +import time + + +class Pipeline: + class Valves(BaseModel): + # List target pipeline ids (models) that this filter will be connected to. + # If you want to connect this filter to all pipelines, you can set pipelines to ["*"] + pipelines: List[str] = [] + + # Assign a priority level to the filter pipeline. + # The priority level determines the order in which the filter pipelines are executed. + # The lower the number, the higher the priority. + priority: int = 0 + + # Valves for conversation turn limiting + target_user_roles: List[str] = ["user"] + max_turns: Optional[int] = None + + def __init__(self): + # Pipeline filters are only compatible with Open WebUI + # You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API. + self.type = "filter" + + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. 
+ # self.id = "conversation_turn_limit_filter_pipeline" + self.name = "Conversation Turn Limit Filter" + + self.valves = self.Valves( + **{ + "pipelines": os.getenv("CONVERSATION_TURN_PIPELINES", "*").split(","), + "max_turns": 10, + } + ) + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + print(f"pipe:{__name__}") + print(body) + print(user) + + if user.get("role", "admin") in self.valves.target_user_roles: + messages = body.get("messages", []) + if len(messages) > self.valves.max_turns: + raise Exception( + f"Conversation turn limit exceeded. Max turns: {self.valves.max_turns}" + ) + + return body diff --git a/openwebui/pipelines/examples/filters/datadog_filter_pipeline.py b/openwebui/pipelines/examples/filters/datadog_filter_pipeline.py new file mode 100644 index 0000000..af1d2de --- /dev/null +++ b/openwebui/pipelines/examples/filters/datadog_filter_pipeline.py @@ -0,0 +1,121 @@ +""" +title: DataDog Filter Pipeline +author: 0xThresh +date: 2024-06-06 +version: 1.0 +license: MIT +description: A filter pipeline that sends traces to DataDog. +requirements: ddtrace +environment_variables: DD_LLMOBS_AGENTLESS_ENABLED, DD_LLMOBS_ENABLED, DD_LLMOBS_APP_NAME, DD_API_KEY, DD_SITE +""" + +from typing import List, Optional +import os + +from utils.pipelines.main import get_last_user_message, get_last_assistant_message +from pydantic import BaseModel +from ddtrace.llmobs import LLMObs + + +class Pipeline: + class Valves(BaseModel): + # List target pipeline ids (models) that this filter will be connected to. + # If you want to connect this filter to all pipelines, you can set pipelines to ["*"] + # e.g. 
["llama3:latest", "gpt-3.5-turbo"] + pipelines: List[str] = [] + + # Assign a priority level to the filter pipeline. + # The priority level determines the order in which the filter pipelines are executed. + # The lower the number, the higher the priority. + priority: int = 0 + + # Valves + dd_api_key: str + dd_site: str + ml_app: str + + def __init__(self): + # Pipeline filters are only compatible with Open WebUI + # You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API. + self.type = "filter" + + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "datadog_filter_pipeline" + self.name = "DataDog Filter" + + # Initialize + self.valves = self.Valves( + **{ + "pipelines": ["*"], # Connect to all pipelines + "dd_api_key": os.getenv("DD_API_KEY"), + "dd_site": os.getenv("DD_SITE", "datadoghq.com"), + "ml_app": os.getenv("ML_APP", "pipelines-test"), + } + ) + + # DataDog LLMOBS docs: https://docs.datadoghq.com/tracing/llm_observability/sdk/ + self.LLMObs = LLMObs() + self.llm_span = None + self.chat_generations = {} + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + self.set_dd() + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + self.LLMObs.flush() + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. 
+ self.set_dd() + pass + + def set_dd(self): + self.LLMObs.enable( + ml_app=self.valves.ml_app, + api_key=self.valves.dd_api_key, + site=self.valves.dd_site, + agentless_enabled=True, + integrations_enabled=True, + ) + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + print(f"inlet:{__name__}") + + self.llm_span = self.LLMObs.llm( + model_name=body["model"], + name=f"filter:{__name__}", + model_provider="open-webui", + session_id=body["chat_id"], + ml_app=self.valves.ml_app + ) + + self.LLMObs.annotate( + span = self.llm_span, + input_data = get_last_user_message(body["messages"]), + ) + + return body + + + async def outlet(self, body: dict, user: Optional[dict] = None) -> dict: + print(f"outlet:{__name__}") + + self.LLMObs.annotate( + span = self.llm_span, + output_data = get_last_assistant_message(body["messages"]), + ) + + self.llm_span.finish() + self.LLMObs.flush() + + return body diff --git a/openwebui/pipelines/examples/filters/detoxify_filter_pipeline.py b/openwebui/pipelines/examples/filters/detoxify_filter_pipeline.py new file mode 100644 index 0000000..73fc3a9 --- /dev/null +++ b/openwebui/pipelines/examples/filters/detoxify_filter_pipeline.py @@ -0,0 +1,83 @@ +""" +title: Detoxify Filter Pipeline +author: open-webui +date: 2024-05-30 +version: 1.0 +license: MIT +description: A pipeline for filtering out toxic messages using the Detoxify library. +requirements: detoxify +""" + +from typing import List, Optional +from schemas import OpenAIChatMessage +from pydantic import BaseModel +from detoxify import Detoxify +import os + + +class Pipeline: + class Valves(BaseModel): + # List target pipeline ids (models) that this filter will be connected to. + # If you want to connect this filter to all pipelines, you can set pipelines to ["*"] + # e.g. ["llama3:latest", "gpt-3.5-turbo"] + pipelines: List[str] = [] + + # Assign a priority level to the filter pipeline. 
+ # The priority level determines the order in which the filter pipelines are executed. + # The lower the number, the higher the priority. + priority: int = 0 + + def __init__(self): + # Pipeline filters are only compatible with Open WebUI + # You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API. + self.type = "filter" + + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "detoxify_filter_pipeline" + self.name = "Detoxify Filter" + + # Initialize + self.valves = self.Valves( + **{ + "pipelines": ["*"], # Connect to all pipelines + } + ) + + self.model = None + + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + + self.model = Detoxify("original") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. + pass + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + # This filter is applied to the form data before it is sent to the OpenAI API. 
+ print(f"inlet:{__name__}") + + print(body) + user_message = body["messages"][-1]["content"] + + # Filter out toxic messages + toxicity = self.model.predict(user_message) + print(toxicity) + + if toxicity["toxicity"] > 0.5: + raise Exception("Toxic message detected") + + return body diff --git a/openwebui/pipelines/examples/filters/dynamic_ollama_vision_filter_pipeline.py b/openwebui/pipelines/examples/filters/dynamic_ollama_vision_filter_pipeline.py new file mode 100644 index 0000000..9eb01d2 --- /dev/null +++ b/openwebui/pipelines/examples/filters/dynamic_ollama_vision_filter_pipeline.py @@ -0,0 +1,91 @@ +""" +title: Ollama Dynamic Vision Pipeline +author: Andrew Tait Gehrhardt +date: 2024-06-18 +version: 1.0 +license: MIT +description: A pipeline for dynamically processing images when current model is a text only model +requirements: pydantic, aiohttp +""" + +from typing import List, Optional +from pydantic import BaseModel +import json +import aiohttp +from utils.pipelines.main import get_last_user_message + +class Pipeline: + class Valves(BaseModel): + pipelines: List[str] = [] + priority: int = 0 + vision_model: str = "llava" + ollama_base_url: str = "" + model_to_override: str = "" + + def __init__(self): + self.type = "filter" + self.name = "Interception Filter" + self.valves = self.Valves( + **{ + "pipelines": ["*"], # Connect to all pipelines + } + ) + + async def on_startup(self): + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + print(f"on_shutdown:{__name__}") + pass + + async def process_images_with_llava(self, images: List[str], content: str, vision_model: str, ollama_base_url: str) -> str: + url = f"{ollama_base_url}/api/chat" + payload = { + "model": vision_model, + "messages": [ + { + "role": "user", + "content": content, + "images": images + } + ] + } + + async with aiohttp.ClientSession() as session: + async with session.post(url, json=payload) as response: + if response.status == 200: + content = [] + async for line 
in response.content: + data = json.loads(line) + content.append(data.get("message", {}).get("content", "")) + return "".join(content) + else: + print(f"Failed to process images with LLava, status code: {response.status}") + return "" + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + print(f"pipe:{__name__}") + + images = [] + + # Ensure the body is a dictionary + if isinstance(body, str): + body = json.loads(body) + + model = body.get("model", "") + + # Get the content of the most recent message + user_message = get_last_user_message(body["messages"]) + + if model in self.valves.model_to_override: + messages = body.get("messages", []) + for message in messages: + if "images" in message: + images.extend(message["images"]) + raw_llava_response = await self.process_images_with_llava(images, user_message, self.valves.vision_model,self.valves.ollama_base_url) + llava_response = f"REPEAT THIS BACK: {raw_llava_response}" + message["content"] = llava_response + message.pop("images", None) # This will safely remove the 'images' key if it exists + + return body diff --git a/openwebui/pipelines/examples/filters/function_calling_filter_pipeline.py b/openwebui/pipelines/examples/filters/function_calling_filter_pipeline.py new file mode 100644 index 0000000..5ea9957 --- /dev/null +++ b/openwebui/pipelines/examples/filters/function_calling_filter_pipeline.py @@ -0,0 +1,100 @@ +import os +import requests +from typing import Literal, List, Optional +from datetime import datetime + + +from blueprints.function_calling_blueprint import Pipeline as FunctionCallingBlueprint + + +class Pipeline(FunctionCallingBlueprint): + class Valves(FunctionCallingBlueprint.Valves): + # Add your custom parameters here + OPENWEATHERMAP_API_KEY: str = "" + pass + + class Tools: + def __init__(self, pipeline) -> None: + self.pipeline = pipeline + + def get_current_time( + self, + ) -> str: + """ + Get the current time. + + :return: The current time. 
+ """ + + now = datetime.now() + current_time = now.strftime("%H:%M:%S") + return f"Current Time = {current_time}" + + def get_current_weather( + self, + location: str, + unit: Literal["metric", "fahrenheit"] = "fahrenheit", + ) -> str: + """ + Get the current weather for a location. If the location is not found, return an empty string. + + :param location: The location to get the weather for. + :param unit: The unit to get the weather in. Default is fahrenheit. + :return: The current weather for the location. + """ + + # https://openweathermap.org/api + + if self.pipeline.valves.OPENWEATHERMAP_API_KEY == "": + return "OpenWeatherMap API Key not set, ask the user to set it up." + else: + units = "imperial" if unit == "fahrenheit" else "metric" + params = { + "q": location, + "appid": self.pipeline.valves.OPENWEATHERMAP_API_KEY, + "units": units, + } + + response = requests.get( + "http://api.openweathermap.org/data/2.5/weather", params=params + ) + response.raise_for_status() # Raises an HTTPError for bad responses + data = response.json() + + weather_description = data["weather"][0]["description"] + temperature = data["main"]["temp"] + + return f"{location}: {weather_description.capitalize()}, {temperature}°{unit.capitalize()[0]}" + + def calculator(self, equation: str) -> str: + """ + Calculate the result of an equation. + + :param equation: The equation to calculate. + """ + + # Avoid using eval in production code + # https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html + try: + result = eval(equation) + return f"{equation} = {result}" + except Exception as e: + print(e) + return "Invalid equation" + + def __init__(self): + super().__init__() + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. 
+ # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "my_tools_pipeline" + self.name = "My Tools Pipeline" + self.valves = self.Valves( + **{ + **self.valves.model_dump(), + "pipelines": ["*"], # Connect to all pipelines + "OPENWEATHERMAP_API_KEY": os.getenv("OPENWEATHERMAP_API_KEY", ""), + }, + ) + self.tools = self.Tools(self) diff --git a/openwebui/pipelines/examples/filters/google_translation_filter_pipeline.py b/openwebui/pipelines/examples/filters/google_translation_filter_pipeline.py new file mode 100644 index 0000000..a8d31e3 --- /dev/null +++ b/openwebui/pipelines/examples/filters/google_translation_filter_pipeline.py @@ -0,0 +1,185 @@ +""" +title: Google Translate Filter +author: SimonOriginal +date: 2024-06-28 +version: 1.0 +license: MIT +description: This pipeline integrates Google Translate for automatic translation of user and assistant messages +without requiring an API key. It supports multilingual communication by translating based on specified source +and target languages. 
+""" + +import re +from typing import List, Optional +from schemas import OpenAIChatMessage +from pydantic import BaseModel +import requests +import os +import time +import asyncio +from functools import lru_cache + +from utils.pipelines.main import get_last_user_message, get_last_assistant_message + +class Pipeline: + class Valves(BaseModel): + pipelines: List[str] = [] + priority: int = 0 + source_user: Optional[str] = "auto" + target_user: Optional[str] = "en" + source_assistant: Optional[str] = "en" + target_assistant: Optional[str] = "uk" + + def __init__(self): + self.type = "filter" + self.name = "Google Translate Filter" + self.valves = self.Valves( + **{ + "pipelines": ["*"], + } + ) + + # Initialize translation cache + self.translation_cache = {} + self.code_blocks = [] # List to store code blocks + + async def on_startup(self): + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + pass + + # @lru_cache(maxsize=128) # LRU cache to store translation results + def translate(self, text: str, source: str, target: str) -> str: + url = "https://translate.googleapis.com/translate_a/single" + params = { + "client": "gtx", + "sl": source, + "tl": target, + "dt": "t", + "q": text, + } + + try: + r = requests.get(url, params=params) + r.raise_for_status() + result = r.json() + translated_text = ''.join([sentence[0] for sentence in result[0]]) + return translated_text + except requests.exceptions.RequestException as e: + print(f"Network error: {e}") + time.sleep(1) + return self.translate(text, source, target) + except Exception as e: + print(f"Error translating text: {e}") + return text + + def split_text_around_table(self, text: str) -> List[str]: + table_regex = r'((?:^.*?\|.*?\n)+)(?=\n[^\|\s].*?\|)' + matches = re.split(table_regex, text, flags=re.MULTILINE) + + if len(matches) > 1: + return [matches[0], matches[1]] + else: + return [text, ""] + + def 
clean_table_delimiters(self, text: str) -> str: + # Remove extra spaces from table delimiters + return re.sub(r'(\|\s*-+\s*)+', lambda m: m.group(0).replace(' ', '-'), text) + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + print(f"inlet:{__name__}") + + messages = body["messages"] + user_message = get_last_user_message(messages) + + print(f"User message: {user_message}") + + # Find and store code blocks + code_block_regex = r'```[\s\S]+?```' + self.code_blocks = re.findall(code_block_regex, user_message) + # Replace code blocks with placeholders + user_message_no_code = re.sub(code_block_regex, '__CODE_BLOCK__', user_message) + + parts = self.split_text_around_table(user_message_no_code) + text_before_table, table_text = parts + + # Check translation cache for text before table + translated_before_table = self.translation_cache.get(text_before_table) + if translated_before_table is None: + translated_before_table = self.translate( + text_before_table, + self.valves.source_user, + self.valves.target_user, + ) + self.translation_cache[text_before_table] = translated_before_table + + translated_user_message = translated_before_table + table_text + + # Clean table delimiters + translated_user_message = self.clean_table_delimiters(translated_user_message) + + # Restore code blocks + for code_block in self.code_blocks: + translated_user_message = translated_user_message.replace('__CODE_BLOCK__', code_block, 1) + + print(f"Translated user message: {translated_user_message}") + + for message in reversed(messages): + if message["role"] == "user": + message["content"] = translated_user_message + break + + body = {**body, "messages": messages} + return body + + async def outlet(self, body: dict, user: Optional[dict] = None) -> dict: + print(f"outlet:{__name__}") + + messages = body["messages"] + assistant_message = get_last_assistant_message(messages) + + print(f"Assistant message: {assistant_message}") + + # Find and store code blocks + 
code_block_regex = r'```[\s\S]+?```' + self.code_blocks = re.findall(code_block_regex, assistant_message) + # Replace code blocks with placeholders + assistant_message_no_code = re.sub(code_block_regex, '__CODE_BLOCK__', assistant_message) + + parts = self.split_text_around_table(assistant_message_no_code) + text_before_table, table_text = parts + + # Check translation cache for text before table + translated_before_table = self.translation_cache.get(text_before_table) + if translated_before_table is None: + translated_before_table = self.translate( + text_before_table, + self.valves.source_assistant, + self.valves.target_assistant, + ) + self.translation_cache[text_before_table] = translated_before_table + + translated_assistant_message = translated_before_table + table_text + + # Clean table delimiters + translated_assistant_message = self.clean_table_delimiters(translated_assistant_message) + + # Restore code blocks + for code_block in self.code_blocks: + translated_assistant_message = translated_assistant_message.replace('__CODE_BLOCK__', code_block, 1) + + print(f"Translated assistant message: {translated_assistant_message}") + + for message in reversed(messages): + if message["role"] == "assistant": + message["content"] = translated_assistant_message + break + + body = {**body, "messages": messages} + return body diff --git a/openwebui/pipelines/examples/filters/home_assistant_filter.py b/openwebui/pipelines/examples/filters/home_assistant_filter.py new file mode 100644 index 0000000..a86ed9f --- /dev/null +++ b/openwebui/pipelines/examples/filters/home_assistant_filter.py @@ -0,0 +1,116 @@ +""" +title: HomeAssistant Filter Pipeline +author: Andrew Tait Gehrhardt +date: 2024-06-15 +version: 1.0 +license: MIT +description: A pipeline for controlling Home Assistant entities based on their easy names. Only supports lights at the moment. 
+requirements: pytz, difflib +""" +import requests +from typing import Literal, Dict, Any +from datetime import datetime +import pytz +from difflib import get_close_matches + +from blueprints.function_calling_blueprint import Pipeline as FunctionCallingBlueprint + +class Pipeline(FunctionCallingBlueprint): + class Valves(FunctionCallingBlueprint.Valves): + HOME_ASSISTANT_URL: str = "" + HOME_ASSISTANT_TOKEN: str = "" + + class Tools: + def __init__(self, pipeline) -> None: + self.pipeline = pipeline + + def get_current_time(self) -> str: + """ + Get the current time in EST. + + :return: The current time in EST. + """ + now_est = datetime.now(pytz.timezone('US/Eastern')) # Get the current time in EST + current_time = now_est.strftime("%I:%M %p") # %I for 12-hour clock, %M for minutes, %p for am/pm + return f"ONLY RESPOND 'Current time is {current_time}'" + + def get_all_lights(self) -> Dict[str, Any]: + """ + Lists my lights. + Shows me my lights. + Get a dictionary of all lights in my home. + + :return: A dictionary of light entity names and their IDs. + """ + if not self.pipeline.valves.HOME_ASSISTANT_URL or not self.pipeline.valves.HOME_ASSISTANT_TOKEN: + return {"error": "Home Assistant URL or token not set, ask the user to set it up."} + else: + url = f"{self.pipeline.valves.HOME_ASSISTANT_URL}/api/states" + headers = { + "Authorization": f"Bearer {self.pipeline.valves.HOME_ASSISTANT_TOKEN}", + "Content-Type": "application/json", + } + + response = requests.get(url, headers=headers) + response.raise_for_status() # Raises an HTTPError for bad responses + data = response.json() + + lights = {entity["attributes"]["friendly_name"]: entity["entity_id"] + for entity in data if entity["entity_id"].startswith("light.")} + + return lights + + def control_light(self, name: str, state: Literal['on', 'off']) -> str: + """ + Turn a light on or off based on its name. + + :param name: The friendly name of the light. + :param state: The desired state ('on' or 'off'). 
+ :return: The result of the operation. + """ + if not self.pipeline.valves.HOME_ASSISTANT_URL or not self.pipeline.valves.HOME_ASSISTANT_TOKEN: + return "Home Assistant URL or token not set, ask the user to set it up." + + # Normalize the light name by converting to lowercase and stripping extra spaces + normalized_name = " ".join(name.lower().split()) + + # Get a dictionary of all lights + lights = self.get_all_lights() + if "error" in lights: + return lights["error"] + + # Find the closest matching light name + light_names = list(lights.keys()) + closest_matches = get_close_matches(normalized_name, light_names, n=1, cutoff=0.6) + + if not closest_matches: + return f"Light named '{name}' not found." + + best_match = closest_matches[0] + light_id = lights[best_match] + + url = f"{self.pipeline.valves.HOME_ASSISTANT_URL}/api/services/light/turn_{state}" + headers = { + "Authorization": f"Bearer {self.pipeline.valves.HOME_ASSISTANT_TOKEN}", + "Content-Type": "application/json", + } + payload = { + "entity_id": light_id + } + + response = requests.post(url, headers=headers, json=payload) + if response.status_code == 200: + return f"ONLY RESPOND 'Will do' TO THE USER. DO NOT SAY ANYTHING ELSE!" + else: + return f"ONLY RESPOND 'Couldn't find light' TO THE USER. DO NOT SAY ANYTHING ELSE!" + + def __init__(self): + super().__init__() + self.name = "My Tools Pipeline" + self.valves = self.Valves( + **{ + **self.valves.model_dump(), + "pipelines": ["*"], # Connect to all pipelines + }, + ) + self.tools = self.Tools(self) diff --git a/openwebui/pipelines/examples/filters/langfuse_filter_pipeline.py b/openwebui/pipelines/examples/filters/langfuse_filter_pipeline.py new file mode 100644 index 0000000..2d73e5d --- /dev/null +++ b/openwebui/pipelines/examples/filters/langfuse_filter_pipeline.py @@ -0,0 +1,333 @@ +""" +title: Langfuse Filter Pipeline +author: open-webui +date: 2025-06-16 +version: 1.7.1 +license: MIT +description: A filter pipeline that uses Langfuse. 
+requirements: langfuse<3.0.0 +""" + +from typing import List, Optional +import os +import uuid +import json + +from utils.pipelines.main import get_last_assistant_message +from pydantic import BaseModel +from langfuse import Langfuse +from langfuse.api.resources.commons.errors.unauthorized_error import UnauthorizedError + + +def get_last_assistant_message_obj(messages: List[dict]) -> dict: + """Retrieve the last assistant message from the message list.""" + for message in reversed(messages): + if message["role"] == "assistant": + return message + return {} + + +class Pipeline: + class Valves(BaseModel): + pipelines: List[str] = [] + priority: int = 0 + secret_key: str + public_key: str + host: str + # New valve that controls whether task names are added as tags: + insert_tags: bool = True + # New valve that controls whether to use model name instead of model ID for generation + use_model_name_instead_of_id_for_generation: bool = False + debug: bool = False + + def __init__(self): + self.type = "filter" + self.name = "Langfuse Filter" + + self.valves = self.Valves( + **{ + "pipelines": ["*"], + "secret_key": os.getenv("LANGFUSE_SECRET_KEY", "your-secret-key-here"), + "public_key": os.getenv("LANGFUSE_PUBLIC_KEY", "your-public-key-here"), + "host": os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com"), + "use_model_name_instead_of_id_for_generation": os.getenv("USE_MODEL_NAME", "false").lower() == "true", + "debug": os.getenv("DEBUG_MODE", "false").lower() == "true", + } + ) + + self.langfuse = None + self.chat_traces = {} + self.suppressed_logs = set() + # Dictionary to store model names for each chat + self.model_names = {} + + # Only these tasks will be treated as LLM "generations": + self.GENERATION_TASKS = {"llm_response"} + + def log(self, message: str, suppress_repeats: bool = False): + if self.valves.debug: + if suppress_repeats: + if message in self.suppressed_logs: + return + self.suppressed_logs.add(message) + print(f"[DEBUG] {message}") + + async def 
on_startup(self): + self.log(f"on_startup triggered for {__name__}") + self.set_langfuse() + + async def on_shutdown(self): + self.log(f"on_shutdown triggered for {__name__}") + if self.langfuse: + self.langfuse.flush() + + async def on_valves_updated(self): + self.log("Valves updated, resetting Langfuse client.") + self.set_langfuse() + + def set_langfuse(self): + try: + self.langfuse = Langfuse( + secret_key=self.valves.secret_key, + public_key=self.valves.public_key, + host=self.valves.host, + debug=self.valves.debug, + ) + self.langfuse.auth_check() + self.log("Langfuse client initialized successfully.") + except UnauthorizedError: + print( + "Langfuse credentials incorrect. Please re-enter your Langfuse credentials in the pipeline settings." + ) + except Exception as e: + print( + f"Langfuse error: {e} Please re-enter your Langfuse credentials in the pipeline settings." + ) + + def _build_tags(self, task_name: str) -> list: + """ + Builds a list of tags based on valve settings, ensuring we always add + 'open-webui' and skip user_response / llm_response from becoming tags themselves. 
+ """ + tags_list = [] + if self.valves.insert_tags: + # Always add 'open-webui' + tags_list.append("open-webui") + # Add the task_name if it's not one of the excluded defaults + if task_name not in ["user_response", "llm_response"]: + tags_list.append(task_name) + return tags_list + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + if self.valves.debug: + print(f"[DEBUG] Received request: {json.dumps(body, indent=2)}") + + self.log(f"Inlet function called with body: {body} and user: {user}") + + metadata = body.get("metadata", {}) + chat_id = metadata.get("chat_id", str(uuid.uuid4())) + + # Handle temporary chats + if chat_id == "local": + session_id = metadata.get("session_id") + chat_id = f"temporary-session-{session_id}" + + metadata["chat_id"] = chat_id + body["metadata"] = metadata + + # Extract and store both model name and ID if available + model_info = metadata.get("model", {}) + model_id = body.get("model") + + # Store model information for this chat + if chat_id not in self.model_names: + self.model_names[chat_id] = {"id": model_id} + else: + self.model_names[chat_id]["id"] = model_id + + if isinstance(model_info, dict) and "name" in model_info: + self.model_names[chat_id]["name"] = model_info["name"] + self.log(f"Stored model info - name: '{model_info['name']}', id: '{model_id}' for chat_id: {chat_id}") + + required_keys = ["model", "messages"] + missing_keys = [key for key in required_keys if key not in body] + if missing_keys: + error_message = f"Error: Missing keys in the request body: {', '.join(missing_keys)}" + self.log(error_message) + raise ValueError(error_message) + + user_email = user.get("email") if user else None + # Defaulting to 'user_response' if no task is provided + task_name = metadata.get("task", "user_response") + + # Build tags + tags_list = self._build_tags(task_name) + + if chat_id not in self.chat_traces: + self.log(f"Creating new trace for chat_id: {chat_id}") + + trace_payload = { + "name": 
f"chat:{chat_id}", + "input": body, + "user_id": user_email, + "metadata": metadata, + "session_id": chat_id, + } + + if tags_list: + trace_payload["tags"] = tags_list + + if self.valves.debug: + print(f"[DEBUG] Langfuse trace request: {json.dumps(trace_payload, indent=2)}") + + trace = self.langfuse.trace(**trace_payload) + self.chat_traces[chat_id] = trace + else: + trace = self.chat_traces[chat_id] + self.log(f"Reusing existing trace for chat_id: {chat_id}") + if tags_list: + trace.update(tags=tags_list) + + # Update metadata with type + metadata["type"] = task_name + metadata["interface"] = "open-webui" + + # If it's a task that is considered an LLM generation + if task_name in self.GENERATION_TASKS: + # Determine which model value to use based on the use_model_name valve + model_id = self.model_names.get(chat_id, {}).get("id", body["model"]) + model_name = self.model_names.get(chat_id, {}).get("name", "unknown") + + # Pick primary model identifier based on valve setting + model_value = model_name if self.valves.use_model_name_instead_of_id_for_generation else model_id + + # Add both values to metadata regardless of valve setting + metadata["model_id"] = model_id + metadata["model_name"] = model_name + + generation_payload = { + "name": f"{task_name}:{str(uuid.uuid4())}", + "model": model_value, + "input": body["messages"], + "metadata": metadata, + } + if tags_list: + generation_payload["tags"] = tags_list + + if self.valves.debug: + print(f"[DEBUG] Langfuse generation request: {json.dumps(generation_payload, indent=2)}") + + trace.generation(**generation_payload) + else: + # Otherwise, log it as an event + event_payload = { + "name": f"{task_name}:{str(uuid.uuid4())}", + "metadata": metadata, + "input": body["messages"], + } + if tags_list: + event_payload["tags"] = tags_list + + if self.valves.debug: + print(f"[DEBUG] Langfuse event request: {json.dumps(event_payload, indent=2)}") + + trace.event(**event_payload) + + return body + + async def outlet(self, 
body: dict, user: Optional[dict] = None) -> dict: + self.log(f"Outlet function called with body: {body}") + + chat_id = body.get("chat_id") + + # Handle temporary chats + if chat_id == "local": + session_id = body.get("session_id") + chat_id = f"temporary-session-{session_id}" + + metadata = body.get("metadata", {}) + # Defaulting to 'llm_response' if no task is provided + task_name = metadata.get("task", "llm_response") + + # Build tags + tags_list = self._build_tags(task_name) + + if chat_id not in self.chat_traces: + self.log(f"[WARNING] No matching trace found for chat_id: {chat_id}, attempting to re-register.") + # Re-run inlet to register if somehow missing + return await self.inlet(body, user) + + trace = self.chat_traces[chat_id] + + assistant_message = get_last_assistant_message(body["messages"]) + assistant_message_obj = get_last_assistant_message_obj(body["messages"]) + + usage = None + if assistant_message_obj: + info = assistant_message_obj.get("usage", {}) + if isinstance(info, dict): + input_tokens = info.get("prompt_eval_count") or info.get("prompt_tokens") + output_tokens = info.get("eval_count") or info.get("completion_tokens") + if input_tokens is not None and output_tokens is not None: + usage = { + "input": input_tokens, + "output": output_tokens, + "unit": "TOKENS", + } + self.log(f"Usage data extracted: {usage}") + + # Update the trace output with the last assistant message + trace.update(output=assistant_message) + + metadata["type"] = task_name + metadata["interface"] = "open-webui" + + if task_name in self.GENERATION_TASKS: + # Determine which model value to use based on the use_model_name valve + model_id = self.model_names.get(chat_id, {}).get("id", body.get("model")) + model_name = self.model_names.get(chat_id, {}).get("name", "unknown") + + # Pick primary model identifier based on valve setting + model_value = model_name if self.valves.use_model_name_instead_of_id_for_generation else model_id + + # Add both values to metadata 
regardless of valve setting + metadata["model_id"] = model_id + metadata["model_name"] = model_name + + # If it's an LLM generation + generation_payload = { + "name": f"{task_name}:{str(uuid.uuid4())}", + "model": model_value, # <-- Use model name or ID based on valve setting + "input": body["messages"], + "metadata": metadata, + "usage": usage, + } + if tags_list: + generation_payload["tags"] = tags_list + + if self.valves.debug: + print(f"[DEBUG] Langfuse generation end request: {json.dumps(generation_payload, indent=2)}") + + trace.generation().end(**generation_payload) + self.log(f"Generation ended for chat_id: {chat_id}") + else: + # Otherwise log as an event + event_payload = { + "name": f"{task_name}:{str(uuid.uuid4())}", + "metadata": metadata, + "input": body["messages"], + } + if usage: + # If you want usage on event as well + event_payload["metadata"]["usage"] = usage + + if tags_list: + event_payload["tags"] = tags_list + + if self.valves.debug: + print(f"[DEBUG] Langfuse event end request: {json.dumps(event_payload, indent=2)}") + + trace.event(**event_payload) + self.log(f"Event logged for chat_id: {chat_id}") + + return body diff --git a/openwebui/pipelines/examples/filters/langfuse_v3_filter_pipeline.py b/openwebui/pipelines/examples/filters/langfuse_v3_filter_pipeline.py new file mode 100644 index 0000000..a046eee --- /dev/null +++ b/openwebui/pipelines/examples/filters/langfuse_v3_filter_pipeline.py @@ -0,0 +1,406 @@ +""" +title: Langfuse Filter Pipeline for v3 +author: open-webui +date: 2025-07-31 +version: 0.0.1 +license: MIT +description: A filter pipeline that uses Langfuse v3. 
+requirements: langfuse>=3.0.0 +""" + +from typing import List, Optional +import os +import uuid +import json + + +from utils.pipelines.main import get_last_assistant_message +from pydantic import BaseModel +from langfuse import Langfuse + + +def get_last_assistant_message_obj(messages: List[dict]) -> dict: + """Retrieve the last assistant message from the message list.""" + for message in reversed(messages): + if message["role"] == "assistant": + return message + return {} + + +class Pipeline: + class Valves(BaseModel): + pipelines: List[str] = [] + priority: int = 0 + secret_key: str + public_key: str + host: str + # New valve that controls whether task names are added as tags: + insert_tags: bool = True + # New valve that controls whether to use model name instead of model ID for generation + use_model_name_instead_of_id_for_generation: bool = False + debug: bool = False + + def __init__(self): + self.type = "filter" + self.name = "Langfuse Filter" + + self.valves = self.Valves( + **{ + "pipelines": ["*"], + "secret_key": os.getenv("LANGFUSE_SECRET_KEY", "your-secret-key-here"), + "public_key": os.getenv("LANGFUSE_PUBLIC_KEY", "your-public-key-here"), + "host": os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com"), + "use_model_name_instead_of_id_for_generation": os.getenv("USE_MODEL_NAME", "false").lower() == "true", + "debug": os.getenv("DEBUG_MODE", "false").lower() == "true", + } + ) + + self.langfuse = None + self.chat_traces = {} + self.suppressed_logs = set() + # Dictionary to store model names for each chat + self.model_names = {} + + def log(self, message: str, suppress_repeats: bool = False): + if self.valves.debug: + if suppress_repeats: + if message in self.suppressed_logs: + return + self.suppressed_logs.add(message) + print(f"[DEBUG] {message}") + + async def on_startup(self): + self.log(f"on_startup triggered for {__name__}") + self.set_langfuse() + + async def on_shutdown(self): + self.log(f"on_shutdown triggered for {__name__}") + if 
self.langfuse: + try: + # End all active traces + for chat_id, trace in self.chat_traces.items(): + try: + trace.end() + self.log(f"Ended trace for chat_id: {chat_id}") + except Exception as e: + self.log(f"Failed to end trace for {chat_id}: {e}") + + self.chat_traces.clear() + self.langfuse.flush() + self.log("Langfuse data flushed on shutdown") + except Exception as e: + self.log(f"Failed to flush Langfuse data: {e}") + + async def on_valves_updated(self): + self.log("Valves updated, resetting Langfuse client.") + self.set_langfuse() + + def set_langfuse(self): + try: + self.log(f"Initializing Langfuse with host: {self.valves.host}") + self.log( + f"Secret key set: {'Yes' if self.valves.secret_key and self.valves.secret_key != 'your-secret-key-here' else 'No'}" + ) + self.log( + f"Public key set: {'Yes' if self.valves.public_key and self.valves.public_key != 'your-public-key-here' else 'No'}" + ) + + # Initialize Langfuse client for v3.2.1 + self.langfuse = Langfuse( + secret_key=self.valves.secret_key, + public_key=self.valves.public_key, + host=self.valves.host, + debug=self.valves.debug, + ) + + # Test authentication + try: + self.langfuse.auth_check() + self.log( + f"Langfuse client initialized and authenticated successfully. 
Connected to host: {self.valves.host}") + + except Exception as e: + self.log(f"Auth check failed: {e}") + self.log(f"Failed host: {self.valves.host}") + self.langfuse = None + return + + except Exception as auth_error: + if ( + "401" in str(auth_error) + or "unauthorized" in str(auth_error).lower() + or "credentials" in str(auth_error).lower() + ): + self.log(f"Langfuse credentials incorrect: {auth_error}") + self.langfuse = None + return + except Exception as e: + self.log(f"Langfuse initialization error: {e}") + self.langfuse = None + + def _build_tags(self, task_name: str) -> list: + """ + Builds a list of tags based on valve settings, ensuring we always add + 'open-webui' and skip user_response / llm_response from becoming tags themselves. + """ + tags_list = [] + if self.valves.insert_tags: + # Always add 'open-webui' + tags_list.append("open-webui") + # Add the task_name if it's not one of the excluded defaults + if task_name not in ["user_response", "llm_response"]: + tags_list.append(task_name) + return tags_list + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + self.log("Langfuse Filter INLET called") + + # Check Langfuse client status + if not self.langfuse: + self.log("[WARNING] Langfuse client not initialized - Skipped") + return body + + self.log(f"Inlet function called with body: {body} and user: {user}") + + metadata = body.get("metadata", {}) + chat_id = metadata.get("chat_id", str(uuid.uuid4())) + + # Handle temporary chats + if chat_id == "local": + session_id = metadata.get("session_id") + chat_id = f"temporary-session-{session_id}" + + metadata["chat_id"] = chat_id + body["metadata"] = metadata + + # Extract and store both model name and ID if available + model_info = metadata.get("model", {}) + model_id = body.get("model") + + # Store model information for this chat + if chat_id not in self.model_names: + self.model_names[chat_id] = {"id": model_id} + else: + self.model_names[chat_id]["id"] = model_id + + if 
isinstance(model_info, dict) and "name" in model_info: + self.model_names[chat_id]["name"] = model_info["name"] + self.log(f"Stored model info - name: '{model_info['name']}', id: '{model_id}' for chat_id: {chat_id}") + + required_keys = ["model", "messages"] + missing_keys = [key for key in required_keys if key not in body] + if missing_keys: + error_message = f"Error: Missing keys in the request body: {', '.join(missing_keys)}" + self.log(error_message) + raise ValueError(error_message) + + user_email = user.get("email") if user else None + # Defaulting to 'user_response' if no task is provided + task_name = metadata.get("task", "user_response") + + # Build tags + tags_list = self._build_tags(task_name) + + if chat_id not in self.chat_traces: + self.log(f"Creating new trace for chat_id: {chat_id}") + + try: + # Create trace using Langfuse v3 API with complete data + trace_metadata = { + **metadata, + "user_id": user_email, + "session_id": chat_id, + "interface": "open-webui", + } + + # Create trace with all necessary information + trace = self.langfuse.start_span( + name=f"chat:{chat_id}", + input=body, + metadata=trace_metadata + ) + + # Set additional trace attributes + trace.update_trace( + user_id=user_email, + session_id=chat_id, + tags=tags_list if tags_list else None, + input=body, + metadata=trace_metadata, + ) + + self.chat_traces[chat_id] = trace + self.log(f"Successfully created trace for chat_id: {chat_id}") + except Exception as e: + self.log(f"Failed to create trace: {e}") + return body + else: + trace = self.chat_traces[chat_id] + self.log(f"Reusing existing trace for chat_id: {chat_id}") + # Update trace with current metadata and tags + trace_metadata = { + **metadata, + "user_id": user_email, + "session_id": chat_id, + "interface": "open-webui", + } + trace.update_trace( + tags=tags_list if tags_list else None, + metadata=trace_metadata, + ) + + # Update metadata with type + metadata["type"] = task_name + metadata["interface"] = "open-webui" + + # 
Log user input as event + try: + trace = self.chat_traces[chat_id] + + # Create complete event metadata + event_metadata = { + **metadata, + "type": "user_input", + "interface": "open-webui", + "user_id": user_email, + "session_id": chat_id, + "event_id": str(uuid.uuid4()), + } + + event_span = trace.start_span( + name=f"user_input:{str(uuid.uuid4())}", + metadata=event_metadata, + input=body["messages"], + ) + event_span.end() + self.log(f"User input event logged for chat_id: {chat_id}") + except Exception as e: + self.log(f"Failed to log user input event: {e}") + + return body + + async def outlet(self, body: dict, user: Optional[dict] = None) -> dict: + self.log("Langfuse Filter OUTLET called") + + # Check Langfuse client status + if not self.langfuse: + self.log("[WARNING] Langfuse client not initialized - Skipped") + return body + + self.log(f"Outlet function called with body: {body}") + + chat_id = body.get("chat_id") + + # Handle temporary chats + if chat_id == "local": + session_id = body.get("session_id") + chat_id = f"temporary-session-{session_id}" + + metadata = body.get("metadata", {}) + # Defaulting to 'llm_response' if no task is provided + task_name = metadata.get("task", "llm_response") + + # Build tags + tags_list = self._build_tags(task_name) + + if chat_id not in self.chat_traces: + self.log(f"[WARNING] No matching trace found for chat_id: {chat_id}, attempting to re-register.") + # Re-run inlet to register if somehow missing + return await self.inlet(body, user) + + self.chat_traces[chat_id] + + assistant_message = get_last_assistant_message(body["messages"]) + assistant_message_obj = get_last_assistant_message_obj(body["messages"]) + + usage = None + if assistant_message_obj: + info = assistant_message_obj.get("usage", {}) + if isinstance(info, dict): + input_tokens = info.get("prompt_eval_count") or info.get("prompt_tokens") + output_tokens = info.get("eval_count") or info.get("completion_tokens") + if input_tokens is not None and 
output_tokens is not None: + usage = { + "input": input_tokens, + "output": output_tokens, + "unit": "TOKENS", + } + self.log(f"Usage data extracted: {usage}") + + # Update the trace with complete output information + trace = self.chat_traces[chat_id] + + metadata["type"] = task_name + metadata["interface"] = "open-webui" + + # Create complete trace metadata with all information + complete_trace_metadata = { + **metadata, + "user_id": user.get("email") if user else None, + "session_id": chat_id, + "interface": "open-webui", + "task": task_name, + } + + # Update trace with output and complete metadata + trace.update_trace( + output=assistant_message, + metadata=complete_trace_metadata, + tags=tags_list if tags_list else None, + ) + + # Outlet: Always create LLM generation (this is the LLM response) + # Determine which model value to use based on the use_model_name valve + model_id = self.model_names.get(chat_id, {}).get("id", body.get("model")) + model_name = self.model_names.get(chat_id, {}).get("name", "unknown") + + # Pick primary model identifier based on valve setting + model_value = ( + model_name + if self.valves.use_model_name_instead_of_id_for_generation + else model_id + ) + + # Add both values to metadata regardless of valve setting + metadata["model_id"] = model_id + metadata["model_name"] = model_name + + # Create LLM generation for the response + try: + trace = self.chat_traces[chat_id] + + # Create complete generation metadata + generation_metadata = { + **complete_trace_metadata, + "type": "llm_response", + "model_id": model_id, + "model_name": model_name, + "generation_id": str(uuid.uuid4()), + } + + generation = trace.start_generation( + name=f"llm_response:{str(uuid.uuid4())}", + model=model_value, + input=body["messages"], + output=assistant_message, + metadata=generation_metadata, + ) + + # Update with usage if available + if usage: + generation.update(usage=usage) + + generation.end() + self.log(f"LLM generation completed for chat_id: 
{chat_id}") + except Exception as e: + self.log(f"Failed to create LLM generation: {e}") + + # Flush data to Langfuse + try: + if self.langfuse: + self.langfuse.flush() + self.log("Langfuse data flushed") + except Exception as e: + self.log(f"Failed to flush Langfuse data: {e}") + + return body diff --git a/openwebui/pipelines/examples/filters/libretranslate_filter_pipeline.py b/openwebui/pipelines/examples/filters/libretranslate_filter_pipeline.py new file mode 100644 index 0000000..39e8c5f --- /dev/null +++ b/openwebui/pipelines/examples/filters/libretranslate_filter_pipeline.py @@ -0,0 +1,141 @@ +from typing import List, Optional +from schemas import OpenAIChatMessage +from pydantic import BaseModel +import requests +import os + +from utils.pipelines.main import get_last_user_message, get_last_assistant_message + + +class Pipeline: + + class Valves(BaseModel): + # List target pipeline ids (models) that this filter will be connected to. + # If you want to connect this filter to all pipelines, you can set pipelines to ["*"] + # e.g. ["llama3:latest", "gpt-3.5-turbo"] + pipelines: List[str] = [] + + # Assign a priority level to the filter pipeline. + # The priority level determines the order in which the filter pipelines are executed. + # The lower the number, the higher the priority. + priority: int = 0 + + # Valves + libretranslate_url: str + + # Source and target languages + # User message will be translated from source_user to target_user + source_user: Optional[str] = "auto" + target_user: Optional[str] = "en" + + # Assistant languages + # Assistant message will be translated from source_assistant to target_assistant + source_assistant: Optional[str] = "en" + target_assistant: Optional[str] = "es" + + def __init__(self): + # Pipeline filters are only compatible with Open WebUI + # You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API. 
+ self.type = "filter" + + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "libretranslate_filter_pipeline" + self.name = "LibreTranslate Filter" + + # Initialize + self.valves = self.Valves( + **{ + "pipelines": ["*"], # Connect to all pipelines + "libretranslate_url": os.getenv( + "LIBRETRANSLATE_API_BASE_URL", "http://localhost:5000" + ), + } + ) + + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. 
+ pass + + def translate(self, text: str, source: str, target: str) -> str: + payload = { + "q": text, + "source": source, + "target": target, + } + + try: + r = requests.post( + f"{self.valves.libretranslate_url}/translate", json=payload + ) + r.raise_for_status() + + data = r.json() + return data["translatedText"] + except Exception as e: + print(f"Error translating text: {e}") + return text + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + print(f"inlet:{__name__}") + + messages = body["messages"] + user_message = get_last_user_message(messages) + + print(f"User message: {user_message}") + + # Translate user message + translated_user_message = self.translate( + user_message, + self.valves.source_user, + self.valves.target_user, + ) + + print(f"Translated user message: {translated_user_message}") + + for message in reversed(messages): + if message["role"] == "user": + message["content"] = translated_user_message + break + + body = {**body, "messages": messages} + return body + + async def outlet(self, body: dict, user: Optional[dict] = None) -> dict: + print(f"outlet:{__name__}") + + messages = body["messages"] + assistant_message = get_last_assistant_message(messages) + + print(f"Assistant message: {assistant_message}") + + # Translate assistant message + translated_assistant_message = self.translate( + assistant_message, + self.valves.source_assistant, + self.valves.target_assistant, + ) + + print(f"Translated assistant message: {translated_assistant_message}") + + for message in reversed(messages): + if message["role"] == "assistant": + message["content"] = translated_assistant_message + break + + body = {**body, "messages": messages} + return body diff --git a/openwebui/pipelines/examples/filters/llm_translate_filter_pipeline.py b/openwebui/pipelines/examples/filters/llm_translate_filter_pipeline.py new file mode 100644 index 0000000..a97067b --- /dev/null +++ b/openwebui/pipelines/examples/filters/llm_translate_filter_pipeline.py 
@@ -0,0 +1,157 @@ +from typing import List, Optional +from schemas import OpenAIChatMessage +from pydantic import BaseModel +import requests +import os + +from utils.pipelines.main import get_last_user_message, get_last_assistant_message + + +class Pipeline: + class Valves(BaseModel): + # List target pipeline ids (models) that this filter will be connected to. + # If you want to connect this filter to all pipelines, you can set pipelines to ["*"] + # e.g. ["llama3:latest", "gpt-3.5-turbo"] + pipelines: List[str] = [] + + # Assign a priority level to the filter pipeline. + # The priority level determines the order in which the filter pipelines are executed. + # The lower the number, the higher the priority. + priority: int = 0 + + OPENAI_API_BASE_URL: str = "https://api.openai.com/v1" + OPENAI_API_KEY: str = "" + TASK_MODEL: str = "gpt-3.5-turbo" + + # Source and target languages + # User message will be translated from source_user to target_user + source_user: Optional[str] = "auto" + target_user: Optional[str] = "en" + + # Assistant languages + # Assistant message will be translated from source_assistant to target_assistant + source_assistant: Optional[str] = "en" + target_assistant: Optional[str] = "es" + + def __init__(self): + # Pipeline filters are only compatible with Open WebUI + # You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API. + self.type = "filter" + + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. 
+ # self.id = "libretranslate_filter_pipeline" + self.name = "LLM Translate Filter" + + # Initialize + self.valves = self.Valves( + **{ + "pipelines": ["*"], # Connect to all pipelines + "OPENAI_API_KEY": os.getenv( + "OPENAI_API_KEY", "your-openai-api-key-here" + ), + } + ) + + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. + pass + + def translate(self, text: str, source: str, target: str) -> str: + headers = {} + headers["Authorization"] = f"Bearer {self.valves.OPENAI_API_KEY}" + headers["Content-Type"] = "application/json" + + payload = { + "messages": [ + { + "role": "system", + "content": f"Translate the following text to {target}. Provide only the translated text and nothing else.", + }, + {"role": "user", "content": text}, + ], + "model": self.valves.TASK_MODEL, + } + print(payload) + + try: + r = requests.post( + url=f"{self.valves.OPENAI_API_BASE_URL}/chat/completions", + json=payload, + headers=headers, + stream=False, + ) + + r.raise_for_status() + response = r.json() + print(response) + return response["choices"][0]["message"]["content"] + except Exception as e: + return f"Error: {e}" + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + print(f"inlet:{__name__}") + + messages = body["messages"] + user_message = get_last_user_message(messages) + + print(f"User message: {user_message}") + + # Translate user message + translated_user_message = self.translate( + user_message, + self.valves.source_user, + self.valves.target_user, + ) + + print(f"Translated user message: {translated_user_message}") + + for message in reversed(messages): + if message["role"] == "user": + message["content"] = translated_user_message + break + + 
body = {**body, "messages": messages} + return body + + async def outlet(self, body: dict, user: Optional[dict] = None) -> dict: + if "title" in body: + return body + + print(f"outlet:{__name__}") + + messages = body["messages"] + assistant_message = get_last_assistant_message(messages) + + print(f"Assistant message: {assistant_message}") + + # Translate assistant message + translated_assistant_message = self.translate( + assistant_message, + self.valves.source_assistant, + self.valves.target_assistant, + ) + + print(f"Translated assistant message: {translated_assistant_message}") + + for message in reversed(messages): + if message["role"] == "assistant": + message["content"] = translated_assistant_message + break + + body = {**body, "messages": messages} + return body diff --git a/openwebui/pipelines/examples/filters/llmguard_prompt_injection_filter_pipeline.py b/openwebui/pipelines/examples/filters/llmguard_prompt_injection_filter_pipeline.py new file mode 100644 index 0000000..b3cd79e --- /dev/null +++ b/openwebui/pipelines/examples/filters/llmguard_prompt_injection_filter_pipeline.py @@ -0,0 +1,81 @@ +""" +title: LLM Guard Filter Pipeline +author: jannikstdl +date: 2024-05-30 +version: 1.0 +license: MIT +description: A pipeline for filtering out potential prompt injections using the LLM Guard library. +requirements: llm-guard +""" + +from typing import List, Optional +from schemas import OpenAIChatMessage +from pydantic import BaseModel +from llm_guard.input_scanners import PromptInjection +from llm_guard.input_scanners.prompt_injection import MatchType +import os + +class Pipeline: + def __init__(self): + # Pipeline filters are only compatible with Open WebUI + # You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API. + self.type = "filter" + + # Optionally, you can set the id and name of the pipeline. + # Assign a unique identifier to the pipeline. 
+ # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + self.id = "llmguard_prompt_injection_filter_pipeline" + self.name = "LLMGuard Prompt Injection Filter" + + class Valves(BaseModel): + # List target pipeline ids (models) that this filter will be connected to. + # If you want to connect this filter to all pipelines, you can set pipelines to ["*"] + # e.g. ["llama3:latest", "gpt-3.5-turbo"] + pipelines: List[str] = [] + + # Assign a priority level to the filter pipeline. + # The priority level determines the order in which the filter pipelines are executed. + # The lower the number, the higher the priority. + priority: int = 0 + + # Initialize + self.valves = Valves( + **{ + "pipelines": ["*"], # Connect to all pipelines + } + ) + + self.model = None + + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + + self.model = PromptInjection(threshold=0.8, match_type=MatchType.FULL) + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. + pass + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + # This filter is applied to the form data before it is sent to the OpenAI API. 
+ print(f"inlet:{__name__}") + + user_message = body["messages"][-1]["content"] + + # Filter out prompt injection messages + sanitized_prompt, is_valid, risk_score = self.model.scan(user_message) + + if risk_score > 0.8: + raise Exception("Prompt injection detected") + + return body diff --git a/openwebui/pipelines/examples/filters/mem0_memory_filter_pipeline.py b/openwebui/pipelines/examples/filters/mem0_memory_filter_pipeline.py new file mode 100644 index 0000000..2016c61 --- /dev/null +++ b/openwebui/pipelines/examples/filters/mem0_memory_filter_pipeline.py @@ -0,0 +1,140 @@ +""" +title: Long Term Memory Filter +author: Anton Nilsson +date: 2024-08-23 +version: 1.0 +license: MIT +description: A filter that processes user messages and stores them as long term memory by utilizing the mem0 framework together with qdrant and ollama +requirements: pydantic, ollama, mem0ai +""" + +from typing import List, Optional +from pydantic import BaseModel +import json +from mem0 import Memory +import threading + +class Pipeline: + class Valves(BaseModel): + pipelines: List[str] = [] + priority: int = 0 + + store_cycles: int = 5 # Number of messages from the user before the data is processed and added to the memory + mem_zero_user: str = "user" # Memories belongs to this user, only used by mem0 for internal organization of memories + + # Default values for the mem0 vector store + vector_store_qdrant_name: str = "memories" + vector_store_qdrant_url: str = "host.docker.internal" + vector_store_qdrant_port: int = 6333 + vector_store_qdrant_dims: int = 768 # Need to match the vector dimensions of the embedder model + + # Default values for the mem0 language model + ollama_llm_model: str = "llama3.1:latest" # This model need to exist in ollama + ollama_llm_temperature: float = 0 + ollama_llm_tokens: int = 8000 + ollama_llm_url: str = "http://host.docker.internal:11434" + + # Default values for the mem0 embedding model + ollama_embedder_model: str = "nomic-embed-text:latest" # This 
model need to exist in ollama + ollama_embedder_url: str = "http://host.docker.internal:11434" + + def __init__(self): + self.type = "filter" + self.name = "Memory Filter" + self.user_messages = [] + self.thread = None + self.valves = self.Valves( + **{ + "pipelines": ["*"], # Connect to all pipelines + } + ) + self.m = self.init_mem_zero() + + async def on_startup(self): + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + print(f"on_shutdown:{__name__}") + pass + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + print(f"pipe:{__name__}") + + user = self.valves.mem_zero_user + store_cycles = self.valves.store_cycles + + if isinstance(body, str): + body = json.loads(body) + + all_messages = body["messages"] + last_message = all_messages[-1]["content"] + + self.user_messages.append(last_message) + + if len(self.user_messages) == store_cycles: + + message_text = "" + for message in self.user_messages: + message_text += message + " " + + if self.thread and self.thread.is_alive(): + print("Waiting for previous memory to be done") + self.thread.join() + + self.thread = threading.Thread(target=self.m.add, kwargs={"data":message_text,"user_id":user}) + + print("Text to be processed in to a memory:") + print(message_text) + + self.thread.start() + self.user_messages.clear() + + memories = self.m.search(last_message, user_id=user) + + if(memories): + fetched_memory = memories[0]["memory"] + else: + fetched_memory = "" + + print("Memory added to the context:") + print(fetched_memory) + + if fetched_memory: + all_messages.insert(0, {"role":"system", "content":"This is your inner voice talking, you remember this about the person you chatting with "+str(fetched_memory)}) + + print("Final body to send to the LLM:") + print(body) + + return body + + def init_mem_zero(self): + config = { + "vector_store": { + "provider": "qdrant", + "config": { + "collection_name": self.valves.vector_store_qdrant_name, + "host": 
self.valves.vector_store_qdrant_url, + "port": self.valves.vector_store_qdrant_port, + "embedding_model_dims": self.valves.vector_store_qdrant_dims, + }, + }, + "llm": { + "provider": "ollama", + "config": { + "model": self.valves.ollama_llm_model, + "temperature": self.valves.ollama_llm_temperature, + "max_tokens": self.valves.ollama_llm_tokens, + "ollama_base_url": self.valves.ollama_llm_url, + }, + }, + "embedder": { + "provider": "ollama", + "config": { + "model": self.valves.ollama_embedder_model, + "ollama_base_url": self.valves.ollama_embedder_url, + }, + }, + } + + return Memory.from_config(config) \ No newline at end of file diff --git a/openwebui/pipelines/examples/filters/opik_filter_pipeline.py b/openwebui/pipelines/examples/filters/opik_filter_pipeline.py new file mode 100644 index 0000000..ab768b0 --- /dev/null +++ b/openwebui/pipelines/examples/filters/opik_filter_pipeline.py @@ -0,0 +1,274 @@ +""" +title: Opik Filter Pipeline +author: open-webui +date: 2025-03-12 +version: 1.0 +license: MIT +description: A filter pipeline that uses Opik for LLM observability. 
def get_last_assistant_message_obj(messages: List[dict]) -> dict:
    """Return the most recent assistant message in *messages*, or {} if none."""
    return next(
        (msg for msg in reversed(messages) if msg["role"] == "assistant"),
        {},
    )
# inlet()/outlet() of the Opik Filter `Pipeline` class (class header, Valves
# and the Opik client setup are defined above this chunk).

async def inlet(self, body: dict, user: Optional[dict] = None) -> dict:
    """
    Inlet handles the incoming request (usually a user message).
    - If no trace exists yet for this chat_id, we create a new trace.
    - If a trace does exist, we simply create a new span for the new user message.

    Raises:
        ValueError: if the request body is missing "model" or "messages".
    """
    if self.valves.debug:
        print(f"[DEBUG] Received request: {json.dumps(body, indent=2)}")

    self.log(f"Inlet function called with body: {body} and user: {user}")

    metadata = body.get("metadata", {})
    task = metadata.get("task", "")

    # Skip logging tasks for now
    if task:
        self.log(f"Skipping {task} task.")
        return body

    if "chat_id" not in metadata:
        chat_id = str(uuid.uuid4())  # Regular chat messages
        self.log(f"Assigned normal chat_id: {chat_id}")

        metadata["chat_id"] = chat_id
        body["metadata"] = metadata
    else:
        chat_id = metadata["chat_id"]

    required_keys = ["model", "messages"]
    missing_keys = [key for key in required_keys if key not in body]
    if missing_keys:
        error_message = (
            f"Error: Missing keys in the request body: {', '.join(missing_keys)}"
        )
        self.log(error_message)
        raise ValueError(error_message)

    user_email = user.get("email") if user else None

    # FIX: this was an `assert`, which is silently stripped under `python -O`;
    # validate the invariant explicitly so it always holds.
    if chat_id in self.chat_traces:
        raise AssertionError(
            f"There shouldn't already be a trace for chat_id {chat_id}"
        )

    # Create a new trace and span
    self.log(f"Creating new chat trace for chat_id: {chat_id}")

    # Body copy for traces and span
    trace_body = body.copy()
    span_body = body.copy()

    # Extract metadata from body
    metadata = trace_body.pop("metadata", {})
    metadata.update({"chat_id": chat_id, "user_id": user_email})

    # We don't need the model at the trace level
    trace_body.pop("model", None)

    trace_payload = {
        "name": f"{__name__}",
        "input": trace_body,
        "metadata": metadata,
        "thread_id": chat_id,
    }

    if self.valves.debug:
        print(f"[DEBUG] Opik trace request: {json.dumps(trace_payload, indent=2)}")

    trace = self.opik.trace(**trace_payload)

    span_metadata = metadata.copy()
    span_metadata.update({"interface": "open-webui"})

    # Extract the model from body
    span_body.pop("model", None)
    # We don't need the metadata in the input for the span
    span_body.pop("metadata", None)

    # Extract the model and provider from metadata
    model = span_metadata.get("model", {}).get("id", None)
    provider = span_metadata.get("model", {}).get("owned_by", None)

    span_payload = {
        "name": chat_id,
        "model": model,
        "provider": provider,
        "input": span_body,
        "metadata": span_metadata,
        "type": "llm",
    }

    if self.valves.debug:
        print(f"[DEBUG] Opik span request: {json.dumps(span_payload, indent=2)}")

    span = trace.span(**span_payload)

    self.chat_traces[chat_id] = trace
    self.chat_spans[chat_id] = span
    self.log(f"Trace and span objects successfully created for chat_id: {chat_id}")

    return body

async def outlet(self, body: dict, user: Optional[dict] = None) -> dict:
    """
    Outlet handles the response body (usually the assistant message).
    It will finalize/end the span created for the user request.
    """
    self.log(f"Outlet function called with body: {body}")

    chat_id = body.get("chat_id")

    # If no trace or span exist, the chat can't be logged.
    if chat_id not in self.chat_traces or chat_id not in self.chat_spans:
        self.log(
            f"[WARNING] No matching chat trace found for chat_id: {chat_id}, chat won't be logged."
        )
        return body

    trace = self.chat_traces[chat_id]
    span = self.chat_spans[chat_id]

    # Body copy for traces and span
    trace_body = body.copy()
    span_body = body.copy()

    # Get the last assistant message from the conversation
    assistant_message_obj = get_last_assistant_message_obj(body["messages"])

    # Extract usage if available
    usage = None
    self.log(f"Assistant message obj: {assistant_message_obj}")
    if assistant_message_obj:
        message_usage = assistant_message_obj.get("usage", {})
        if isinstance(message_usage, dict):
            # Ollama-style counters first, OpenAI-style names as fallback.
            input_tokens = message_usage.get(
                "prompt_eval_count"
            ) or message_usage.get("prompt_tokens")
            output_tokens = message_usage.get("eval_count") or message_usage.get(
                "completion_tokens"
            )
            if input_tokens is not None and output_tokens is not None:
                usage = {
                    "prompt_tokens": input_tokens,
                    "completion_tokens": output_tokens,
                    "total_tokens": input_tokens + output_tokens,
                }
            self.log(f"Usage data extracted: {usage}")

    # Chat_id is already logged as trace thread
    span_body.pop("chat_id", None)

    # End the span with the final assistant message and updated conversation
    span_payload = {
        "output": span_body,  # include the entire conversation
        "usage": usage,
    }

    if self.valves.debug:
        print(
            f"[DEBUG] Opik span end request: {json.dumps(span_payload, indent=2)}"
        )

    span.end(**span_payload)
    self.log(f"span ended for chat_id: {chat_id}")

    # FIX: removed a second, duplicated `span_body.pop("chat_id", None)` here;
    # it was dead code (the span payload had already been sent).

    # Optionally update the trace with the final assistant output
    trace.end(output=trace_body)

    # Force the creation of a new trace and span for the next chat even if
    # they are part of the same thread
    del self.chat_traces[chat_id]
    del self.chat_spans[chat_id]

    return body
class Pipeline:
    """Filter pipeline that redacts PII from user messages via Presidio."""

    class Valves(BaseModel):
        # Pipelines this filter attaches to ("*" = all).
        pipelines: List[str] = ["*"]
        # Execution order among filters (lower runs first).
        priority: int = 0
        # When True, messages from admin users are redacted as well.
        enabled_for_admins: bool = False
        # Presidio entity types that will be replaced in user messages.
        entities_to_redact: List[str] = [
            "PERSON", "EMAIL_ADDRESS", "PHONE_NUMBER", "US_SSN",
            "CREDIT_CARD", "IP_ADDRESS", "US_PASSPORT", "LOCATION",
            "DATE_TIME", "NRP", "MEDICAL_LICENSE", "URL"
        ]
        # Language code passed to the Presidio analyzer.
        language: str = "en"

    def __init__(self):
        self.type = "filter"
        self.name = "Presidio PII Redaction Pipeline"

        # Valves are seeded from environment variables, falling back to the
        # model defaults declared above.
        default_entities = ",".join(self.Valves().entities_to_redact)
        self.valves = self.Valves(
            pipelines=os.getenv("PII_REDACT_PIPELINES", "*").split(","),
            enabled_for_admins=os.getenv(
                "PII_REDACT_ENABLED_FOR_ADMINS", "false"
            ).lower() == "true",
            entities_to_redact=os.getenv(
                "PII_REDACT_ENTITIES", default_entities
            ).split(","),
            language=os.getenv("PII_REDACT_LANGUAGE", "en"),
        )

        self.analyzer = AnalyzerEngine()
        self.anonymizer = AnonymizerEngine()

    async def on_startup(self):
        print(f"on_startup:{__name__}")

    async def on_shutdown(self):
        print(f"on_shutdown:{__name__}")

    def redact_pii(self, text: str) -> str:
        """Return *text* with every configured PII entity replaced by [REDACTED]."""
        findings = self.analyzer.analyze(
            text=text,
            language=self.valves.language,
            entities=self.valves.entities_to_redact,
        )
        redacted = self.anonymizer.anonymize(
            text=text,
            analyzer_results=findings,
            operators={
                "DEFAULT": OperatorConfig("replace", {"new_value": "[REDACTED]"})
            },
        )
        return redacted.text

    async def inlet(self, body: dict, user: Optional[dict] = None) -> dict:
        """Redact PII in user messages before they reach the model."""
        print(f"pipe:{__name__}")
        print(body)
        print(user)

        # Redact unless the sender is an admin and admin redaction is off.
        should_redact = (
            user is None
            or user.get("role") != "admin"
            or self.valves.enabled_for_admins
        )
        if should_redact:
            for message in body.get("messages", []):
                if message.get("role") == "user":
                    message["content"] = self.redact_pii(message["content"])

        return body
# Methods of the Rate Limit Filter `Pipeline` class (class header, Valves and
# __init__ are defined above this chunk).

async def on_startup(self):
    # This function is called when the server is started.
    print(f"on_startup:{__name__}")

async def on_shutdown(self):
    # This function is called when the server is stopped.
    print(f"on_shutdown:{__name__}")

def prune_requests(self, user_id: str):
    """Prune old requests that are outside of every configured window."""
    now = time.time()
    if user_id in self.user_requests:
        # Keep a timestamp as long as at least one enabled limit still needs it.
        self.user_requests[user_id] = [
            req
            for req in self.user_requests[user_id]
            if (
                (self.valves.requests_per_minute is not None and now - req < 60)
                or (self.valves.requests_per_hour is not None and now - req < 3600)
                or (
                    self.valves.sliding_window_limit is not None
                    and now - req < self.valves.sliding_window_minutes * 60
                )
            )
        ]

def log_request(self, user_id: str):
    """Log a new request timestamp for a user."""
    now = time.time()
    if user_id not in self.user_requests:
        self.user_requests[user_id] = []
    self.user_requests[user_id].append(now)

def rate_limited(self, user_id: str) -> bool:
    """Return True if the user exceeds any of the configured limits."""
    self.prune_requests(user_id)

    user_reqs = self.user_requests.get(user_id, [])

    if self.valves.requests_per_minute is not None:
        requests_last_minute = sum(1 for req in user_reqs if time.time() - req < 60)
        if requests_last_minute >= self.valves.requests_per_minute:
            return True

    if self.valves.requests_per_hour is not None:
        requests_last_hour = sum(1 for req in user_reqs if time.time() - req < 3600)
        if requests_last_hour >= self.valves.requests_per_hour:
            return True

    if self.valves.sliding_window_limit is not None:
        if len(user_reqs) >= self.valves.sliding_window_limit:
            return True

    return False

async def inlet(self, body: dict, user: Optional[dict] = None) -> dict:
    """Reject the request when the calling user is over the rate limit.

    Raises:
        Exception: when the user has exceeded a configured limit.
    """
    print(f"pipe:{__name__}")
    print(body)
    print(user)

    # FIX: `user` may be None; the original `user.get(...)` crashed with
    # AttributeError. A missing role still defaults to "admin" (unlimited),
    # matching the original behavior for role-less users.
    if user is not None and user.get("role", "admin") == "user":
        user_id = user["id"] if "id" in user else "default_user"
        if self.rate_limited(user_id):
            raise Exception("Rate limit exceeded. Please try again later.")

        self.log_request(user_id)
    return body
def pipe(
    self,
    user_message: str,
    model_id: str,
    messages: List[dict],
    body: dict,
) -> Union[str, Generator, Iterator]:
    """Demo pipe that brackets its answer with Open WebUI status events."""

    def status_event(description: str, done: bool) -> dict:
        # Shape of an Open WebUI "status" event payload.
        return {
            "event": {
                "type": "status",
                "data": {"description": description, "done": done},
            }
        }

    print(f"pipe: {__name__}")
    if self.debug:
        print(f"pipe: {__name__} - received message from user: {user_message}")

    yield status_event("Fake Status", False)
    time.sleep(5)  # Simulate a long-running step between the two events.
    yield f"user_message: {user_message}"
    yield status_event("", True)
def pipe(
    self, user_message: str, model_id: str, messages: List[dict], body: dict
) -> Union[str, Generator, Iterator]:
    """Handle a chat turn: optionally set the macOS output volume via
    AppleScript (for messages like "volume 40"), then forward the message
    to a local Ollama server and return/stream its reply.
    """
    print(f"pipe:{__name__}")

    OLLAMA_BASE_URL = "http://localhost:11434"
    MODEL = "llama3"

    # Title-generation requests get a static title instead of a model call.
    if body.get("title", False):
        print("Title Generation")
        return "AppleScript Pipeline"

    if "user" in body:
        print("######################################")
        print(f'# User: {body["user"]["name"]} ({body["user"]["id"]})')
        print(f"# Message: {user_message}")
        print("######################################")

    commands = user_message.split(" ")

    if commands[0] == "volume":
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only a missing or non-numeric level should be
        # ignored.
        try:
            level = int(commands[1])
            if 0 <= level <= 100:
                call(
                    [f"osascript -e 'set volume output volume {level}'"],
                    shell=True,
                )
        except (IndexError, ValueError):
            pass

    payload = {
        "model": MODEL,
        "messages": [
            {
                "role": "system",
                # FIX: dropped a pointless f-string prefix (no placeholders).
                "content": "You are an agent of the AppleScript Pipeline. You have the power to control the volume of the system.",
            },
            {"role": "user", "content": user_message},
        ],
        "stream": body["stream"],
    }

    try:
        r = requests.post(
            url=f"{OLLAMA_BASE_URL}/v1/chat/completions",
            json=payload,
            stream=True,
        )
        r.raise_for_status()

        if body["stream"]:
            return r.iter_lines()
        else:
            return r.json()
    except Exception as e:
        return f"Error: {e}"
class Pipeline:
    """Pipeline that forwards chat messages to a Dify workflow endpoint."""

    def __init__(self):
        self.name = "Dify Agent Pipeline"
        # Set correct hostname of the Dify workflow-run endpoint.
        self.api_url = "http://dify.hostname/v1/workflows/run"
        # Insert your actual API key here.
        self.api_key = "app-dify-key"
        # Dify supports streamed responses.
        self.api_request_stream = True
        self.verify_ssl = True
        self.debug = False

    async def on_startup(self):
        # Called once when the server starts.
        print(f"on_startup: {__name__}")

    async def on_shutdown(self):
        # Called once when the server shuts down.
        print(f"on_shutdown: {__name__}")

    async def inlet(self, body: dict, user: Optional[dict] = None) -> dict:
        # Runs before the request is forwarded; the body may be edited here.
        print(f"inlet: {__name__}")
        if self.debug:
            print(f"inlet: {__name__} - body:")
            pprint(body)
            print(f"inlet: {__name__} - user:")
            pprint(user)
        return body

    async def outlet(self, body: dict, user: Optional[dict] = None) -> dict:
        # Runs after the response completes; messages may be edited here.
        print(f"outlet: {__name__}")
        if self.debug:
            print(f"outlet: {__name__} - body:")
            pprint(body)
            print(f"outlet: {__name__} - user:")
            pprint(user)
        return body

    def pipe(
        self, user_message: str, model_id: str, messages: List[dict], body: dict
    ) -> Union[str, Generator, Iterator]:
        """Trigger the Dify workflow and yield the text chunks it returns."""
        print(f"pipe: {__name__}")

        if self.debug:
            print(f"pipe: {__name__} - received message from user: {user_message}")

        # Map the stream flag onto Dify's response_mode parameter.
        response_mode = "streaming" if self.api_request_stream is True else "blocking"

        request_headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        request_payload = {
            "inputs": {"prompt": user_message},
            "response_mode": response_mode,
            "user": body["user"]["email"],
        }

        response = requests.post(
            self.api_url,
            headers=request_headers,
            json=request_payload,
            stream=self.api_request_stream,
            verify=self.verify_ssl,
        )
        if response.status_code == 200:
            # Relay each SSE line, surfacing only the nested data.text field.
            for line in response.iter_lines():
                if not line:
                    continue
                try:
                    # Strip the 'data: ' prefix and parse the JSON payload.
                    event = json.loads(line.decode("utf-8").replace("data: ", ""))
                    if "data" in event and "text" in event["data"]:
                        yield event["data"]["text"]
                except json.JSONDecodeError:
                    print(f"Failed to parse JSON: {line}")
        else:
            yield f"Workflow request failed with status code: {response.status_code}"
description="FlowiseAI API key (from Bearer key, e.g. QMknVTFTB40Pk23n6KIVRgdB7va2o-Xlx73zEfpeOu0)") + FLOWISE_BASE_URL: str = Field(default="", description="FlowiseAI base URL (e.g. http://localhost:3000 (URL before '/api/v1/prediction'))") + RATE_LIMIT: int = Field(default=5, description="Rate limit for the pipeline (ops/minute)") + + FLOW_0_ENABLED: Optional[bool] = Field(default=False, description="Flow 0 Enabled (make this flow available for use)") + FLOW_0_ID: Optional[str] = Field(default=None, description="Flow 0 ID (the flow GUID, e.g. b06d97f5-da14-4d29-81bd-8533261b6c88)") + FLOW_0_NAME: Optional[str] = Field(default=None, description="Flow 0 Name (human-readable flow name, no special characters, e.g. news or stock-reader)") + + FLOW_1_ENABLED: Optional[bool] = Field(default=False, description="Flow 1 Enabled (make this flow available for use)") + FLOW_1_ID: Optional[str] = Field(default=None, description="Flow 1 ID (the flow GUID, e.g. b06d97f5-da14-4d29-81bd-8533261b6c88)") + FLOW_1_NAME: Optional[str] = Field(default=None, description="Flow 1 Name (human-readable flwo name, no special characters, e.g. news or stock-reader)") + + FLOW_2_ENABLED: Optional[bool] = Field(default=False, description="Flow 2 Enabled (make this flow available for use)") + FLOW_2_ID: Optional[str] = Field(default=None, description="Flow 2 ID (the flow GUID, e.g. b06d97f5-da14-4d29-81bd-8533261b6c88)") + FLOW_2_NAME: Optional[str] = Field(default=None, description="Flow 2 Name (human-readable flow name, no special characters, e.g. news or stock-reader)") + + FLOW_3_ENABLED: Optional[bool] = Field(default=False, description="Flow 3 Enabled (make this flow available for use)") + FLOW_3_ID: Optional[str] = Field(default=None, description="Flow 3 ID (the flow GUID, e.g. b06d97f5-da14-4d29-81bd-8533261b6c88)") + FLOW_3_NAME: Optional[str] = Field(default=None, description="Flow 3 Name (human-readable flow name, no special characters, e.g. 
news or stock-reader)") + + FLOW_4_ENABLED: Optional[bool] = Field(default=False, description="Flow 4 Enabled (make this flow available for use)") + FLOW_4_ID: Optional[str] = Field(default=None, description="Flow 4 ID (the flow GUID, e.g. b06d97f5-da14-4d29-81bd-8533261b6c88)") + FLOW_4_NAME: Optional[str] = Field(default=None, description="Flow 4 Name (human-readable flow name, no special characters, e.g. news or stock-reader)") + + FLOW_5_ENABLED: Optional[bool] = Field(default=False, description="Flow 5 Enabled (make this flow available for use)") + FLOW_5_ID: Optional[str] = Field(default=None, description="Flow 5 ID (the flow GUID, e.g. b06d97f5-da14-4d29-81bd-8533261b6c88)") + FLOW_5_NAME: Optional[str] = Field(default=None, description="Flow 5 Name (human-readable flow name, no special characters, e.g. news or stock-reader)") + + FLOW_6_ENABLED: Optional[bool] = Field(default=False, description="Flow 6 Enabled (make this flow available for use)") + FLOW_6_ID: Optional[str] = Field(default=None, description="Flow 6 ID (the flow GUID, e.g. b06d97f5-da14-4d29-81bd-8533261b6c88)") + FLOW_6_NAME: Optional[str] = Field(default=None, description="Flow 6 Name (human-readable flow name, no special characters, e.g. news or stock-reader)") + + FLOW_7_ENABLED: Optional[bool] = Field(default=False, description="Flow 7 Enabled (make this flow available for use)") + FLOW_7_ID: Optional[str] = Field(default=None, description="Flow 7 ID (the flow GUID, e.g. b06d97f5-da14-4d29-81bd-8533261b6c88)") + FLOW_7_NAME: Optional[str] = Field(default=None, description="Flow 7 Name (human-readable flow name, no special characters, e.g. news or stock-reader)") + + FLOW_8_ENABLED: Optional[bool] = Field(default=False, description="Flow 8 Enabled (make this flow available for use)") + FLOW_8_ID: Optional[str] = Field(default=None, description="Flow 8 ID (the flow GUID, e.g. 
# Setup/helper methods of the FlowiseAI `Pipeline` class (the class header and
# the Valves model with the FLOW_N_* fields are defined above this chunk).

def __init__(self):
    self.name = "FlowiseAI Pipeline"

    # Initialize valve parameters from environment variables.
    self.valves = self.Valves(
        **{k: os.getenv(k, v.default) for k, v in self.Valves.model_fields.items()}
    )

    # Build flow mapping for faster lookup.
    self.flows = {}
    self.update_flows()

def get_flow_details(self, flow_id: str) -> Optional[dict]:
    """
    Fetch flow details from the FlowiseAI API.

    Args:
        flow_id (str): The ID of the flow to fetch

    Returns:
        Optional[dict]: Flow details if successful, None if failed
    """
    try:
        api_url = f"{self.valves.FLOWISE_BASE_URL.rstrip('/')}/api/v1/chatflows/{flow_id}"
        headers = {"Authorization": f"Bearer {self.valves.FLOWISE_API_KEY}"}

        response = requests.get(api_url, headers=headers)

        if response.status_code == 200:
            return response.json()

        logger.error(f"Error fetching flow details: Status {response.status_code}")
        return None
    except Exception as e:
        logger.error(f"Error fetching flow details: {str(e)}")
        return None

def update_flows(self):
    """Update the flows dictionary based on the current valve settings."""
    self.flows = {}
    # Iterate through each flow slot; stop at the first gap in numbering.
    for i in range(20):  # Support up to 20 flows
        enabled_name = f"FLOW_{i}_ENABLED"
        if not hasattr(self.valves, enabled_name):  # sequential numbering
            break
        enabled = getattr(self.valves, f"FLOW_{i}_ENABLED", False)
        flow_id = getattr(self.valves, f"FLOW_{i}_ID", None)
        flow_name = getattr(self.valves, f"FLOW_{i}_NAME", None)

        if enabled and flow_id and flow_name:
            # Fetch the human-readable flow name from the API.
            flow_details = self.get_flow_details(flow_id)
            api_name = flow_details.get('name', 'Unknown') if flow_details else 'Unknown'

            # Store both names in the flows dictionary.
            self.flows[flow_name.lower()] = {
                'id': flow_id,
                'brief_name': flow_name,
                'api_name': api_name,
            }

    logger.info(f"Updated flows: {[{k: v['api_name']} for k, v in self.flows.items()]}")

async def on_startup(self):
    """Called when the server is started."""
    logger.debug(f"on_startup:{self.name}")
    self.update_flows()

async def on_shutdown(self):
    """Called when the server is stopped."""
    logger.debug(f"on_shutdown:{self.name}")

async def on_valves_updated(self) -> None:
    """Called when valves are updated."""
    logger.debug(f"on_valves_updated:{self.name}")
    self.update_flows()

def rate_check(self, dt_start: datetime) -> bool:
    """
    Check elapsed time and sleep if not enough time has passed for the rate.

    Args:
        dt_start (datetime): Start time of the operation
    Returns:
        bool: True if sleep was done
    """
    dt_end = datetime.now()
    time_diff = (dt_end - dt_start).total_seconds()
    time_buffer = 1 / self.valves.RATE_LIMIT
    if time_diff >= time_buffer:  # no need to sleep
        return False
    time.sleep(time_buffer - time_diff)
    return True

def parse_user_input(self, user_message: str) -> tuple[Optional[str], str]:
    """
    Parse the user message to extract a flow name and query.

    Format expected: flow_name: query

    Returns:
        tuple[Optional[str], str]: Lower-cased flow name (None when the
        message has no "name:" prefix, in which case the message is returned
        unchanged) and the query with a date/time suffix appended.
    """
    # FIX: the return annotation was tuple[str, str], but the no-match path
    # returns (None, user_message).
    pattern = r"^([^:]+):\s*(.+)$"
    match = re.match(pattern, user_message.strip())

    if not match:
        return None, user_message

    flow_name = match.group(1).strip().lower()
    query = match.group(2).strip()

    date_now = datetime.now().strftime("%Y-%m-%d")
    time_now = datetime.now().strftime("%H:%M:%S")
    query = f"{query}; today's date is {date_now} and the current time is {time_now}"

    return flow_name, query
# pipe() and stream_retrieve() of the FlowiseAI `Pipeline` class.

def pipe(
    self,
    user_message: str,
    model_id: str,
    messages: List[dict],
    body: dict
) -> Union[str, Generator, Iterator]:
    """
    Main pipeline function. Calls a specified FlowiseAI flow with the provided query.

    Format expected: flow_name: query
    If no flow is specified, a list of available flows will be returned.
    """
    logger.debug(f"pipe:{self.name}")

    dt_start = datetime.now()
    streaming = body.get("stream", False)
    logger.warning(f"Stream: {streaming}")
    context = ""

    # Check if we have valid API configuration.
    # FIX: this function is a generator (it contains `yield`), so the old
    # `return error_msg` silently discarded the message in non-streaming
    # mode, and the streaming path fell through and kept executing.
    if not self.valves.FLOWISE_API_KEY or not self.valves.FLOWISE_BASE_URL:
        yield "FlowiseAI configuration missing. Please set FLOWISE_API_KEY and FLOWISE_BASE_URL valves."
        return

    # Parse the user message to extract flow name and query.
    flow_name, query = self.parse_user_input(user_message)

    # If no flow specified or invalid flow, list available flows.
    if flow_name is None or flow_name not in self.flows:
        available_flows = list(self.flows.keys())
        if not available_flows:
            # FIX: same generator-return defect as above.
            yield "No flows configured. Enable at least one FLOW_X_ENABLED valve and set its ID and NAME."
            return

        flows_list = "\n".join(
            [f"- flow_name: {flow} (description:{self.flows[flow]['api_name']})" for flow in available_flows]
        )
        help_msg = f"Please specify a flow using the format: : \n\nAvailable flows:\n{flows_list}"

        if flow_name is None:
            help_msg = "No flow specified. " + help_msg
        else:
            help_msg = f"Invalid flow '{flow_name}'. " + help_msg

        yield help_msg
        return

    # Get the flow ID from the map.
    flow_id = self.flows[flow_name]['id']

    if streaming:
        yield from self.stream_retrieve(flow_id, flow_name, query, dt_start)
    else:
        for chunk in self.static_retrieve(flow_id, flow_name, query, dt_start):
            context += chunk
        # FIX: was `return context ...`; a generator never delivers a return
        # value to the caller, so the collected answer was lost.
        yield context if context else "No response from FlowiseAI"

def stream_retrieve(
    self, flow_id: str, flow_name: str, query: str, dt_start: datetime
) -> Generator:
    """
    Stream responses from FlowiseAI using the official client library.

    Args:
        flow_id (str): The ID of the flow to call
        flow_name (str): The name of the flow (for logging)
        query (str): The user's query
        dt_start (datetime): Start time for rate limiting

    Returns:
        Generator: Response chunks for streaming
    """
    if not query:
        yield "Query is empty. Please provide a question or prompt for the flow."
        return

    try:
        logger.info(f"Streaming from FlowiseAI flow '{flow_name}' with query: {query}")

        # Rate limiting check.
        self.rate_check(dt_start)

        # Initialize Flowise client with API configuration.
        client = Flowise(
            base_url=self.valves.FLOWISE_BASE_URL.rstrip('/'),
            api_key=self.valves.FLOWISE_API_KEY
        )

        # Create streaming prediction request.
        completion = client.create_prediction(
            PredictionData(
                chatflowId=flow_id,
                question=query,
                streaming=True
            )
        )
    except Exception as e:
        error_msg = f"Error streaming from FlowiseAI: {str(e)}"
        logger.error(error_msg)
        yield error_msg
        # FIX: without this return, execution continued into the loop below
        # and crashed with NameError because `completion` was never bound.
        return

    idx_last_update = 0
    yield f"Analysis started... {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"

    # Process each streamed chunk.
    for chunk in completion:
        try:
            if isinstance(chunk, str):
                chunk = json.loads(chunk)
        except Exception:
            # If the chunk is not valid JSON, keep it as-is.
            pass

        try:
            if isinstance(chunk, dict):
                # Expected format: {event: "token", data: "content"}
                if "event" in chunk:
                    if ((chunk["event"] in ["start", "update", "agentReasoning"]) and
                            ("data" in chunk) and (isinstance(chunk["data"], list))):
                        # Only emit agent updates not seen in earlier chunks.
                        for data_update in chunk["data"][idx_last_update:]:
                            idx_last_update += 1
                            yield "\n---\n"
                            yield f"\n__Reasoning: {data_update['agentName']} ({datetime.now().strftime('%Y-%m-%d %H:%M:%S')})__\n\n"
                            for message in data_update["messages"]:
                                yield message  # yield message for each agent update
                    elif chunk["event"] == "end":
                        # {"event":"end","data":"[DONE]"}
                        yield "\n---\n"
                        yield f"\nAnalysis complete. ({datetime.now().strftime('%Y-%m-%d %H:%M:%S')})\n\n"
                    elif chunk["event"] == "token":
                        # Flat/final token output of the flow: nothing to do.
                        pass
                elif "error" in chunk:
                    error_msg = f"Error from FlowiseAI: {chunk['error']}"
                    logger.error(error_msg)
                    yield error_msg
                else:
                    # If chunk format is unexpected, yield as is.
                    yield str(chunk)
        except Exception as e:
            logger.error(f"Error processing chunk: {str(e)}")
            yield f"\nUnusual Response Chunk: ({datetime.now().strftime('%Y-%m-%d %H:%M:%S')})\n{str(e)}\n"
            yield f"\n---\n"
            yield str(chunk)

    return
+ + Args: + flow_id (str): The ID of the flow to call + flow_name (str): The name of the flow (for logging) + query (str): The user's query + dt_start (datetime): Start time for rate limiting + + Returns: + Generator: Response chunks for non-streaming requests + """ + if not query: + yield "Query is empty. Please provide a question or prompt for the flow." + return + + api_url = f"{self.valves.FLOWISE_BASE_URL.rstrip('/')}/api/v1/prediction/{flow_id}" + headers = {"Authorization": f"Bearer {self.valves.FLOWISE_API_KEY}"} + + payload = { + "question": query, + } + + try: + logger.info(f"Calling FlowiseAI flow '{flow_name}' with query: {query}") + + # Rate limiting check + self.rate_check(dt_start) + + response = requests.post(api_url, headers=headers, json=payload) + + if response.status_code != 200: + error_msg = f"Error from FlowiseAI: Status {response.status_code}" + logger.error(f"{error_msg} - {response.text}") + yield error_msg + return + + try: + result = response.json() + + # Format might vary based on flow configuration + # Try common response formats + if isinstance(result, dict): + if "text" in result: + yield result["text"] + elif "answer" in result: + yield result["answer"] + elif "response" in result: + yield result["response"] + elif "result" in result: + yield result["result"] + else: + # If no standard field found, return full JSON as string + yield f"```json\n{json.dumps(result, indent=2)}\n```" + elif isinstance(result, str): + yield result + else: + yield f"```json\n{json.dumps(result, indent=2)}\n```" + + except json.JSONDecodeError: + # If not JSON, return the raw text + yield response.text + + except Exception as e: + error_msg = f"Error calling FlowiseAI: {str(e)}" + logger.error(error_msg) + yield error_msg + + return \ No newline at end of file diff --git a/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/README.md b/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/README.md new file mode 100644 
index 0000000..8d2cca6 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/README.md @@ -0,0 +1,28 @@ +# Example of langgraph integration +## Python version: 3.11 +## Feature +1. Using langgraph stream writer and custom mode of stream to integrate langgraph with open webui pipeline. +2. Support \ block display. +## Prerequirement +Install the open webui pipeline. +You can follow the docs : https://docs.openwebui.com/pipelines/#-quick-start-with-docker + +## Usage +### 1. Upload pipeline file +Upload `langgraph_stream_pipeline.py` to the open webui pipeline. + +### 2. Enable the uploaded pipeline +Properly set up your langgraph api url. + +And choose **"LangGraph stream"** as your model. + +### 2. Install dependencies +Under the folder `pipelines/examples/pipelines/integrations/langgraph_pipeline`, run command below : +``` +pip install -r requirements.txt +``` +### 3. Start langgraph api server +Run command below : +``` +uvicorn langgraph_example:app --reload +``` \ No newline at end of file diff --git a/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/langgraph_example.py b/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/langgraph_example.py new file mode 100644 index 0000000..6ae57a2 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/langgraph_example.py @@ -0,0 +1,166 @@ +""" +title: Langgraph stream integration +author: bartonzzx +author_url: https://github.com/bartonzzx +git_url: +description: Integrate langgraph with open webui pipeline +required_open_webui_version: 0.4.3 +requirements: none +version: 0.4.3 +licence: MIT +""" + + +import os +import json +import getpass +from typing import Annotated, Literal +from typing_extensions import TypedDict + +from fastapi import FastAPI +from fastapi.responses import StreamingResponse + +from langgraph.graph import StateGraph, START, END +from langgraph.graph.message import add_messages +from 
langchain_openai import ChatOpenAI +from langgraph.config import get_stream_writer + + +''' +Define LLM API key +''' +def _set_env(var: str): + if not os.environ.get(var): + os.environ[var] = getpass.getpass(f"{var}: ") + + +_set_env("OPENAI_API_KEY") + + +''' +Define Langgraph +''' +def generate_custom_stream(type: Literal["think","normal"], content: str): + content = "\n"+content+"\n" + custom_stream_writer = get_stream_writer() + return custom_stream_writer({type:content}) + +class State(TypedDict): + messages: Annotated[list, add_messages] + +llm = ChatOpenAI(model="gpt-3.5-turbo") + +def chatbot(state: State): + think_response = llm.invoke(["Please reasoning:"] + state["messages"]) + normal_response = llm.invoke(state["messages"]) + generate_custom_stream("think", think_response.content) + generate_custom_stream("normal", normal_response.content) + return {"messages": [normal_response]} + +# Define graph +graph_builder = StateGraph(State) + +# Define nodes +graph_builder.add_node("chatbot", chatbot) +graph_builder.add_edge("chatbot", END) + +# Define edges +graph_builder.add_edge(START, "chatbot") + +# Compile graph +graph = graph_builder.compile() + + +''' +Define api processing +''' +app = FastAPI( + title="Langgraph API", + description="Langgraph API", + ) + +@app.get("/test") +async def test(): + return {"message": "Hello World"} + + +@app.post("/stream") +async def stream(inputs: State): + async def event_stream(): + try: + stream_start_msg = { + 'choices': + [ + { + 'delta': {}, + 'finish_reason': None + } + ] + } + + # Stream start + yield f"data: {json.dumps(stream_start_msg)}\n\n" + + # Processing langgraph stream response with block support + async for event in graph.astream(input=inputs, stream_mode="custom"): + print(event) + think_content = event.get("think", None) + normal_content = event.get("normal", None) + + think_msg = { + 'choices': + [ + { + 'delta': + { + 'reasoning_content': think_content, + }, + 'finish_reason': None + } + ] + } + + 
normal_msg = { + 'choices': + [ + { + 'delta': + { + 'content': normal_content, + }, + 'finish_reason': None + } + ] + } + + yield f"data: {json.dumps(think_msg)}\n\n" + yield f"data: {json.dumps(normal_msg)}\n\n" + + # End of the stream + stream_end_msg = { + 'choices': [ + { + 'delta': {}, + 'finish_reason': 'stop' + } + ] + } + yield f"data: {json.dumps(stream_end_msg)}\n\n" + + except Exception as e: + # Simply print the error information + print(f"An error occurred: {e}") + + return StreamingResponse( + event_stream(), + media_type="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + } + ) + +if __name__ == "__main__": + import uvicorn + + uvicorn.run(app, host="0.0.0.0", port=9000) \ No newline at end of file diff --git a/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/langgraph_stream_pipeline.py b/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/langgraph_stream_pipeline.py new file mode 100644 index 0000000..65da0df --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/langgraph_stream_pipeline.py @@ -0,0 +1,63 @@ +""" +title: Langgraph stream integration +author: bartonzzx +author_url: https://github.com/bartonzzx +git_url: +description: Integrate langgraph with open webui pipeline +required_open_webui_version: 0.4.3 +requirements: none +version: 0.4.3 +licence: MIT +""" + + +import os +import requests +from pydantic import BaseModel, Field +from typing import List, Union, Generator, Iterator + + +class Pipeline: + class Valves(BaseModel): + API_URL: str = Field(default="http://127.0.0.1:9000/stream", description="Langgraph API URL") + + def __init__(self): + self.id = "LangGraph stream" + self.name = "LangGraph stream" + # Initialize valve paramaters + self.valves = self.Valves( + **{k: os.getenv(k, v.default) for k, v in self.Valves.model_fields.items()} + ) + + async def on_startup(self): + # This function is called when the 
server is started. + print(f"on_startup: {__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is shutdown. + print(f"on_shutdown: {__name__}") + pass + + def pipe( + self, + user_message: str, + model_id: str, + messages: List[dict], + body: dict + ) -> Union[str, Generator, Iterator]: + + data = { + "messages": [[msg['role'], msg['content']] for msg in messages], + } + + headers = { + 'accept': 'text/event-stream', + 'Content-Type': 'application/json', + } + + response = requests.post(self.valves.API_URL, json=data, headers=headers, stream=True) + + response.raise_for_status() + + return response.iter_lines() \ No newline at end of file diff --git a/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/requirements.txt b/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/requirements.txt new file mode 100644 index 0000000..fc122d6 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/integrations/langgraph_pipeline/requirements.txt @@ -0,0 +1,40 @@ +annotated-types==0.7.0 +anyio==4.8.0 +certifi==2025.1.31 +charset-normalizer==3.4.1 +click==8.1.8 +distro==1.9.0 +fastapi==0.115.11 +h11==0.14.0 +httpcore==1.0.7 +httpx==0.28.1 +idna==3.10 +jiter==0.9.0 +jsonpatch==1.33 +jsonpointer==3.0.0 +langchain-core==0.3.45 +langchain-openai==0.3.8 +langgraph==0.3.11 +langgraph-checkpoint==2.0.20 +langgraph-prebuilt==0.1.3 +langgraph-sdk==0.1.57 +langsmith==0.3.15 +msgpack==1.1.0 +openai==1.66.3 +orjson==3.10.15 +packaging==24.2 +pydantic==2.10.6 +pydantic_core==2.27.2 +PyYAML==6.0.2 +regex==2024.11.6 +requests==2.32.3 +requests-toolbelt==1.0.0 +sniffio==1.3.1 +starlette==0.46.1 +tenacity==9.0.0 +tiktoken==0.9.0 +tqdm==4.67.1 +typing_extensions==4.12.2 +urllib3==2.3.0 +uvicorn==0.34.0 +zstandard==0.23.0 diff --git a/openwebui/pipelines/examples/pipelines/integrations/n8n_pipeline.py b/openwebui/pipelines/examples/pipelines/integrations/n8n_pipeline.py new file mode 100644 index 0000000..51e0e4d 
--- /dev/null +++ b/openwebui/pipelines/examples/pipelines/integrations/n8n_pipeline.py @@ -0,0 +1,79 @@ +from typing import List, Union, Generator, Iterator, Optional +from pprint import pprint +import requests, json, warnings + +# Uncomment to disable SSL verification warnings if needed. +# warnings.filterwarnings('ignore', message='Unverified HTTPS request') + +class Pipeline: + def __init__(self): + self.name = "N8N Agent Pipeline" + self.api_url = "https://n8n.host/webhook/myflow" # Set correct hostname + self.api_key = "" # Insert your actual API key here + self.verify_ssl = True + self.debug = False + # Please note that N8N do not support stream reponses + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup: {__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is shutdown. + print(f"on_shutdown: {__name__}") + pass + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + # This function is called before the OpenAI API request is made. You can modify the form data before it is sent to the OpenAI API. + print(f"inlet: {__name__}") + if self.debug: + print(f"inlet: {__name__} - body:") + pprint(body) + print(f"inlet: {__name__} - user:") + pprint(user) + return body + + async def outlet(self, body: dict, user: Optional[dict] = None) -> dict: + # This function is called after the OpenAI API response is completed. You can modify the messages after they are received from the OpenAI API. + print(f"outlet: {__name__}") + if self.debug: + print(f"outlet: {__name__} - body:") + pprint(body) + print(f"outlet: {__name__} - user:") + pprint(user) + return body + + def pipe(self, user_message: str, model_id: str, messages: List[dict], body: dict) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. 
+ print(f"pipe: {__name__}") + + if self.debug: + print(f"pipe: {__name__} - received message from user: {user_message}") + + # This function triggers the workflow using the specified API. + headers = { + 'Authorization': f'Bearer {self.api_key}', + 'Content-Type': 'application/json' + } + data = { + "inputs": {"prompt": user_message}, + "user": body["user"]["email"] + } + + response = requests.post(self.api_url, headers=headers, json=data, verify=self.verify_ssl) + if response.status_code == 200: + # Process and yield each chunk from the response + try: + for line in response.iter_lines(): + if line: + # Decode each line assuming UTF-8 encoding and directly parse it as JSON + json_data = json.loads(line.decode('utf-8')) + # Check if 'output' exists in json_data and yield it + if 'output' in json_data: + yield json_data['output'] + except json.JSONDecodeError as e: + print(f"Failed to parse JSON from line. Error: {str(e)}") + yield "Error in JSON parsing." + else: + yield f"Workflow request failed with status code: {response.status_code}" diff --git a/openwebui/pipelines/examples/pipelines/integrations/python_code_pipeline.py b/openwebui/pipelines/examples/pipelines/integrations/python_code_pipeline.py new file mode 100644 index 0000000..938d984 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/integrations/python_code_pipeline.py @@ -0,0 +1,50 @@ +from typing import List, Union, Generator, Iterator +from schemas import OpenAIChatMessage +import subprocess + + +class Pipeline: + def __init__(self): + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. 
+ # self.id = "python_code_pipeline" + self.name = "Python Code Pipeline" + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + def execute_python_code(self, code): + try: + result = subprocess.run( + ["python", "-c", code], capture_output=True, text=True, check=True + ) + stdout = result.stdout.strip() + return stdout, result.returncode + except subprocess.CalledProcessError as e: + return e.output.strip(), e.returncode + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. + print(f"pipe:{__name__}") + + print(messages) + print(user_message) + + if body.get("title", False): + print("Title Generation") + return "Python Code Pipeline" + else: + stdout, return_code = self.execute_python_code(user_message) + return stdout diff --git a/openwebui/pipelines/examples/pipelines/integrations/wikipedia_pipeline.py b/openwebui/pipelines/examples/pipelines/integrations/wikipedia_pipeline.py new file mode 100644 index 0000000..15eb797 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/integrations/wikipedia_pipeline.py @@ -0,0 +1,218 @@ +""" +title: Wikipedia Article Retrieval +author: Unknown +author_url: Unknown +git_url: https://github.com/open-webui/pipelines/blob/main/examples/pipelines/integrations/wikipedia_pipeline.py +description: Wikipedia Search and Return +required_open_webui_version: 0.4.3 +requirements: wikipedia +version: 0.4.3 +licence: MIT +""" + + +from typing import List, Union, Generator, Iterator +from pydantic import BaseModel, Field +import wikipedia +import requests +import os +from datetime import datetime +import time +import re + +from logging import getLogger +logger = getLogger(__name__) 
+logger.setLevel("DEBUG") + + +class Pipeline: + class Valves(BaseModel): + # OPENAI_API_KEY: str = Field(default="", description="OpenAI API key") + RATE_LIMIT: int = Field(default=5, description="Rate limit for the pipeline") + WORD_LIMIT: int = Field(default=300, description="Word limit when getting page summary") + WIKIPEDIA_ROOT: str = Field(default="https://en.wikipedia.org/wiki", description="Wikipedia root URL") + + def __init__(self): + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "wiki_pipeline" + self.name = "Wikipedia Pipeline" + + # Initialize valve paramaters + self.valves = self.Valves( + **{k: os.getenv(k, v.default) for k, v in self.Valves.model_fields.items()} + ) + + async def on_startup(self): + # This function is called when the server is started. + logger.debug(f"on_startup:{self.name}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. 
+ logger.debug(f"on_shutdown:{self.name}") + pass + + def rate_check(self, dt_start: datetime): + """ + Check time, sleep if not enough time has passed for rate + + Args: + dt_start (datetime): Start time of the operation + Returns: + bool: True if sleep was done + """ + dt_end = datetime.now() + time_diff = (dt_end - dt_start).total_seconds() + time_buffer = (1 / self.valves.RATE_LIMIT) + if time_diff >= time_buffer: # no need to sleep + return False + time.sleep(time_buffer - time_diff) + return True + + def pipe( + self, + user_message: str, + model_id: str, + messages: List[dict], + body: dict + ) -> Union[str, Generator, Iterator]: + """ + Main pipeline function. Performs wikipedia article lookup by query + and returns the summary of the first article. + """ + logger.debug(f"pipe:{self.name}") + + # Check if title generation is requested + # as of 12/28/24, these were standard greetings + if ("broad tags categorizing" in user_message.lower()) \ + or ("Create a concise" in user_message.lower()): + # ## Create a concise, 3-5 word title with + # ## Task:\nGenerate 1-3 broad tags categorizing the main themes + logger.debug(f"Title Generation (aborted): {user_message}") + return "(title generation disabled)" + + logger.info(f"User Message: {user_message}") + # logger.info(f"Messages: {messages}") + # [{'role': 'user', 'content': 'history of ibm'}] + + # logger.info(f"Body: {body}") + # {'stream': True, 'model': 'wikipedia_pipeline', + # 'messages': [{'role': 'user', 'content': 'history of ibm'}], + # 'user': {'name': 'User', 'id': '235a828f-84a3-44a0-b7af-721ee8be6571', + # 'email': 'admin@localhost', 'role': 'admin'}} + + dt_start = datetime.now() + multi_part = False + streaming = body.get("stream", False) + logger.warning(f"Stream: {streaming}") + context = "" + + # examples from https://pypi.org/project/wikipedia/ + # new addition - ability to include multiple topics with a semicolon + for query in user_message.split(';'): + self.rate_check(dt_start) + query = 
query.strip() + + if multi_part: + if streaming: + yield "---\n" + else: + context += "---\n" + if body.get("stream", True): + yield from self.stream_retrieve(query, dt_start) + else: + for chunk in self.stream_retrieve(query, dt_start): + context += chunk + multi_part = True + + if not streaming: + return context if context else "No information found" + + + def stream_retrieve( + self, query:str, dt_start: datetime, + ) -> Generator: + """ + Retrieve the wikipedia page for the query and return the summary. Return a generator + for streaming responses but can also be iterated for a single response. + """ + + re_query = re.compile(r"[^0-9A-Z]", re.IGNORECASE) + re_rough_word = re.compile(r"[\w]+", re.IGNORECASE) + + titles_found = None + try: + titles_found = wikipedia.search(query) + # r = requests.get( + # f"https://en.wikipedia.org/w/api.php?action=opensearch&search={query}&limit=1&namespace=0&format=json" + # ) + logger.info(f"Query: {query}, Found: {titles_found}") + except Exception as e: + logger.error(f"Search Error: {query} -> {e}") + yield f"Page Search Error: {query}" + + if titles_found is None or not titles_found: # no results + yield f"No information found for '{query}'" + return + + self.rate_check(dt_start) + + # if context: # add separator if multiple topics + # context += "---\n" + try: + title_check = titles_found[0] + wiki_page = wikipedia.page(title_check, auto_suggest=False) # trick! 
don't auto-suggest + except wikipedia.exceptions.DisambiguationError as e: + str_error = str(e).replace("\n", ", ") + str_error = f"## Disambiguation Error ({query})\n* Status: {str_error}" + logger.error(str_error) + yield str_error + "\n" + return + except wikipedia.exceptions.RedirectError as e: + str_error = str(e).replace("\n", ", ") + str_error = f"## Redirect Error ({query})\n* Status: {str_error}" + logger.error(str_error) + yield str_error + "\n" + return + except Exception as e: + if titles_found: + str_error = f"## Page Retrieve Error ({query})\n* Found Topics (matched '{title_check}') {titles_found}" + logger.error(f"{str_error} -> {e}") + else: + str_error = f"## Page Not Found ({query})\n* Unknown error" + logger.error(f"{str_error} -> {e}") + yield str_error + "\n" + return + + # found a page / section + logger.info(f"Page Sections[{query}]: {wiki_page.sections}") + yield f"## {title_check}\n" + + # flatten internal links + # link_md = [f"[{x}]({self.valves.WIKIPEDIA_ROOT}/{re_query.sub('_', x)})" for x in wiki_page.links[:10]] + # yield "* Links (first 30): " + ",".join(link_md) + "\n" + + # add the textual summary + summary_full = wiki_page.summary + word_positions = [x.start() for x in re_rough_word.finditer(summary_full)] + if len(word_positions) > self.valves.WORD_LIMIT: + yield summary_full[:word_positions[self.valves.WORD_LIMIT]] + "...\n" + else: + yield summary_full + "\n" + + # the more you know! 
link to further reading + yield "### Learn More" + "\n" + yield f"* [Read more on Wikipedia...]({wiki_page.url})\n" + + # also spit out the related topics from search + link_md = [f"[{x}]({self.valves.WIKIPEDIA_ROOT}/{re_query.sub('_', x)})" for x in titles_found] + yield f"* Related topics: {', '.join(link_md)}\n" + + # throw in the first image for good measure + if wiki_page.images: + yield f"\n![Image: {title_check}]({wiki_page.images[0]})\n" + + return \ No newline at end of file diff --git a/openwebui/pipelines/examples/pipelines/providers/anthropic_manifold_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/anthropic_manifold_pipeline.py new file mode 100644 index 0000000..f8a4c67 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/anthropic_manifold_pipeline.py @@ -0,0 +1,293 @@ +""" +title: Anthropic Manifold Pipeline +author: justinh-rahb, sriparashiva +date: 2024-06-20 +version: 1.4 +license: MIT +description: A pipeline for generating text and processing images using the Anthropic API. 
+requirements: requests, sseclient-py +environment_variables: ANTHROPIC_API_KEY, ANTHROPIC_THINKING_BUDGET_TOKENS, ANTHROPIC_ENABLE_THINKING +""" + +import os +import requests +import json +from typing import List, Union, Generator, Iterator +from pydantic import BaseModel +import sseclient + +from utils.pipelines.main import pop_system_message + +REASONING_EFFORT_BUDGET_TOKEN_MAP = { + "none": None, + "low": 1024, + "medium": 4096, + "high": 16384, + "max": 32768, +} + +# Maximum combined token limit for Claude 3.7 +MAX_COMBINED_TOKENS = 64000 + + +class Pipeline: + class Valves(BaseModel): + ANTHROPIC_API_KEY: str = "" + + def __init__(self): + self.type = "manifold" + self.id = "anthropic" + self.name = "anthropic/" + + self.valves = self.Valves( + **{ + "ANTHROPIC_API_KEY": os.getenv( + "ANTHROPIC_API_KEY", "your-api-key-here" + ), + } + ) + self.url = "https://api.anthropic.com/v1/messages" + self.update_headers() + + def update_headers(self): + self.headers = { + "anthropic-version": "2023-06-01", + "content-type": "application/json", + "x-api-key": self.valves.ANTHROPIC_API_KEY, + } + + def get_anthropic_models(self): + return [ + {"id": "claude-3-haiku-20240307", "name": "claude-3-haiku"}, + {"id": "claude-3-opus-20240229", "name": "claude-3-opus"}, + {"id": "claude-3-sonnet-20240229", "name": "claude-3-sonnet"}, + {"id": "claude-3-5-haiku-20241022", "name": "claude-3.5-haiku"}, + {"id": "claude-3-5-sonnet-20241022", "name": "claude-3.5-sonnet"}, + {"id": "claude-3-7-sonnet-20250219", "name": "claude-3.7-sonnet"}, + {"id": "claude-opus-4-20250514", "name": "claude-4-opus"}, + {"id": "claude-sonnet-4-20250514", "name": "claude-4-sonnet"}, + {"id": "claude-opus-4-1-20250805", "name": "claude-4.1-opus"}, + ] + + def get_thinking_supported_models(self): + """Returns list of model identifiers that support extended thinking""" + return [ + "claude-3-7", + "claude-sonnet-4", + "claude-opus-4" + ] + + async def on_startup(self): + print(f"on_startup:{__name__}") + 
pass + + async def on_shutdown(self): + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + self.update_headers() + + def pipelines(self) -> List[dict]: + return self.get_anthropic_models() + + def process_image(self, image_data): + if image_data["url"].startswith("data:image"): + mime_type, base64_data = image_data["url"].split(",", 1) + media_type = mime_type.split(":")[1].split(";")[0] + return { + "type": "image", + "source": { + "type": "base64", + "media_type": media_type, + "data": base64_data, + }, + } + else: + return { + "type": "image", + "source": {"type": "url", "url": image_data["url"]}, + } + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + try: + # Remove unnecessary keys + for key in ["user", "chat_id", "title"]: + body.pop(key, None) + + system_message, messages = pop_system_message(messages) + + processed_messages = [] + image_count = 0 + total_image_size = 0 + + for message in messages: + processed_content = [] + if isinstance(message.get("content"), list): + for item in message["content"]: + if item["type"] == "text": + processed_content.append( + {"type": "text", "text": item["text"]} + ) + elif item["type"] == "image_url": + if image_count >= 5: + raise ValueError( + "Maximum of 5 images per API call exceeded" + ) + + processed_image = self.process_image(item["image_url"]) + processed_content.append(processed_image) + + if processed_image["source"]["type"] == "base64": + image_size = ( + len(processed_image["source"]["data"]) * 3 / 4 + ) + else: + image_size = 0 + + total_image_size += image_size + if total_image_size > 100 * 1024 * 1024: + raise ValueError( + "Total size of images exceeds 100 MB limit" + ) + + image_count += 1 + else: + processed_content = [ + {"type": "text", "text": message.get("content", "")} + ] + + processed_messages.append( + {"role": message["role"], "content": processed_content} + ) + + # Prepare the payload + 
payload = { + "model": model_id, + "messages": processed_messages, + "max_tokens": body.get("max_tokens", 4096), + "temperature": body.get("temperature", 0.8), + "stop_sequences": body.get("stop", []), + **({"system": str(system_message)} if system_message else {}), + "stream": body.get("stream", False), + } + + # Add optional parameters only if explicitly provided + if "top_k" in body: + payload["top_k"] = body["top_k"] + + # Only include top_p if explicitly set (not both temperature and top_p) + if "top_p" in body: + payload["top_p"] = body["top_p"] + # Remove temperature if top_p is explicitly set + if "temperature" in payload: + del payload["temperature"] + + if body.get("stream", False): + supports_thinking = any(model in model_id for model in self.get_thinking_supported_models()) + reasoning_effort = body.get("reasoning_effort", "none") + budget_tokens = REASONING_EFFORT_BUDGET_TOKEN_MAP.get(reasoning_effort) + + # Allow users to input an integer value representing budget tokens + if ( + not budget_tokens + and reasoning_effort is not None + and reasoning_effort not in REASONING_EFFORT_BUDGET_TOKEN_MAP.keys() + ): + try: + budget_tokens = int(reasoning_effort) + except ValueError as e: + print("Failed to convert reasoning effort to int", e) + budget_tokens = None + + if supports_thinking and budget_tokens: + # Check if the combined tokens (budget_tokens + max_tokens) exceeds the limit + max_tokens = payload.get("max_tokens", 4096) + combined_tokens = budget_tokens + max_tokens + + if combined_tokens > MAX_COMBINED_TOKENS: + error_message = f"Error: Combined tokens (budget_tokens {budget_tokens} + max_tokens {max_tokens} = {combined_tokens}) exceeds the maximum limit of {MAX_COMBINED_TOKENS}" + print(error_message) + return error_message + + payload["max_tokens"] = combined_tokens + payload["thinking"] = { + "type": "enabled", + "budget_tokens": budget_tokens, + } + # Thinking requires temperature 1.0 and does not support top_p, top_k + payload["temperature"] 
= 1.0 + if "top_k" in payload: + del payload["top_k"] + if "top_p" in payload: + del payload["top_p"] + return self.stream_response(payload) + else: + return self.get_completion(payload) + except Exception as e: + return f"Error: {e}" + + def stream_response(self, payload: dict) -> Generator: + """Used for title and tag generation""" + try: + response = requests.post( + self.url, headers=self.headers, json=payload, stream=True + ) + print(f"{response} for {payload}") + + if response.status_code == 200: + client = sseclient.SSEClient(response) + for event in client.events(): + try: + data = json.loads(event.data) + if data["type"] == "content_block_start": + if data["content_block"]["type"] == "thinking": + yield "" + else: + yield data["content_block"]["text"] + elif data["type"] == "content_block_delta": + if data["delta"]["type"] == "thinking_delta": + yield data["delta"]["thinking"] + elif data["delta"]["type"] == "signature_delta": + yield "\n \n\n" + else: + yield data["delta"]["text"] + elif data["type"] == "message_stop": + break + except json.JSONDecodeError: + print(f"Failed to parse JSON: {event.data}") + yield f"Error: Failed to parse JSON response" + except KeyError as e: + print(f"Unexpected data structure: {e} for payload {payload}") + print(f"Full data: {data}") + yield f"Error: Unexpected data structure: {e}" + else: + error_message = f"Error: {response.status_code} - {response.text}" + print(error_message) + yield error_message + except Exception as e: + error_message = f"Error: {str(e)}" + print(error_message) + yield error_message + + def get_completion(self, payload: dict) -> str: + try: + response = requests.post(self.url, headers=self.headers, json=payload) + print(response, payload) + if response.status_code == 200: + res = response.json() + for content in res["content"]: + if not content.get("text"): + continue + return content["text"] + return "" + else: + error_message = f"Error: {response.status_code} - {response.text}" + 
print(error_message) + return error_message + except Exception as e: + error_message = f"Error: {str(e)}" + print(error_message) + return error_message diff --git a/openwebui/pipelines/examples/pipelines/providers/aws_bedrock_claude_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/aws_bedrock_claude_pipeline.py new file mode 100644 index 0000000..1a5a028 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/aws_bedrock_claude_pipeline.py @@ -0,0 +1,285 @@ +""" +title: AWS Bedrock Claude Pipeline +author: G-mario +date: 2024-08-18 +version: 1.0 +license: MIT +description: A pipeline for generating text and processing images using the AWS Bedrock API(By Anthropic claude). +requirements: requests, boto3 +environment_variables: AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION_NAME +""" +import base64 +import json +import logging +from io import BytesIO +from typing import List, Union, Generator, Iterator, Optional, Any + +import boto3 + +from pydantic import BaseModel + +import os +import requests + +from utils.pipelines.main import pop_system_message + +REASONING_EFFORT_BUDGET_TOKEN_MAP = { + "none": None, + "low": 1024, + "medium": 4096, + "high": 16384, + "max": 32768, +} + +# Maximum combined token limit for Claude 3.7 +MAX_COMBINED_TOKENS = 64000 + + +class Pipeline: + class Valves(BaseModel): + AWS_ACCESS_KEY: Optional[str] = None + AWS_SECRET_KEY: Optional[str] = None + AWS_REGION_NAME: Optional[str] = None + + def __init__(self): + self.type = "manifold" + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. 
+ # self.id = "openai_pipeline" + self.name = "Bedrock: " + + self.valves = self.Valves( + **{ + "AWS_ACCESS_KEY": os.getenv("AWS_ACCESS_KEY", ""), + "AWS_SECRET_KEY": os.getenv("AWS_SECRET_KEY", ""), + "AWS_REGION_NAME": os.getenv( + "AWS_REGION_NAME", os.getenv( + "AWS_REGION", os.getenv("AWS_DEFAULT_REGION", "") + ) + ), + } + ) + + self.update_pipelines() + + def get_thinking_supported_models(self): + """Returns list of model identifiers that support extended thinking""" + return [ + "claude-3-7", + "claude-sonnet-4", + "claude-opus-4" + ] + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + self.update_pipelines() + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. + print(f"on_valves_updated:{__name__}") + self.update_pipelines() + + def update_pipelines(self) -> None: + try: + self.bedrock = boto3.client(service_name="bedrock", + aws_access_key_id=self.valves.AWS_ACCESS_KEY, + aws_secret_access_key=self.valves.AWS_SECRET_KEY, + region_name=self.valves.AWS_REGION_NAME) + self.bedrock_runtime = boto3.client(service_name="bedrock-runtime", + aws_access_key_id=self.valves.AWS_ACCESS_KEY, + aws_secret_access_key=self.valves.AWS_SECRET_KEY, + region_name=self.valves.AWS_REGION_NAME) + self.pipelines = self.get_models() + except Exception as e: + print(f"Error: {e}") + self.pipelines = [ + { + "id": "error", + "name": "Could not fetch models from Bedrock, please set up AWS Key/Secret or Instance/Task Role.", + }, + ] + + def get_models(self): + try: + res = [] + response = self.bedrock.list_foundation_models(byProvider='Anthropic') + for model in response['modelSummaries']: + inference_types = model.get('inferenceTypesSupported', []) + if "ON_DEMAND" in inference_types: + res.append({'id': model['modelId'], 'name': 
model['modelName']}) + elif "INFERENCE_PROFILE" in inference_types: + inferenceProfileId = self.getInferenceProfileId(model['modelArn']) + if inferenceProfileId: + res.append({'id': inferenceProfileId, 'name': model['modelName']}) + + return res + except Exception as e: + print(f"Error: {e}") + return [ + { + "id": "error", + "name": "Could not fetch models from Bedrock, please check permissoin.", + }, + ] + + def getInferenceProfileId(self, modelArn: str) -> str: + response = self.bedrock.list_inference_profiles() + for profile in response.get('inferenceProfileSummaries', []): + for model in profile.get('models', []): + if model.get('modelArn') == modelArn: + return profile['inferenceProfileId'] + return None + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. + print(f"pipe:{__name__}") + + system_message, messages = pop_system_message(messages) + + logging.info(f"pop_system_message: {json.dumps(messages)}") + + try: + processed_messages = [] + image_count = 0 + for message in messages: + processed_content = [] + if isinstance(message.get("content"), list): + for item in message["content"]: + if item["type"] == "text": + processed_content.append({"text": item["text"]}) + elif item["type"] == "image_url": + if image_count >= 20: + raise ValueError("Maximum of 20 images per API call exceeded") + processed_image = self.process_image(item["image_url"]) + processed_content.append(processed_image) + image_count += 1 + else: + processed_content = [{"text": message.get("content", "")}] + + processed_messages.append({"role": message["role"], "content": processed_content}) + + payload = { + "modelId": model_id, + "messages": processed_messages, + "system": [{'text': system_message["content"] if system_message else 'you are an intelligent ai assistant'}], + "inferenceConfig": { + "temperature": body.get("temperature", 0.5), + 
"maxTokens": body.get("max_tokens", 4096), + "stopSequences": body.get("stop", []), + }, + "additionalModelRequestFields": {} + } + + # Handle top_p and temperature conflict + if "top_p" in body: + payload["inferenceConfig"]["topP"] = body["top_p"] + # Remove temperature if top_p is explicitly set + if "temperature" in payload["inferenceConfig"]: + del payload["inferenceConfig"]["temperature"] + + # Add top_k if explicitly provided + if "top_k" in body: + payload["additionalModelRequestFields"]["top_k"] = body["top_k"] + else: + # Use default top_k value + payload["additionalModelRequestFields"]["top_k"] = 200 + + if body.get("stream", False): + supports_thinking = any(model in model_id for model in self.get_thinking_supported_models()) + reasoning_effort = body.get("reasoning_effort", "none") + budget_tokens = REASONING_EFFORT_BUDGET_TOKEN_MAP.get(reasoning_effort) + + # Allow users to input an integer value representing budget tokens + if ( + not budget_tokens + and reasoning_effort is not None + and reasoning_effort not in REASONING_EFFORT_BUDGET_TOKEN_MAP.keys() + ): + try: + budget_tokens = int(reasoning_effort) + except ValueError as e: + print("Failed to convert reasoning effort to int", e) + budget_tokens = None + + if supports_thinking and budget_tokens: + # Check if the combined tokens (budget_tokens + max_tokens) exceeds the limit + max_tokens = payload.get("max_tokens", 4096) + combined_tokens = budget_tokens + max_tokens + + if combined_tokens > MAX_COMBINED_TOKENS: + error_message = f"Error: Combined tokens (budget_tokens {budget_tokens} + max_tokens {max_tokens} = {combined_tokens}) exceeds the maximum limit of {MAX_COMBINED_TOKENS}" + print(error_message) + return error_message + + payload["inferenceConfig"]["maxTokens"] = combined_tokens + payload["additionalModelRequestFields"]["thinking"] = { + "type": "enabled", + "budget_tokens": budget_tokens, + } + # Thinking requires temperature 1.0 and does not support top_p, top_k + 
payload["inferenceConfig"]["temperature"] = 1.0 + if "top_k" in payload["additionalModelRequestFields"]: + del payload["additionalModelRequestFields"]["top_k"] + if "topP" in payload["inferenceConfig"]: + del payload["inferenceConfig"]["topP"] + return self.stream_response(model_id, payload) + else: + return self.get_completion(model_id, payload) + except Exception as e: + return f"Error: {e}" + + def process_image(self, image: str): + img_stream = None + content_type = None + + if image["url"].startswith("data:image"): + mime_type, base64_string = image["url"].split(",", 1) + content_type = mime_type.split(":")[1].split(";")[0] + image_data = base64.b64decode(base64_string) + img_stream = BytesIO(image_data) + else: + response = requests.get(image["url"]) + img_stream = BytesIO(response.content) + content_type = response.headers.get('Content-Type', 'image/jpeg') + + media_type = content_type.split('/')[-1] if '/' in content_type else content_type + return { + "image": { + "format": media_type, + "source": {"bytes": img_stream.read()} + } + } + + def stream_response(self, model_id: str, payload: dict) -> Generator: + streaming_response = self.bedrock_runtime.converse_stream(**payload) + + in_resasoning_context = False + for chunk in streaming_response["stream"]: + if in_resasoning_context and "contentBlockStop" in chunk: + in_resasoning_context = False + yield "\n \n\n" + elif "contentBlockDelta" in chunk and "delta" in chunk["contentBlockDelta"]: + if "reasoningContent" in chunk["contentBlockDelta"]["delta"]: + if not in_resasoning_context: + yield "" + + in_resasoning_context = True + if "text" in chunk["contentBlockDelta"]["delta"]["reasoningContent"]: + yield chunk["contentBlockDelta"]["delta"]["reasoningContent"]["text"] + elif "text" in chunk["contentBlockDelta"]["delta"]: + yield chunk["contentBlockDelta"]["delta"]["text"] + + def get_completion(self, model_id: str, payload: dict) -> str: + response = self.bedrock_runtime.converse(**payload) + return 
# --- file: openwebui/pipelines/examples/pipelines/providers/aws_bedrock_deepseek_pipeline.py ---
"""
title: AWS Bedrock DeepSeek Pipeline
author: kikumoto
date: 2025-03-17
version: 1.0
license: MIT
description: A pipeline for generating text using the AWS Bedrock API.
requirements: boto3
environment_variables:
"""

import json
import logging

from typing import List, Union, Generator, Iterator, Dict, Optional, Any

import boto3

from pydantic import BaseModel

import os

from utils.pipelines.main import pop_system_message

class Pipeline:
    """Open WebUI manifold pipeline for DeepSeek models on AWS Bedrock."""

    class Valves(BaseModel):
        # AWS credentials; empty values fall back to instance/task roles.
        AWS_ACCESS_KEY: Optional[str] = None
        AWS_SECRET_KEY: Optional[str] = None
        AWS_REGION_NAME: Optional[str] = None

    def __init__(self):
        self.type = "manifold"
        self.name = "Bedrock DeepSeek: "

        self.valves = self.Valves(
            **{
                "AWS_ACCESS_KEY": os.getenv("AWS_ACCESS_KEY", ""),
                "AWS_SECRET_KEY": os.getenv("AWS_SECRET_KEY", ""),
                "AWS_REGION_NAME": os.getenv(
                    "AWS_REGION_NAME", os.getenv(
                        "AWS_REGION", os.getenv("AWS_DEFAULT_REGION", "")
                    )
                ),
            }
        )

        self.update_pipelines()

    async def on_startup(self):
        # Called when the server starts.
        print(f"on_startup:{__name__}")
        self.update_pipelines()

    async def on_shutdown(self):
        # Called when the server stops.
        print(f"on_shutdown:{__name__}")

    async def on_valves_updated(self):
        # Called when the valves are updated; clients must be rebuilt.
        print(f"on_valves_updated:{__name__}")
        self.update_pipelines()

    def update_pipelines(self) -> None:
        """(Re)create the Bedrock clients and refresh the model list."""
        try:
            self.bedrock = boto3.client(service_name="bedrock",
                                        aws_access_key_id=self.valves.AWS_ACCESS_KEY,
                                        aws_secret_access_key=self.valves.AWS_SECRET_KEY,
                                        region_name=self.valves.AWS_REGION_NAME)
            self.bedrock_runtime = boto3.client(service_name="bedrock-runtime",
                                               aws_access_key_id=self.valves.AWS_ACCESS_KEY,
                                               aws_secret_access_key=self.valves.AWS_SECRET_KEY,
                                               region_name=self.valves.AWS_REGION_NAME)
            self.pipelines = self.get_models()
        except Exception as e:
            print(f"Error: {e}")
            self.pipelines = [
                {
                    "id": "error",
                    "name": "Could not fetch models from Bedrock, please set up AWS Key/Secret or Instance/Task Role.",
                },
            ]

    # NOTE(review): update_pipelines() assigns self.pipelines (a list), which
    # shadows this method on instances — it is effectively dead code; kept for
    # interface compatibility.
    def pipelines(self) -> List[dict]:
        return self.get_models()

    def get_models(self):
        """List DeepSeek foundation models, resolving inference profiles."""
        try:
            res = []
            response = self.bedrock.list_foundation_models(byProvider='DeepSeek')
            for model in response['modelSummaries']:
                inference_types = model.get('inferenceTypesSupported', [])
                if "ON_DEMAND" in inference_types:
                    res.append({'id': model['modelId'], 'name': model['modelName']})
                elif "INFERENCE_PROFILE" in inference_types:
                    inferenceProfileId = self.getInferenceProfileId(model['modelArn'])
                    if inferenceProfileId:
                        res.append({'id': inferenceProfileId, 'name': model['modelName']})

            return res
        except Exception as e:
            print(f"Error: {e}")
            return [
                {
                    "id": "error",
                    # typo fix: "permissoin" -> "permission"
                    "name": "Could not fetch models from Bedrock, please check permission.",
                },
            ]

    def getInferenceProfileId(self, modelArn: str) -> Optional[str]:
        """Return the inference-profile id serving modelArn, or None."""
        response = self.bedrock.list_inference_profiles()
        for profile in response.get('inferenceProfileSummaries', []):
            for model in profile.get('models', []):
                if model.get('modelArn') == modelArn:
                    return profile['inferenceProfileId']
        return None

    def pipe(
        self, user_message: str, model_id: str, messages: List[dict], body: dict
    ) -> Union[str, Generator, Iterator]:
        """Convert an OpenAI-style request into a Bedrock Converse call."""
        print(f"pipe:{__name__}")

        try:
            # Remove Open WebUI bookkeeping keys before building the payload.
            for key in ['user', 'chat_id', 'title']:
                body.pop(key, None)

            system_message, messages = pop_system_message(messages)

            logging.info(f"pop_system_message: {json.dumps(messages)}")

            processed_messages = []
            for message in messages:
                processed_content = []
                if isinstance(message.get("content"), list):
                    for item in message["content"]:
                        # DeepSeek currently doesn't support multi-modal inputs
                        if item["type"] == "text":
                            processed_content.append({"text": item["text"]})
                else:
                    processed_content = [{"text": message.get("content", "")}]

                processed_messages.append({"role": message["role"], "content": processed_content})

            payload = {"modelId": model_id,
                       "system": [{'text': system_message["content"] if system_message else 'you are an intelligent ai assistant'}],
                       "messages": processed_messages,
                       "inferenceConfig": {
                           "temperature": body.get("temperature", 0.5),
                           "topP": body.get("top_p", 0.9),
                           "maxTokens": body.get("max_tokens", 8192),
                           "stopSequences": body.get("stop", []),
                       },
                       }

            if body.get("stream", False):
                return self.stream_response(model_id, payload)
            else:
                return self.get_completion(model_id, payload)

        except Exception as e:
            return f"Error: {e}"

    def stream_response(self, model_id: str, payload: dict) -> Generator:
        """Yield text deltas from converse_stream, flattening reasoning blocks.

        NOTE(review): the bare "" / "\\n \\n\\n" yields look like stripped
        <think> markers — confirm against upstream before relying on them.
        """
        streaming_response = self.bedrock_runtime.converse_stream(**payload)

        in_reasoning_context = False
        for chunk in streaming_response["stream"]:
            if in_reasoning_context and "contentBlockStop" in chunk:
                in_reasoning_context = False
                yield "\n \n\n"
            elif "contentBlockDelta" in chunk and "delta" in chunk["contentBlockDelta"]:
                if "reasoningContent" in chunk["contentBlockDelta"]["delta"]:
                    if not in_reasoning_context:
                        yield ""

                    in_reasoning_context = True
                    if "text" in chunk["contentBlockDelta"]["delta"]["reasoningContent"]:
                        yield chunk["contentBlockDelta"]["delta"]["reasoningContent"]["text"]
chunk["contentBlockDelta"]["delta"]["reasoningContent"]["text"] + elif "text" in chunk["contentBlockDelta"]["delta"]: + yield chunk["contentBlockDelta"]["delta"]["text"] + + def get_completion(self, model_id: str, payload: dict) -> str: + response = self.bedrock_runtime.converse(**payload) + return response['output']['message']['content'][0]['text'] \ No newline at end of file diff --git a/openwebui/pipelines/examples/pipelines/providers/azure_dalle_manifold_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/azure_dalle_manifold_pipeline.py new file mode 100644 index 0000000..c64a766 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/azure_dalle_manifold_pipeline.py @@ -0,0 +1,89 @@ +""" +title: Azure - Dall-E Manifold Pipeline +author: weisser-dev +date: 2025-03-26 +version: 1.0 +license: MIT +description: A pipeline for generating text and processing images using the Azure API. And including multiple Dall-e models +requirements: requests, os +environment_variables: AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_VERSION, AZURE_OPENAI_MODELS, AZURE_OPENAI_MODEL_NAMES, IMAGE_SIZE, NUM_IMAGES +""" +from typing import List, Union, Generator, Iterator +from pydantic import BaseModel +import requests +import os + +class Pipeline: + class Valves(BaseModel): + AZURE_OPENAI_API_KEY: str + AZURE_OPENAI_ENDPOINT: str + AZURE_OPENAI_API_VERSION: str + AZURE_OPENAI_MODELS: str + AZURE_OPENAI_MODEL_NAMES: str + IMAGE_SIZE: str = "1024x1024" + NUM_IMAGES: int = 1 + + def __init__(self): + self.type = "manifold" + self.name = "Azure DALL·E: " + self.valves = self.Valves( + **{ + "AZURE_OPENAI_API_KEY": os.getenv("AZURE_OPENAI_API_KEY", "your-azure-openai-api-key-here"), + "AZURE_OPENAI_ENDPOINT": os.getenv("AZURE_OPENAI_ENDPOINT", "your-azure-openai-endpoint-here"), + "AZURE_OPENAI_API_VERSION": os.getenv("AZURE_OPENAI_API_VERSION", "2024-02-01"), + "AZURE_OPENAI_MODELS": os.getenv("AZURE_OPENAI_MODELS", "dall-e-2;dall-e-3"), #ensure 
that the model here is within your enpoint url, sometime the name within the url it is also like Dalle3 + "AZURE_OPENAI_MODEL_NAMES": os.getenv("AZURE_OPENAI_MODEL_NAMES", "DALL-E 2;DALL-E 3"), + } + ) + self.set_pipelines() + + def set_pipelines(self): + models = self.valves.AZURE_OPENAI_MODELS.split(";") + model_names = self.valves.AZURE_OPENAI_MODEL_NAMES.split(";") + self.pipelines = [ + {"id": model, "name": name} for model, name in zip(models, model_names) + ] + print(f"azure_dalle_pipeline - models: {self.pipelines}") + + async def on_startup(self) -> None: + print(f"on_startup:{__name__}") + + async def on_shutdown(self): + print(f"on_shutdown:{__name__}") + + async def on_valves_updated(self): + print(f"on_valves_updated:{__name__}") + self.set_pipelines() + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + print(f"pipe:{__name__}") + + headers = { + "api-key": self.valves.AZURE_OPENAI_API_KEY, + "Content-Type": "application/json", + } + + url = f"{self.valves.AZURE_OPENAI_ENDPOINT}/openai/deployments/{model_id}/images/generations?api-version={self.valves.AZURE_OPENAI_API_VERSION}" + + payload = { + "model": model_id, + "prompt": user_message, + "size": self.valves.IMAGE_SIZE, + "n": self.valves.NUM_IMAGES, + } + + try: + response = requests.post(url, json=payload, headers=headers) + response.raise_for_status() + data = response.json() + + message = "" + for image in data.get("data", []): + if "url" in image: + message += f"![image]({image['url']})\n" + + yield message + except Exception as e: + yield f"Error: {e} ({response.text if response else 'No response'})" diff --git a/openwebui/pipelines/examples/pipelines/providers/azure_deepseek_r1_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/azure_deepseek_r1_pipeline.py new file mode 100644 index 0000000..fc4c14e --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/azure_deepseek_r1_pipeline.py @@ 
-0,0 +1,99 @@ +from typing import List, Union, Generator, Iterator +from pydantic import BaseModel +import requests +import os + + +class Pipeline: + class Valves(BaseModel): + # You can add your custom valves here. + AZURE_DEEPSEEKR1_API_KEY: str + AZURE_DEEPSEEKR1_ENDPOINT: str + AZURE_DEEPSEEKR1_API_VERSION: str + + def __init__(self): + self.type = "manifold" + self.name = "Azure " + self.valves = self.Valves( + **{ + "AZURE_DEEPSEEKR1_API_KEY": os.getenv("AZURE_DEEPSEEKR1_API_KEY", "your-azure-deepseek-r1-api-key-here"), + "AZURE_DEEPSEEKR1_ENDPOINT": os.getenv("AZURE_DEEPSEEKR1_ENDPOINT", "your-azure-deepseek-r1-endpoint-here"), + "AZURE_DEEPSEEKR1_API_VERSION": os.getenv("AZURE_DEEPSEEKR1_API_VERSION", "2024-05-01-preview"), + } + ) + self.set_pipelines() + pass + + def set_pipelines(self): + models = ['DeepSeek-R1'] + model_names = ['DeepSeek-R1'] + self.pipelines = [ + {"id": model, "name": name} for model, name in zip(models, model_names) + ] + print(f"azure_deepseek_r1_pipeline - models: {self.pipelines}") + pass + + async def on_valves_updated(self): + self.set_pipelines() + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. 
+ print(f"pipe:{__name__}") + + print(messages) + print(user_message) + + headers = { + "api-key": self.valves.AZURE_DEEPSEEKR1_API_KEY, + "Content-Type": "application/json", + } + + url = f"{self.valves.AZURE_DEEPSEEKR1_ENDPOINT}/models/chat/completions?api-version={self.valves.AZURE_DEEPSEEKR1_API_VERSION}" + + print(body) + + allowed_params = {'messages', 'temperature', 'role', 'content', 'contentPart', 'contentPartImage', + 'enhancements', 'dataSources', 'n', 'stream', 'stop', 'max_tokens', 'presence_penalty', + 'frequency_penalty', 'logit_bias', 'user', 'function_call', 'funcions', 'tools', + 'tool_choice', 'top_p', 'log_probs', 'top_logprobs', 'response_format', 'seed', 'model'} + # remap user field + if "user" in body and not isinstance(body["user"], str): + body["user"] = body["user"]["id"] if "id" in body["user"] else str(body["user"]) + # Fill in model field as per Azure's api requirements + body["model"] = model_id + filtered_body = {k: v for k, v in body.items() if k in allowed_params} + # log fields that were filtered out as a single line + if len(body) != len(filtered_body): + print(f"Dropped params: {', '.join(set(body.keys()) - set(filtered_body.keys()))}") + + try: + r = requests.post( + url=url, + json=filtered_body, + headers=headers, + stream=True, + ) + + r.raise_for_status() + if body["stream"]: + return r.iter_lines() + else: + return r.json() + except Exception as e: + if r: + text = r.text + return f"Error: {e} ({text})" + else: + return f"Error: {e}" diff --git a/openwebui/pipelines/examples/pipelines/providers/azure_jais_core42_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/azure_jais_core42_pipeline.py new file mode 100644 index 0000000..2b8e8a7 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/azure_jais_core42_pipeline.py @@ -0,0 +1,215 @@ +""" +title: Jais Azure Pipeline with Stream Handling Fix +author: Abdessalaam Al-Alestini +date: 2024-06-20 +version: 1.3 +license: MIT +description: A pipeline 
for generating text using the Jais model via Azure AI Inference API, with fixed stream handling. +About Jais: https://inceptionai.ai/jais/ +requirements: azure-ai-inference +environment_variables: AZURE_INFERENCE_CREDENTIAL, AZURE_INFERENCE_ENDPOINT, MODEL_ID +""" + +import os +import json +import logging +from typing import List, Union, Generator, Iterator, Tuple +from pydantic import BaseModel +from azure.ai.inference import ChatCompletionsClient +from azure.core.credentials import AzureKeyCredential +from azure.ai.inference.models import SystemMessage, UserMessage, AssistantMessage + +# Set up logging +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + + +def pop_system_message(messages: List[dict]) -> Tuple[str, List[dict]]: + """ + Extract the system message from the list of messages. + + Args: + messages (List[dict]): List of message dictionaries. + + Returns: + Tuple[str, List[dict]]: A tuple containing the system message (or empty string) and the updated list of messages. 
+ """ + system_message = "" + updated_messages = [] + + for message in messages: + if message['role'] == 'system': + system_message = message['content'] + else: + updated_messages.append(message) + + return system_message, updated_messages + + +class Pipeline: + + class Valves(BaseModel): + AZURE_INFERENCE_CREDENTIAL: str = "" + AZURE_INFERENCE_ENDPOINT: str = "" + MODEL_ID: str = "jais-30b-chat" + + def __init__(self): + self.type = "manifold" + self.id = "jais-azure" + self.name = "jais-azure/" + + self.valves = self.Valves( + **{ + "AZURE_INFERENCE_CREDENTIAL": + os.getenv("AZURE_INFERENCE_CREDENTIAL", + "your-azure-inference-key-here"), + "AZURE_INFERENCE_ENDPOINT": + os.getenv("AZURE_INFERENCE_ENDPOINT", + "your-azure-inference-endpoint-here"), + "MODEL_ID": + os.getenv("MODEL_ID", "jais-30b-chat"), + }) + self.update_client() + + def update_client(self): + self.client = ChatCompletionsClient( + endpoint=self.valves.AZURE_INFERENCE_ENDPOINT, + credential=AzureKeyCredential( + self.valves.AZURE_INFERENCE_CREDENTIAL)) + + def get_jais_models(self): + return [ + { + "id": "jais-30b-chat", + "name": "Jais 30B Chat" + }, + ] + + async def on_startup(self): + logger.info(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + logger.info(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + self.update_client() + + def pipelines(self) -> List[dict]: + return self.get_jais_models() + + def pipe(self, user_message: str, model_id: str, messages: List[dict], + body: dict) -> Union[str, Generator, Iterator]: + try: + logger.debug( + f"Received request - user_message: {user_message}, model_id: {model_id}" + ) + logger.debug(f"Messages: {json.dumps(messages, indent=2)}") + logger.debug(f"Body: {json.dumps(body, indent=2)}") + + # Remove unnecessary keys + for key in ['user', 'chat_id', 'title']: + body.pop(key, None) + + system_message, messages = pop_system_message(messages) + + # Prepare messages for Jais + jais_messages = [SystemMessage( 
+ content=system_message)] if system_message else [] + jais_messages += [ + UserMessage(content=msg['content']) if msg['role'] == 'user' + else SystemMessage(content=msg['content']) if msg['role'] + == 'system' else AssistantMessage(content=msg['content']) + for msg in messages + ] + + # Prepare the payload + allowed_params = { + 'temperature', 'max_tokens', 'presence_penalty', + 'frequency_penalty', 'top_p' + } + filtered_body = { + k: v + for k, v in body.items() if k in allowed_params + } + + logger.debug(f"Prepared Jais messages: {jais_messages}") + logger.debug(f"Filtered body: {filtered_body}") + + is_stream = body.get("stream", False) + if is_stream: + return self.stream_response(jais_messages, filtered_body) + else: + return self.get_completion(jais_messages, filtered_body) + except Exception as e: + logger.error(f"Error in pipe: {str(e)}", exc_info=True) + return json.dumps({"error": str(e)}) + + def stream_response(self, jais_messages: List[Union[SystemMessage, UserMessage, AssistantMessage]], params: dict) -> str: + try: + complete_response = "" + response = self.client.complete(messages=jais_messages, + model=self.valves.MODEL_ID, + stream=True, + **params) + for update in response: + if update.choices: + delta_content = update.choices[0].delta.content + if delta_content: + complete_response += delta_content + return complete_response + except Exception as e: + logger.error(f"Error in stream_response: {str(e)}", exc_info=True) + return json.dumps({"error": str(e)}) + + def get_completion(self, jais_messages: List[Union[SystemMessage, UserMessage, AssistantMessage]], params: dict) -> str: + try: + response = self.client.complete(messages=jais_messages, + model=self.valves.MODEL_ID, + **params) + if response.choices: + result = response.choices[0].message.content + logger.debug(f"Completion result: {result}") + return result + else: + logger.warning("No choices in completion response") + return "" + except Exception as e: + logger.error(f"Error in 
get_completion: {str(e)}", exc_info=True) + return json.dumps({"error": str(e)}) + + +# TEST CASE TO RUN THE PIPELINE +if __name__ == "__main__": + pipeline = Pipeline() + + messages = [{ + "role": "user", + "content": "How many languages are in the world?" + }] + body = { + "temperature": 0.5, + "max_tokens": 150, + "presence_penalty": 0.1, + "frequency_penalty": 0.8, + "stream": True # Change to True to test streaming + } + + result = pipeline.pipe(user_message="How many languages are in the world?", + model_id="jais-30b-chat", + messages=messages, + body=body) + + # Handle streaming result + if isinstance(result, str): + content = json.dumps({"content": result}, ensure_ascii=False) + print(content) + else: + complete_response = "" + for part in result: + content_delta = json.loads(part).get("delta") + if content_delta: + complete_response += content_delta + + print(json.dumps({"content": complete_response}, ensure_ascii=False)) diff --git a/openwebui/pipelines/examples/pipelines/providers/azure_openai_manifold_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/azure_openai_manifold_pipeline.py new file mode 100644 index 0000000..6f77a44 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/azure_openai_manifold_pipeline.py @@ -0,0 +1,99 @@ +from typing import List, Union, Generator, Iterator +from pydantic import BaseModel +import requests +import os + + +class Pipeline: + class Valves(BaseModel): + # You can add your custom valves here. 
+ AZURE_OPENAI_API_KEY: str + AZURE_OPENAI_ENDPOINT: str + AZURE_OPENAI_API_VERSION: str + AZURE_OPENAI_MODELS: str + AZURE_OPENAI_MODEL_NAMES: str + + def __init__(self): + self.type = "manifold" + self.name = "Azure OpenAI: " + self.valves = self.Valves( + **{ + "AZURE_OPENAI_API_KEY": os.getenv("AZURE_OPENAI_API_KEY", "your-azure-openai-api-key-here"), + "AZURE_OPENAI_ENDPOINT": os.getenv("AZURE_OPENAI_ENDPOINT", "your-azure-openai-endpoint-here"), + "AZURE_OPENAI_API_VERSION": os.getenv("AZURE_OPENAI_API_VERSION", "2024-02-01"), + "AZURE_OPENAI_MODELS": os.getenv("AZURE_OPENAI_MODELS", "gpt-35-turbo;gpt-4o"), + "AZURE_OPENAI_MODEL_NAMES": os.getenv("AZURE_OPENAI_MODEL_NAMES", "GPT-35 Turbo;GPT-4o"), + } + ) + self.set_pipelines() + pass + + def set_pipelines(self): + models = self.valves.AZURE_OPENAI_MODELS.split(";") + model_names = self.valves.AZURE_OPENAI_MODEL_NAMES.split(";") + self.pipelines = [ + {"id": model, "name": name} for model, name in zip(models, model_names) + ] + print(f"azure_openai_manifold_pipeline - models: {self.pipelines}") + pass + + async def on_valves_updated(self): + self.set_pipelines() + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. 
+ print(f"pipe:{__name__}") + + print(messages) + print(user_message) + + headers = { + "api-key": self.valves.AZURE_OPENAI_API_KEY, + "Content-Type": "application/json", + } + + url = f"{self.valves.AZURE_OPENAI_ENDPOINT}/openai/deployments/{model_id}/chat/completions?api-version={self.valves.AZURE_OPENAI_API_VERSION}" + + allowed_params = {'messages', 'temperature', 'role', 'content', 'contentPart', 'contentPartImage', + 'enhancements', 'dataSources', 'n', 'stream', 'stop', 'max_tokens', 'presence_penalty', + 'frequency_penalty', 'logit_bias', 'user', 'function_call', 'funcions', 'tools', + 'tool_choice', 'top_p', 'log_probs', 'top_logprobs', 'response_format', 'seed'} + # remap user field + if "user" in body and not isinstance(body["user"], str): + body["user"] = body["user"]["id"] if "id" in body["user"] else str(body["user"]) + filtered_body = {k: v for k, v in body.items() if k in allowed_params} + # log fields that were filtered out as a single line + if len(body) != len(filtered_body): + print(f"Dropped params: {', '.join(set(body.keys()) - set(filtered_body.keys()))}") + + try: + r = requests.post( + url=url, + json=filtered_body, + headers=headers, + stream=True, + ) + + r.raise_for_status() + if body["stream"]: + return r.iter_lines() + else: + return r.json() + except Exception as e: + if r: + text = r.text + return f"Error: {e} ({text})" + else: + return f"Error: {e}" diff --git a/openwebui/pipelines/examples/pipelines/providers/azure_openai_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/azure_openai_pipeline.py new file mode 100644 index 0000000..bb4e6e7 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/azure_openai_pipeline.py @@ -0,0 +1,90 @@ +from typing import List, Union, Generator, Iterator +from pydantic import BaseModel +import requests +import os + + +class Pipeline: + class Valves(BaseModel): + # You can add your custom valves here. 
+ AZURE_OPENAI_API_KEY: str + AZURE_OPENAI_ENDPOINT: str + AZURE_OPENAI_DEPLOYMENT_NAME: str + AZURE_OPENAI_API_VERSION: str + + def __init__(self): + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "azure_openai_pipeline" + self.name = "Azure OpenAI Pipeline" + self.valves = self.Valves( + **{ + "AZURE_OPENAI_API_KEY": os.getenv("AZURE_OPENAI_API_KEY", "your-azure-openai-api-key-here"), + "AZURE_OPENAI_ENDPOINT": os.getenv("AZURE_OPENAI_ENDPOINT", "your-azure-openai-endpoint-here"), + "AZURE_OPENAI_DEPLOYMENT_NAME": os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "your-deployment-name-here"), + "AZURE_OPENAI_API_VERSION": os.getenv("AZURE_OPENAI_API_VERSION", "2024-02-01"), + } + ) + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. 
+ print(f"pipe:{__name__}") + + print(messages) + print(user_message) + + headers = { + "api-key": self.valves.AZURE_OPENAI_API_KEY, + "Content-Type": "application/json", + } + + url = f"{self.valves.AZURE_OPENAI_ENDPOINT}/openai/deployments/{self.valves.AZURE_OPENAI_DEPLOYMENT_NAME}/chat/completions?api-version={self.valves.AZURE_OPENAI_API_VERSION}" + + allowed_params = {'messages', 'temperature', 'role', 'content', 'contentPart', 'contentPartImage', + 'enhancements', 'data_sources', 'n', 'stream', 'stop', 'max_tokens', 'presence_penalty', + 'frequency_penalty', 'logit_bias', 'user', 'function_call', 'functions', 'tools', + 'tool_choice', 'top_p', 'log_probs', 'top_logprobs', 'response_format', 'seed'} + # remap user field + if "user" in body and not isinstance(body["user"], str): + body["user"] = body["user"]["id"] if "id" in body["user"] else str(body["user"]) + filtered_body = {k: v for k, v in body.items() if k in allowed_params} + # log fields that were filtered out as a single line + if len(body) != len(filtered_body): + print(f"Dropped params: {', '.join(set(body.keys()) - set(filtered_body.keys()))}") + + # Initialize the response variable to None. 
+        r = None
+        try:
+            r = requests.post(
+                url=url,
+                json=filtered_body,
+                headers=headers,
+                stream=True,
+            )
+
+            r.raise_for_status()
+            if body["stream"]:
+                return r.iter_lines()
+            else:
+                return r.json()
+        except Exception as e:
+            if r:
+                text = r.text
+                return f"Error: {e} ({text})"
+            else:
+                return f"Error: {e}"
diff --git a/openwebui/pipelines/examples/pipelines/providers/cloudflare_ai_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/cloudflare_ai_pipeline.py
new file mode 100644
index 0000000..3bbcadc
--- /dev/null
+++ b/openwebui/pipelines/examples/pipelines/providers/cloudflare_ai_pipeline.py
@@ -0,0 +1,83 @@
+from typing import List, Union, Generator, Iterator
+from schemas import OpenAIChatMessage
+from pydantic import BaseModel
+import os
+import requests
+
+
+class Pipeline:
+    class Valves(BaseModel):
+        CLOUDFLARE_ACCOUNT_ID: str = ""
+        CLOUDFLARE_API_KEY: str = ""
+        CLOUDFLARE_MODEL: str = ""
+        pass
+
+    def __init__(self):
+        # Optionally, you can set the id and name of the pipeline.
+        # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
+        # The identifier must be unique across all pipelines.
+        # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
+        # self.id = "openai_pipeline"
+        self.name = "Cloudflare AI"
+        self.valves = self.Valves(
+            **{
+                "CLOUDFLARE_ACCOUNT_ID": os.getenv(
+                    "CLOUDFLARE_ACCOUNT_ID",
+                    "your-account-id",
+                ),
+                "CLOUDFLARE_API_KEY": os.getenv(
+                    "CLOUDFLARE_API_KEY", "your-cloudflare-api-key"
+                ),
+                "CLOUDFLARE_MODEL": os.getenv(
+                    "CLOUDFLARE_MODELS",
+                    "@cf/meta/llama-3.1-8b-instruct",
+                ),
+            }
+        )
+        pass
+
+    async def on_startup(self):
+        # This function is called when the server is started.
+        print(f"on_startup:{__name__}")
+        pass
+
+    async def on_shutdown(self):
+        # This function is called when the server is stopped.
+        print(f"on_shutdown:{__name__}")
+        pass
+
+    def pipe(
+        self, user_message: str, model_id: str, messages: List[dict], body: dict
+    ) -> Union[str, Generator, Iterator]:
+        # This is where you can add your custom pipelines like RAG.
+        print(f"pipe:{__name__}")
+
+        headers = {}
+        headers["Authorization"] = f"Bearer {self.valves.CLOUDFLARE_API_KEY}"
+        headers["Content-Type"] = "application/json"
+
+        payload = {**body, "model": self.valves.CLOUDFLARE_MODEL}
+
+        if "user" in payload:
+            del payload["user"]
+        if "chat_id" in payload:
+            del payload["chat_id"]
+        if "title" in payload:
+            del payload["title"]
+
+        try:
+            r = requests.post(
+                url=f"https://api.cloudflare.com/client/v4/accounts/{self.valves.CLOUDFLARE_ACCOUNT_ID}/ai/v1/chat/completions",
+                json=payload,
+                headers=headers,
+                stream=True,
+            )
+
+            r.raise_for_status()
+
+            if body["stream"]:
+                return r.iter_lines()
+            else:
+                return r.json()
+        except Exception as e:
+            return f"Error: {e}"
diff --git a/openwebui/pipelines/examples/pipelines/providers/cohere_manifold_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/cohere_manifold_pipeline.py
new file mode 100644
index 0000000..61fcf8b
--- /dev/null
+++ b/openwebui/pipelines/examples/pipelines/providers/cohere_manifold_pipeline.py
@@ -0,0 +1,163 @@
+"""
+title: Cohere Manifold Pipeline
+author: justinh-rahb
+date: 2024-05-28
+version: 1.0
+license: MIT
+description: A pipeline for generating text using the Cohere API.
+requirements: requests +environment_variables: COHERE_API_KEY +""" + +import os +import json +from schemas import OpenAIChatMessage +from typing import List, Union, Generator, Iterator +from pydantic import BaseModel +import requests + + +class Pipeline: + class Valves(BaseModel): + COHERE_API_BASE_URL: str = "https://api.cohere.com/v1" + COHERE_API_KEY: str = "" + + def __init__(self): + self.type = "manifold" + + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + + self.id = "cohere" + + self.name = "cohere/" + + self.valves = self.Valves( + **{"COHERE_API_KEY": os.getenv("COHERE_API_KEY", "your-api-key-here")} + ) + + self.pipelines = self.get_cohere_models() + + async def on_startup(self): + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. 
+ + self.pipelines = self.get_cohere_models() + + pass + + def get_cohere_models(self): + if self.valves.COHERE_API_KEY: + try: + headers = {} + headers["Authorization"] = f"Bearer {self.valves.COHERE_API_KEY}" + headers["Content-Type"] = "application/json" + + r = requests.get( + f"{self.valves.COHERE_API_BASE_URL}/models", headers=headers + ) + + models = r.json() + return [ + { + "id": model["name"], + "name": model["name"] if "name" in model else model["name"], + } + for model in models["models"] + ] + except Exception as e: + + print(f"Error: {e}") + return [ + { + "id": self.id, + "name": "Could not fetch models from Cohere, please update the API Key in the valves.", + }, + ] + else: + return [] + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + try: + if body.get("stream", False): + return self.stream_response(user_message, model_id, messages, body) + else: + return self.get_completion(user_message, model_id, messages, body) + except Exception as e: + return f"Error: {e}" + + def stream_response( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Generator: + + headers = {} + headers["Authorization"] = f"Bearer {self.valves.COHERE_API_KEY}" + headers["Content-Type"] = "application/json" + + r = requests.post( + url=f"{self.valves.COHERE_API_BASE_URL}/chat", + json={ + "model": model_id, + "chat_history": [ + { + "role": "USER" if message["role"] == "user" else "CHATBOT", + "message": message["content"], + } + for message in messages[:-1] + ], + "message": user_message, + "stream": True, + }, + headers=headers, + stream=True, + ) + + r.raise_for_status() + + for line in r.iter_lines(): + if line: + try: + line = json.loads(line) + if line["event_type"] == "text-generation": + yield line["text"] + except: + pass + + def get_completion( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> str: + headers = {} + 
headers["Authorization"] = f"Bearer {self.valves.COHERE_API_KEY}" + headers["Content-Type"] = "application/json" + + r = requests.post( + url=f"{self.valves.COHERE_API_BASE_URL}/chat", + json={ + "model": model_id, + "chat_history": [ + { + "role": "USER" if message["role"] == "user" else "CHATBOT", + "message": message["content"], + } + for message in messages[:-1] + ], + "message": user_message, + }, + headers=headers, + ) + + r.raise_for_status() + data = r.json() + + return data["text"] if "text" in data else "No response from Cohere." diff --git a/openwebui/pipelines/examples/pipelines/providers/deepseek_manifold_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/deepseek_manifold_pipeline.py new file mode 100644 index 0000000..a8b1c49 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/deepseek_manifold_pipeline.py @@ -0,0 +1,150 @@ +""" +title: DeepSeek Manifold Pipeline +author: Mohammed El-Beltagy +date: 2025-01-20 +version: 1.4 +license: MIT +description: A pipeline for generating text using the DeepSeeks API. 
+requirements: requests, sseclient-py +environment_variables: DEEPSEEK_API_KEY +""" + + +import os +import requests +import json +from typing import List, Union, Generator, Iterator +from pydantic import BaseModel +import sseclient + +from utils.pipelines.main import pop_system_message + + +class Pipeline: + class Valves(BaseModel): + DEEPSEEK_API_KEY: str = "" + + def __init__(self): + self.type = "manifold" + self.id = "deepseek" + self.name = "deepseek/" + + self.valves = self.Valves( + **{"DEEPSEEK_API_KEY": os.getenv("DEEPSEEK_API_KEY", "your-api-key-here")} + ) + self.url = 'https://api.deepseek.com/chat/completions' + self.update_headers() + + def update_headers(self): + self.headers = { + 'Content-Type': 'application/json', + 'Authorization': f'Bearer {self.valves.DEEPSEEK_API_KEY}' + } + + def get_deepseek_models(self): + return [ + {"id": "deepseek-chat", "name": "DeepSeek Chat"}, + {"id": "deepseek-reasoner", "name": "DeepSeek R1"}, + ] + + async def on_startup(self): + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + self.update_headers() + + def pipelines(self) -> List[dict]: + return self.get_deepseek_models() + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + try: + # Remove unnecessary keys + for key in ['user', 'chat_id', 'title']: + body.pop(key, None) + + system_message, messages = pop_system_message(messages) + + # Process messages for DeepSeek format + processed_messages = [] + for message in messages: + if isinstance(message.get("content"), list): + # DeepSeek currently doesn't support multi-modal inputs + # Combine all text content + text_content = " ".join( + item["text"] for item in message["content"] + if item["type"] == "text" + ) + processed_messages.append({ + "role": message["role"], + "content": text_content + }) + else: + processed_messages.append({ + 
"role": message["role"], + "content": message.get("content", "") + }) + + # Add system message if present + if system_message: + processed_messages.insert(0, { + "role": "system", + "content": str(system_message) + }) + + # Prepare the payload for DeepSeek API + payload = { + "model": model_id, + "messages": processed_messages, + "max_tokens": body.get("max_tokens", 4096), + "temperature": body.get("temperature", 0.8), + "top_p": body.get("top_p", 0.9), + "stream": body.get("stream", False) + } + + # Add optional parameters if present + if "stop" in body: + payload["stop"] = body["stop"] + + if body.get("stream", False): + return self.stream_response(payload) + else: + return self.get_completion(payload) + except Exception as e: + return f"Error: {e}" + + def stream_response(self, payload: dict) -> Generator: + response = requests.post(self.url, headers=self.headers, json=payload, stream=True) + + if response.status_code == 200: + client = sseclient.SSEClient(response) + for event in client.events(): + try: + data = json.loads(event.data) + if "choices" in data and len(data["choices"]) > 0: + delta = data["choices"][0].get("delta", {}) + if "content" in delta: + yield delta["content"] + if data["choices"][0].get("finish_reason") is not None: + break + except json.JSONDecodeError: + print(f"Failed to parse JSON: {event.data}") + except KeyError as e: + print(f"Unexpected data structure: {e}") + print(f"Full data: {data}") + else: + raise Exception(f"Error: {response.status_code} - {response.text}") + + def get_completion(self, payload: dict) -> str: + response = requests.post(self.url, headers=self.headers, json=payload) + if response.status_code == 200: + res = response.json() + return res["choices"][0]["message"]["content"] if "choices" in res else "" + else: + raise Exception(f"Error: {response.status_code} - {response.text}") diff --git a/openwebui/pipelines/examples/pipelines/providers/google_manifold_pipeline.py 
b/openwebui/pipelines/examples/pipelines/providers/google_manifold_pipeline.py new file mode 100644 index 0000000..d5d500b --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/google_manifold_pipeline.py @@ -0,0 +1,210 @@ +""" +title: Google GenAI Manifold Pipeline +author: Marc Lopez (refactor by justinh-rahb) +date: 2024-06-06 +version: 1.3 +license: MIT +description: A pipeline for generating text using Google's GenAI models in Open-WebUI. +requirements: google-genai +environment_variables: GOOGLE_API_KEY +""" + +from typing import List, Union, Iterator +import os + +from pydantic import BaseModel, Field + +from google import genai +from google.genai import types +from PIL import Image +from io import BytesIO +import base64 + + +class Pipeline: + """Google GenAI pipeline""" + + class Valves(BaseModel): + """Options to change from the WebUI""" + + GOOGLE_API_KEY: str = Field(default="",description="Google Generative AI API key") + USE_PERMISSIVE_SAFETY: bool = Field(default=False,description="Use permissive safety settings") + GENERATE_IMAGE: bool = Field(default=False,description="Allow image generation") + + def __init__(self): + self.type = "manifold" + self.id = "google_genai" + self.name = "Google: " + + self.valves = self.Valves(**{ + "GOOGLE_API_KEY": os.getenv("GOOGLE_API_KEY", ""), + "USE_PERMISSIVE_SAFETY": False, + "GENERATE_IMAGE": False + }) + self.pipelines = [] + + if self.valves.GOOGLE_API_KEY: + self.update_pipelines() + + async def on_startup(self) -> None: + """This function is called when the server is started.""" + + print(f"on_startup:{__name__}") + if self.valves.GOOGLE_API_KEY: + self.update_pipelines() + + async def on_shutdown(self) -> None: + """This function is called when the server is stopped.""" + + print(f"on_shutdown:{__name__}") + + async def on_valves_updated(self) -> None: + """This function is called when the valves are updated.""" + + print(f"on_valves_updated:{__name__}") + if self.valves.GOOGLE_API_KEY: + 
self.update_pipelines() + + def update_pipelines(self) -> None: + """Update the available models from Google GenAI""" + + if self.valves.GOOGLE_API_KEY: + client = genai.Client(api_key=self.valves.GOOGLE_API_KEY) + try: + models = client.models.list() + self.pipelines = [ + { + "id": model.name[7:], # the "models/" part messeses up the URL + "name": model.display_name, + } + for model in models + if "generateContent" in model.supported_actions + if model.name[:7] == "models/" + ] + except Exception: + self.pipelines = [ + { + "id": "error", + "name": "Could not fetch models from Google, please update the API Key in the valves.", + } + ] + else: + self.pipelines = [] + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Iterator]: + if not self.valves.GOOGLE_API_KEY: + return "Error: GOOGLE_API_KEY is not set" + + try: + client = genai.Client(api_key=self.valves.GOOGLE_API_KEY) + + if model_id.startswith("google_genai."): + model_id = model_id[12:] + model_id = model_id.lstrip(".") + + if not (model_id.startswith("gemini-") or model_id.startswith("learnlm-") or model_id.startswith("gemma-")): + return f"Error: Invalid model name format: {model_id}" + + print(f"Pipe function called for model: {model_id}") + print(f"Stream mode: {body.get('stream', False)}") + + system_message = next((msg["content"] for msg in messages if msg["role"] == "system"), None) + + contents = [] + for message in messages: + if message["role"] != "system": + if isinstance(message.get("content"), list): + parts = [] + for content in message["content"]: + if content["type"] == "text": + parts.append({"text": content["text"]}) + elif content["type"] == "image_url": + image_url = content["image_url"]["url"] + if image_url.startswith("data:image"): + image_data = image_url.split(",")[1] + parts.append({"inline_data": {"mime_type": "image/jpeg", "data": image_data}}) + else: + parts.append({"image_url": image_url}) + contents.append({"role": 
message["role"], "parts": parts}) + else: + contents.append({ + "role": "user" if message["role"] == "user" else "model", + "parts": [{"text": message["content"]}] + }) + print(f"{contents}") + + generation_config = { + "temperature": body.get("temperature", 0.7), + "top_p": body.get("top_p", 0.9), + "top_k": body.get("top_k", 40), + "max_output_tokens": body.get("max_tokens", 8192), + "stop_sequences": body.get("stop", []), + "response_modalities": ['Text'] + } + + if self.valves.GENERATE_IMAGE and model_id.startswith("gemini-2.0-flash-exp"): + generation_config["response_modalities"].append("Image") + + if self.valves.USE_PERMISSIVE_SAFETY: + safety_settings = [ + types.SafetySetting(category='HARM_CATEGORY_HARASSMENT', threshold='OFF'), + types.SafetySetting(category='HARM_CATEGORY_HATE_SPEECH', threshold='OFF'), + types.SafetySetting(category='HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold='OFF'), + types.SafetySetting(category='HARM_CATEGORY_DANGEROUS_CONTENT', threshold='OFF'), + types.SafetySetting(category='HARM_CATEGORY_CIVIC_INTEGRITY', threshold='OFF') + ] + generation_config = types.GenerateContentConfig(**generation_config, safety_settings=safety_settings) + else: + generation_config = types.GenerateContentConfig(**generation_config) + + if system_message: + contents.insert(0, {"role": "user", "parts": [{"text": f"System: {system_message}"}]}) + + if body.get("stream", False): + response = client.models.generate_content_stream( + model = model_id, + contents = contents, + config = generation_config, + ) + return self.stream_response(response) + else: + response = client.models.generate_content( + model = model_id, + contents = contents, + config = generation_config, + ) + for part in response.candidates[0].content.parts: + if part.text is not None: + return part.text + elif part.inline_data is not None: + try: + image_data = base64.b64decode(part.inline_data.data) + image = Image.open(BytesIO((image_data))) + content_type = part.inline_data.mime_type + 
return "Image not supported yet." + except Exception as e: + print(f"Error processing image: {e}") + return "Error processing image." + + except Exception as e: + print(f"Error generating content: {e}") + return f"{e}" + + def stream_response(self, response): + for chunk in response: + for candidate in chunk.candidates: + if candidate.content.parts is not None: + for part in candidate.content.parts: + if part.text is not None: + yield chunk.text + elif part.inline_data is not None: + try: + image_data = base64.b64decode(part.inline_data.data) + image = Image.open(BytesIO(image_data)) + content_type = part.inline_data.mime_type + yield "Image not supported yet." + except Exception as e: + print(f"Error processing image: {e}") + yield "Error processing image." diff --git a/openwebui/pipelines/examples/pipelines/providers/google_vertexai_manifold_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/google_vertexai_manifold_pipeline.py new file mode 100644 index 0000000..2d77d28 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/google_vertexai_manifold_pipeline.py @@ -0,0 +1,232 @@ +""" +title: Google GenAI (Vertex AI) Manifold Pipeline +author: Hiromasa Kakehashi & Olv Grolle +date: 2024-09-19 +version: 1.0 +license: MIT +description: A pipeline for generating text using Google's GenAI models in Open-WebUI. +requirements: vertexai +environment_variables: GOOGLE_PROJECT_ID, GOOGLE_CLOUD_REGION +usage_instructions: + To use Gemini with the Vertex AI API, a service account with the appropriate role (e.g., `roles/aiplatform.user`) is required. + - For deployment on Google Cloud: Associate the service account with the deployment. + - For use outside of Google Cloud: Set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the path of the service account key file. 
+""" + +import os +import base64 +from typing import Iterator, List, Union + +import vertexai +from pydantic import BaseModel, Field +from vertexai.generative_models import ( + Content, + GenerationConfig, + GenerativeModel, + HarmBlockThreshold, + HarmCategory, + Part, +) + + +class Pipeline: + """Google GenAI pipeline""" + + class Valves(BaseModel): + """Options to change from the WebUI""" + + GOOGLE_PROJECT_ID: str = "" + GOOGLE_CLOUD_REGION: str = "" + USE_PERMISSIVE_SAFETY: bool = Field(default=False) + + def __init__(self): + self.type = "manifold" + self.name = "VertexAI: " + + self.valves = self.Valves( + **{ + "GOOGLE_PROJECT_ID": os.getenv("GOOGLE_PROJECT_ID", ""), + "GOOGLE_CLOUD_REGION": os.getenv("GOOGLE_CLOUD_REGION", ""), + "USE_PERMISSIVE_SAFETY": False, + } + ) + self.pipelines = [ + + # Gemini 2.0 models + {"id": "gemini-2.0-flash-lite", "name": "Gemini 2.0 Flash-Lite"}, + {"id": "gemini-2.0-flash", "name": "Gemini 2.0 Flash"}, + # Gemini 2.5 models + {"id": "gemini-2.5-flash-lite", "name": "Gemini 2.5 Flash-Lite"}, + {"id": "gemini-2.5-flash", "name": "Gemini 2.5 Flash"}, + {"id": "gemini-2.5-pro", "name": "Gemini 2.5 Pro "}, + + ] + + async def on_startup(self) -> None: + """This function is called when the server is started.""" + + print(f"on_startup:{__name__}") + vertexai.init( + project=self.valves.GOOGLE_PROJECT_ID, + location=self.valves.GOOGLE_CLOUD_REGION, + ) + + async def on_shutdown(self) -> None: + """This function is called when the server is stopped.""" + print(f"on_shutdown:{__name__}") + + async def on_valves_updated(self) -> None: + """This function is called when the valves are updated.""" + print(f"on_valves_updated:{__name__}") + vertexai.init( + project=self.valves.GOOGLE_PROJECT_ID, + location=self.valves.GOOGLE_CLOUD_REGION, + ) + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Iterator]: + try: + if not (model_id.startswith("gemini-") or 
model_id.startswith("gemma-")): + return f"Error: Invalid model name format: {model_id}" + + print(f"Pipe function called for model: {model_id}") + print(f"Stream mode: {body.get('stream', False)}") + print(f"Received {len(messages)} messages from OpenWebUI") + + # Debug: Log message structure + for i, msg in enumerate(messages): + print(f"Message {i}: role={msg.get('role')}, content type={type(msg.get('content'))}") + if isinstance(msg.get('content'), list): + for j, content_part in enumerate(msg['content']): + print(f" Part {j}: type={content_part.get('type')}") + if content_part.get('type') == 'image_url': + img_url = content_part.get('image_url', {}).get('url', '') + print(f" Image URL prefix: {img_url[:50]}...") + + system_message = next( + (msg["content"] for msg in messages if msg["role"] == "system"), None + ) + + model = GenerativeModel( + model_name=model_id, + system_instruction=system_message, + ) + + if body.get("title", False): # If chat title generation is requested + contents = [Content(role="user", parts=[Part.from_text(user_message)])] + print("Title generation mode - using simple text content") + else: + contents = self.build_conversation_history(messages) + + # Log what we're sending to Vertex AI + print(f"Sending {len(contents)} messages to Vertex AI:") + for i, content in enumerate(contents): + print(f" Message {i}: role={content.role}, parts={len(content.parts)}") + for j, part in enumerate(content.parts): + if hasattr(part, '_raw_data') and part._raw_data: + print(f" Part {j}: Image data ({len(part._raw_data)} bytes)") + else: + part_text = str(part)[:100] if str(part) else "No text" + print(f" Part {j}: Text - {part_text}...") + + generation_config = GenerationConfig( + temperature=body.get("temperature", 0.7), + top_p=body.get("top_p", 0.9), + top_k=body.get("top_k", 40), + max_output_tokens=body.get("max_tokens", 8192), + stop_sequences=body.get("stop", []), + ) + + if self.valves.USE_PERMISSIVE_SAFETY: + safety_settings = { + 
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE, + HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE, + HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE, + HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE, + } + else: + safety_settings = body.get("safety_settings") + + print("Calling Vertex AI generate_content...") + response = model.generate_content( + contents, + stream=body.get("stream", False), + generation_config=generation_config, + safety_settings=safety_settings, + ) + + if body.get("stream", False): + return self.stream_response(response) + else: + return response.text + + except Exception as e: + print(f"Error generating content: {e}") + return f"An error occurred: {str(e)}" + + def stream_response(self, response): + for chunk in response: + if chunk.text: + print(f"Chunk: {chunk.text}") + yield chunk.text + + def build_conversation_history(self, messages: List[dict]) -> List[Content]: + contents = [] + + for message in messages: + if message["role"] == "system": + continue + + parts = [] + + if isinstance(message.get("content"), list): + print(f"Processing multi-part message with {len(message['content'])} parts") + for content in message["content"]: + print(f"Processing content type: {content.get('type', 'unknown')}") + if content["type"] == "text": + parts.append(Part.from_text(content["text"])) + print(f"Added text part: {content['text'][:50]}...") + elif content["type"] == "image_url": + image_url = content["image_url"]["url"] + print(f"Processing image URL (first 50 chars): {image_url[:50]}...") + if image_url.startswith("data:image"): + try: + # Split the data URL to get mime type and base64 data + header, image_data = image_url.split(',', 1) + mime_type = header.split(':')[1].split(';')[0] + print(f"Detected image MIME type: {mime_type}") + + # Validate supported image formats + supported_formats = ['image/jpeg', 'image/jpg', 'image/png', 'image/gif', 
class Pipeline:
    """Manifold pipeline that proxies chat completions to Groq's
    OpenAI-compatible REST API."""

    class Valves(BaseModel):
        # Base URL of Groq's OpenAI-compatible API.
        GROQ_API_BASE_URL: str = "https://api.groq.com/openai/v1"
        # API key used for Bearer authentication; set via env or the WebUI valves.
        GROQ_API_KEY: str = ""

    def __init__(self):
        self.type = "manifold"
        # NOTE: best practice is to omit self.id so it is inferred from the
        # filename; this pipeline pins it explicitly.
        self.id = "groq"
        self.name = "Groq: "

        self.valves = self.Valves(
            **{"GROQ_API_KEY": os.getenv("GROQ_API_KEY", "your-groq-api-key-here")}
        )

        self.pipelines = self.get_models()

    async def on_startup(self):
        # Called when the server is started.
        print(f"on_startup:{__name__}")

    async def on_shutdown(self):
        # Called when the server is stopped.
        print(f"on_shutdown:{__name__}")

    async def on_valves_updated(self):
        # Called when the valves are updated; refresh the model list.
        print(f"on_valves_updated:{__name__}")
        self.pipelines = self.get_models()

    def get_models(self):
        """Fetch the available models from Groq.

        Returns a list of {"id", "name"} dicts, a single error placeholder
        entry on failure, or [] when no API key is configured.
        """
        if not self.valves.GROQ_API_KEY:
            return []
        try:
            headers = {
                "Authorization": f"Bearer {self.valves.GROQ_API_KEY}",
                "Content-Type": "application/json",
            }
            r = requests.get(
                f"{self.valves.GROQ_API_BASE_URL}/models", headers=headers
            )
            # Fail fast on HTTP errors instead of parsing an error page as JSON.
            r.raise_for_status()

            models = r.json()
            return [
                {
                    "id": model["id"],
                    "name": model["name"] if "name" in model else model["id"],
                }
                for model in models["data"]
            ]
        except Exception as e:
            print(f"Error: {e}")
            return [
                {
                    "id": "error",
                    "name": "Could not fetch models from Groq, please update the API Key in the valves.",
                },
            ]

    def pipe(
        self, user_message: str, model_id: str, messages: List[dict], body: dict
    ) -> Union[str, Generator, Iterator]:
        """Forward a chat-completion request to Groq.

        Returns an iterator over raw SSE lines when body["stream"] is truthy,
        the parsed JSON response otherwise, or an "Error: ..." string on failure.
        """
        print(f"pipe:{__name__}")
        print(messages)
        print(user_message)

        headers = {
            "Authorization": f"Bearer {self.valves.GROQ_API_KEY}",
            "Content-Type": "application/json",
        }

        payload = {**body, "model": model_id}
        # Strip Open WebUI bookkeeping fields that Groq's API does not accept.
        for key in ("user", "chat_id", "title"):
            payload.pop(key, None)

        print(payload)

        try:
            r = requests.post(
                url=f"{self.valves.GROQ_API_BASE_URL}/chat/completions",
                json=payload,
                headers=headers,
                stream=True,
            )
            r.raise_for_status()

            if body["stream"]:
                return r.iter_lines()
            else:
                return r.json()
        except Exception as e:
            return f"Error: {e}"
+ # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "litellm_manifold" + + # Optionally, you can set the name of the manifold pipeline. + self.name = "LiteLLM: " + + # Initialize rate limits + self.valves = self.Valves( + **{ + "LITELLM_BASE_URL": os.getenv( + "LITELLM_BASE_URL", "http://localhost:4001" + ), + "LITELLM_API_KEY": os.getenv("LITELLM_API_KEY", "your-api-key-here"), + "LITELLM_PIPELINE_DEBUG": os.getenv("LITELLM_PIPELINE_DEBUG", False), + } + ) + # Get models on initialization + self.pipelines = self.get_litellm_models() + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + # Get models on startup + self.pipelines = self.get_litellm_models() + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. + + self.pipelines = self.get_litellm_models() + pass + + def get_litellm_models(self): + + headers = {} + if self.valves.LITELLM_API_KEY: + headers["Authorization"] = f"Bearer {self.valves.LITELLM_API_KEY}" + + if self.valves.LITELLM_BASE_URL: + try: + r = requests.get( + f"{self.valves.LITELLM_BASE_URL}/v1/models", headers=headers + ) + models = r.json() + return [ + { + "id": model["id"], + "name": model["name"] if "name" in model else model["id"], + } + for model in models["data"] + ] + except Exception as e: + print(f"Error fetching models from LiteLLM: {e}") + return [ + { + "id": "error", + "name": "Could not fetch models from LiteLLM, please update the URL in the valves.", + }, + ] + else: + print("LITELLM_BASE_URL not set. 
Please configure it in the valves.") + return [] + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + if "user" in body: + print("######################################") + print(f'# User: {body["user"]["name"]} ({body["user"]["id"]})') + print(f"# Message: {user_message}") + print("######################################") + + headers = {} + if self.valves.LITELLM_API_KEY: + headers["Authorization"] = f"Bearer {self.valves.LITELLM_API_KEY}" + + try: + payload = {**body, "model": model_id, "user": body["user"]["id"]} + payload.pop("chat_id", None) + payload.pop("user", None) + payload.pop("title", None) + + r = requests.post( + url=f"{self.valves.LITELLM_BASE_URL}/v1/chat/completions", + json=payload, + headers=headers, + stream=True, + ) + + r.raise_for_status() + + if body["stream"]: + return r.iter_lines() + else: + return r.json() + except Exception as e: + return f"Error: {e}" diff --git a/openwebui/pipelines/examples/pipelines/providers/litellm_subprocess_manifold_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/litellm_subprocess_manifold_pipeline.py new file mode 100644 index 0000000..99b778a --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/litellm_subprocess_manifold_pipeline.py @@ -0,0 +1,211 @@ +""" +title: LiteLLM Subprocess Manifold Pipeline +author: open-webui +date: 2024-05-30 +version: 1.0 +license: MIT +description: A manifold pipeline that uses LiteLLM as a subprocess. 
class Pipeline:
    """Manifold pipeline that launches a LiteLLM proxy as a managed
    subprocess and forwards chat-completion requests to it."""

    class Valves(BaseModel):
        # Path to the LiteLLM YAML config file (created with defaults if missing).
        LITELLM_CONFIG_DIR: str = "./litellm/config.yaml"
        # Port / host the proxy subprocess listens on.
        LITELLM_PROXY_PORT: int = 4001
        LITELLM_PROXY_HOST: str = "127.0.0.1"
        # Parsed contents of the config file, cached for display/update.
        litellm_config: dict = {}

    def __init__(self):
        # Manifold pipelines can expose multiple models.
        self.type = "manifold"
        # Best practice is to leave self.id unset so it is inferred from the
        # filename, letting users install multiple versions side by side.
        # self.id = "litellm_subprocess_manifold"
        self.name = "LiteLLM: "

        self.valves = self.Valves(**{"LITELLM_CONFIG_DIR": f"./litellm/config.yaml"})
        # Handle to the running proxy subprocess, if any.
        self.background_process = None

    async def on_startup(self):
        """Create a default config file if needed, load it, and start the proxy."""
        print(f"on_startup:{__name__}")

        if not os.path.exists(self.valves.LITELLM_CONFIG_DIR):
            with open(self.valves.LITELLM_CONFIG_DIR, "w") as file:
                yaml.dump(
                    {
                        "general_settings": {},
                        "litellm_settings": {},
                        "model_list": [],
                        "router_settings": {},
                    },
                    file,
                )
            print(
                f"Config file not found. Created a default config file at {self.valves.LITELLM_CONFIG_DIR}"
            )

        with open(self.valves.LITELLM_CONFIG_DIR, "r") as file:
            litellm_config = yaml.safe_load(file)

        self.valves.litellm_config = litellm_config

        # Run the proxy in the background; on_startup must not block.
        asyncio.create_task(self.start_litellm_background())

    async def on_shutdown(self):
        """Stop the proxy subprocess when the server shuts down."""
        print(f"on_shutdown:{__name__}")
        await self.shutdown_litellm_background()

    async def on_valves_updated(self):
        """Reload the config file and restart the proxy with the new settings."""
        print(f"on_valves_updated:{__name__}")

        with open(self.valves.LITELLM_CONFIG_DIR, "r") as file:
            litellm_config = yaml.safe_load(file)

        self.valves.litellm_config = litellm_config

        await self.shutdown_litellm_background()
        await self.start_litellm_background()

    async def run_background_process(self, command):
        """Spawn `command` as an asyncio subprocess and relay its output.

        NOTE(review): stderr is drained fully before stdout is read; a child
        that fills its stdout pipe first could stall here — confirm this is
        acceptable for litellm's output volume.
        """
        print("run_background_process")
        try:
            print(f"Executing command: {command}")

            process = await asyncio.create_subprocess_exec(
                *command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            self.background_process = process
            print("Subprocess started successfully.")

            # Capture STDERR for debugging purposes.
            stderr_output = await process.stderr.read()
            stderr_text = stderr_output.decode().strip()
            if stderr_text:
                print(f"Subprocess STDERR: {stderr_text}")

            # Relay stdout line by line.
            async for line in process.stdout:
                print(line.decode().strip())

            returncode = await process.wait()
            print(f"Subprocess exited with return code {returncode}")
        except Exception as e:
            print(f"Failed to start subprocess: {e}")
            raise  # Re-raise so callers see the failure.

    async def start_litellm_background(self):
        """Launch the LiteLLM proxy with the configured host/port/config."""
        print("start_litellm_background")
        command = [
            "litellm",
            "--port",
            str(self.valves.LITELLM_PROXY_PORT),
            "--host",
            self.valves.LITELLM_PROXY_HOST,
            "--telemetry",
            "False",
            "--config",
            self.valves.LITELLM_CONFIG_DIR,
        ]

        await self.run_background_process(command)

    async def shutdown_litellm_background(self):
        """Terminate the proxy subprocess and wait for it to exit."""
        print("shutdown_litellm_background")

        if self.background_process:
            self.background_process.terminate()
            await self.background_process.wait()  # Ensure the process has terminated
            print("Subprocess terminated")
            self.background_process = None

    def get_litellm_models(self):
        """Fetch the model list from the running proxy; [] if it is not running."""
        if not self.background_process:
            return []
        try:
            r = requests.get(
                f"http://{self.valves.LITELLM_PROXY_HOST}:{self.valves.LITELLM_PROXY_PORT}/v1/models"
            )
            # Fail fast on HTTP errors instead of parsing an error body as JSON.
            r.raise_for_status()
            models = r.json()
            return [
                {
                    "id": model["id"],
                    "name": model["name"] if "name" in model else model["id"],
                }
                for model in models["data"]
            ]
        except Exception as e:
            print(f"Error: {e}")
            return [
                {
                    "id": "error",
                    "name": "Could not fetch models from LiteLLM, please update the URL in the valves.",
                },
            ]

    # Pipelines are the models available in the manifold.
    # It can be a list or a function that returns a list.
    def pipelines(self) -> List[dict]:
        return self.get_litellm_models()

    def pipe(
        self, user_message: str, model_id: str, messages: List[dict], body: dict
    ) -> Union[str, Generator, Iterator]:
        """Forward a chat-completion request to the proxy, streaming if requested."""
        user_id = None
        if "user" in body:
            user_id = body["user"]["id"]
            print("######################################")
            print(f'# User: {body["user"]["name"]} ({body["user"]["id"]})')
            print(f"# Message: {user_message}")
            print("######################################")

        payload = {**body, "model": model_id}
        # Only attach "user" when the caller supplied one; the previous
        # unconditional body["user"]["id"] lookup raised KeyError (surfacing
        # as "Error: 'user'") for requests without a user object.
        if user_id is not None:
            payload["user"] = user_id

        try:
            r = requests.post(
                url=f"http://{self.valves.LITELLM_PROXY_HOST}:{self.valves.LITELLM_PROXY_PORT}/v1/chat/completions",
                json=payload,
                stream=True,
            )

            r.raise_for_status()

            if body["stream"]:
                return r.iter_lines()
            else:
                return r.json()
        except Exception as e:
            return f"Error: {e}"
+ # self.id = "llama_cpp_pipeline" + + self.name = "Llama C++ Pipeline" + self.llm = None + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + from llama_cpp import Llama + + self.llm = Llama( + model_path="./models/llama3.gguf", + # n_gpu_layers=-1, # Uncomment to use GPU acceleration + # seed=1337, # Uncomment to set a specific seed + # n_ctx=2048, # Uncomment to increase the context window + ) + + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. + print(f"pipe:{__name__}") + + print(messages) + print(user_message) + print(body) + + response = self.llm.create_chat_completion_openai_v1( + messages=messages, + stream=body["stream"], + ) + + return response diff --git a/openwebui/pipelines/examples/pipelines/providers/mlx_manifold_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/mlx_manifold_pipeline.py new file mode 100644 index 0000000..2626090 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/mlx_manifold_pipeline.py @@ -0,0 +1,211 @@ +""" +title: MLX Manifold Pipeline +author: justinh-rahb +date: 2024-05-28 +version: 2.0 +license: MIT +description: A pipeline for generating text using Apple MLX Framework with dynamic model loading. 
class Pipeline:
    """Manifold pipeline that serves Apple MLX models via a dynamically
    managed `mlx_lm.server` subprocess, restarting it on model switch."""

    class Valves(BaseModel):
        # Model loaded on startup and used as fallback.
        MLX_DEFAULT_MODEL: str = "mlx-community/Meta-Llama-3-8B-Instruct-8bit"
        # Pattern passed to `mlx_lm.manage --scan` to discover local models.
        MLX_MODEL_FILTER: str = "mlx-community"
        # Comma-separated stop sequences sent with every request.
        MLX_STOP: str = "<|start_header_id|>,<|end_header_id|>,<|eot_id|>"
        MLX_CHAT_TEMPLATE: str | None = None
        MLX_USE_DEFAULT_CHAT_TEMPLATE: bool | None = False
        HUGGINGFACE_TOKEN: str | None = None

    def __init__(self):
        # Pipeline identification
        self.type = "manifold"
        self.id = "mlx"
        self.name = "MLX/"

        # Initialize valves and apply them (HF login, stop sequences).
        self.valves = self.Valves()
        self.update_valves()

        # Server configuration
        self.host = "localhost"  # Always use localhost for security
        self.port = None  # Port will be dynamically assigned

        # Model management
        self.models = self.get_mlx_models()
        self.current_model = None
        self.server_process = None

        # Start the MLX server with the default model
        self.start_mlx_server(self.valves.MLX_DEFAULT_MODEL)

    def update_valves(self):
        """Update pipeline configuration based on valve settings."""
        if self.valves.HUGGINGFACE_TOKEN:
            login(self.valves.HUGGINGFACE_TOKEN)
        self.stop_sequence = self.valves.MLX_STOP.split(",")

    def get_mlx_models(self):
        """Scan locally available MLX models matching MLX_MODEL_FILTER.

        Ids are prefixed with "mlx." so they match the ids computed in
        start_mlx_server() and the fallback entries below; the previous
        unprefixed ids never equalled self.current_model, which forced a
        server restart on every pipe() call.
        """
        try:
            cmd = [
                'mlx_lm.manage',
                '--scan',
                '--pattern', self.valves.MLX_MODEL_FILTER,
            ]
            result = subprocess.run(cmd, capture_output=True, text=True)
            lines = result.stdout.strip().split('\n')

            content_lines = [line for line in lines if line and not line.startswith('-')]

            models = []
            for line in content_lines[2:]:  # Skip header lines
                parts = line.split()
                if len(parts) >= 2:
                    repo_id = parts[0]
                    models.append({
                        # "mlx." prefix matches start_mlx_server()'s model_id scheme.
                        "id": f"mlx.{repo_id.split('/')[-1].lower()}",
                        "name": repo_id
                    })
            if not models:
                # Add default model if no models are found
                models.append({
                    "id": f"mlx.{self.valves.MLX_DEFAULT_MODEL.split('/')[-1].lower()}",
                    "name": self.valves.MLX_DEFAULT_MODEL
                })
            return models
        except Exception as e:
            logging.error(f"Error fetching MLX models: {e}")
            # Return default model on error
            return [{
                "id": f"mlx.{self.valves.MLX_DEFAULT_MODEL.split('/')[-1].lower()}",
                "name": self.valves.MLX_DEFAULT_MODEL
            }]

    def pipelines(self) -> List[dict]:
        """Return the list of available models as pipelines."""
        return self.models

    def start_mlx_server(self, model_name):
        """Start the MLX server with the specified model (no-op if already running it)."""
        model_id = f"mlx.{model_name.split('/')[-1].lower()}"
        if self.current_model == model_id and self.server_process and self.server_process.poll() is None:
            logging.info(f"MLX server already running with model {model_name}")
            return

        self.stop_mlx_server()

        self.port = self.find_free_port()

        command = [
            "mlx_lm.server",
            "--model", model_name,
            "--port", str(self.port),
        ]

        # Add chat template options if specified
        if self.valves.MLX_CHAT_TEMPLATE:
            command.extend(["--chat-template", self.valves.MLX_CHAT_TEMPLATE])
        elif self.valves.MLX_USE_DEFAULT_CHAT_TEMPLATE:
            command.append("--use-default-chat-template")

        logging.info(f"Starting MLX server with command: {' '.join(command)}")
        self.server_process = subprocess.Popen(command)
        self.current_model = model_id
        logging.info(f"Started MLX server for model {model_name} on port {self.port}")
        time.sleep(5)  # Give the server some time to start up

    def stop_mlx_server(self):
        """Stop the currently running MLX server and its children."""
        if self.server_process:
            try:
                process = psutil.Process(self.server_process.pid)
                for proc in process.children(recursive=True):
                    proc.terminate()
                process.terminate()
                process.wait(timeout=10)  # Wait for the process to terminate
            except psutil.NoSuchProcess:
                pass  # Process already terminated
            except psutil.TimeoutExpired:
                logging.warning("Timeout while terminating MLX server process")
            finally:
                self.server_process = None
                self.current_model = None
                self.port = None
                logging.info("Stopped MLX server")

    def find_free_port(self):
        """Find and return a free port to use for the MLX server.

        NOTE(review): the port is released before the server binds it, so a
        race with another process is possible — acceptable for local use.
        """
        import socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(("", 0))
        port = s.getsockname()[1]
        s.close()
        return port

    async def on_startup(self):
        """Perform any necessary startup operations."""
        logging.info(f"on_startup:{__name__}")

    async def on_shutdown(self):
        """Perform cleanup operations on shutdown."""
        self.stop_mlx_server()

    async def on_valves_updated(self):
        """Handle updates to the pipeline configuration."""
        self.update_valves()
        self.models = self.get_mlx_models()
        self.start_mlx_server(self.valves.MLX_DEFAULT_MODEL)

    def pipe(
        self, user_message: str, model_id: str, messages: List[dict], body: dict
    ) -> Union[str, Generator, Iterator]:
        """Process a request through the MLX pipeline, switching models if needed."""
        logging.info(f"pipe:{__name__}")

        # Switch model if necessary
        if model_id != self.current_model:
            model_name = next((model['name'] for model in self.models if model['id'] == model_id), self.valves.MLX_DEFAULT_MODEL)
            self.start_mlx_server(model_name)

        url = f"http://{self.host}:{self.port}/v1/chat/completions"
        headers = {"Content-Type": "application/json"}

        # Prepare the payload for the MLX server
        max_tokens = body.get("max_tokens", 4096)
        temperature = body.get("temperature", 0.8)
        repeat_penalty = body.get("repeat_penalty", 1.0)

        payload = {
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "repetition_penalty": repeat_penalty,
            "stop": self.stop_sequence,
            "stream": body.get("stream", False),
        }

        try:
            # Send request to MLX server
            r = requests.post(
                url, headers=headers, json=payload, stream=body.get("stream", False)
            )
            r.raise_for_status()

            # Return streamed response or full JSON response
            if body.get("stream", False):
                return r.iter_lines()
            else:
                return r.json()
        except Exception as e:
            return f"Error: {e}"
on port {self.port}") + + def find_free_port(self): + import socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.bind(("", 0)) + port = s.getsockname()[1] + s.close() + return port + + async def on_startup(self): + logging.info(f"on_startup:{__name__}") + + async def on_shutdown(self): + if self.subprocess and hasattr(self, "server_process"): + self.server_process.terminate() + logging.info(f"Terminated MLX server on port {self.port}") + + async def on_valves_updated(self): + self.update_valves() + if self.subprocess and hasattr(self, "server_process"): + self.server_process.terminate() + logging.info(f"Terminated MLX server on port {self.port}") + self.start_mlx_server() + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + logging.info(f"pipe:{__name__}") + + url = f"http://{self.host}:{self.port}/v1/chat/completions" + headers = {"Content-Type": "application/json"} + + max_tokens = body.get("max_tokens", 4096) + if not isinstance(max_tokens, int) or max_tokens < 0: + max_tokens = 4096 + + temperature = body.get("temperature", 0.8) + if not isinstance(temperature, (int, float)) or temperature < 0: + temperature = 0.8 + + repeat_penalty = body.get("repeat_penalty", 1.0) + if not isinstance(repeat_penalty, (int, float)) or repeat_penalty < 0: + repeat_penalty = 1.0 + + payload = { + "messages": messages, + "max_tokens": max_tokens, + "temperature": temperature, + "repetition_penalty": repeat_penalty, + "stop": self.stop_sequence, + "stream": body.get("stream", False), + } + + try: + r = requests.post( + url, headers=headers, json=payload, stream=body.get("stream", False) + ) + r.raise_for_status() + + if body.get("stream", False): + return r.iter_lines() + else: + return r.json() + except Exception as e: + return f"Error: {e}" \ No newline at end of file diff --git a/openwebui/pipelines/examples/pipelines/providers/ollama_manifold_pipeline.py 
class Pipeline:
    """Manifold pipeline exposing the models served by an Ollama instance."""

    class Valves(BaseModel):
        # Base URL of the Ollama server (required; populated in __init__).
        OLLAMA_BASE_URL: str

    def __init__(self):
        # Manifold pipelines can expose multiple models.
        self.type = "manifold"
        # Best practice is to leave self.id unset so it is inferred from the
        # filename, letting users install multiple versions side by side.
        # self.id = "ollama_manifold"
        self.name = "Ollama: "

        self.valves = self.Valves(
            **{
                # NOTE(review): 11435 is not Ollama's stock port (the sibling
                # ollama_pipeline uses 11434) — presumably a local proxy or
                # container mapping; confirm before changing.
                "OLLAMA_BASE_URL": os.getenv("OLLAMA_BASE_URL", "http://localhost:11435"),
            }
        )
        self.pipelines = []

    async def on_startup(self):
        # Called when the server is started; populate the model list.
        print(f"on_startup:{__name__}")
        self.pipelines = self.get_ollama_models()

    async def on_shutdown(self):
        # Called when the server is stopped.
        print(f"on_shutdown:{__name__}")

    async def on_valves_updated(self):
        # Called when the valves are updated; refresh the model list.
        print(f"on_valves_updated:{__name__}")
        self.pipelines = self.get_ollama_models()

    def get_ollama_models(self):
        """Fetch installed models from Ollama's /api/tags endpoint.

        Returns [{"id", "name"}] entries, a single error placeholder on
        failure, or [] when no base URL is configured.
        """
        if not self.valves.OLLAMA_BASE_URL:
            return []
        try:
            r = requests.get(f"{self.valves.OLLAMA_BASE_URL}/api/tags")
            # Fail fast on HTTP errors instead of parsing an error body as JSON.
            r.raise_for_status()
            models = r.json()
            return [
                {"id": model["model"], "name": model["name"]}
                for model in models["models"]
            ]
        except Exception as e:
            print(f"Error: {e}")
            return [
                {
                    "id": "error",
                    "name": "Could not fetch models from Ollama, please update the URL in the valves.",
                },
            ]

    def pipe(
        self, user_message: str, model_id: str, messages: List[dict], body: dict
    ) -> Union[str, Generator, Iterator]:
        """Forward the request to Ollama's OpenAI-compatible endpoint.

        Returns an iterator over raw SSE lines when body["stream"] is truthy,
        the parsed JSON response otherwise, or an "Error: ..." string on failure.
        """
        if "user" in body:
            print("######################################")
            print(f'# User: {body["user"]["name"]} ({body["user"]["id"]})')
            print(f"# Message: {user_message}")
            print("######################################")

        try:
            r = requests.post(
                url=f"{self.valves.OLLAMA_BASE_URL}/v1/chat/completions",
                json={**body, "model": model_id},
                stream=True,
            )

            r.raise_for_status()

            if body["stream"]:
                return r.iter_lines()
            else:
                return r.json()
        except Exception as e:
            return f"Error: {e}"
class Pipeline:
    """Manifold that exposes OpenAI's DALL-E image-generation models."""

    class Valves(BaseModel):
        """Options to change from the WebUI"""

        OPENAI_API_BASE_URL: str = "https://api.openai.com/v1"
        OPENAI_API_KEY: str = ""
        IMAGE_SIZE: str = "1024x1024"
        NUM_IMAGES: int = 1

    def __init__(self):
        self.type = "manifold"
        self.name = "ImageGen: "

        self.valves = self.Valves()
        self.client = OpenAI(
            base_url=self.valves.OPENAI_API_BASE_URL,
            api_key=self.valves.OPENAI_API_KEY,
        )

        self.pipelines = self.get_openai_assistants()

    async def on_startup(self) -> None:
        """Called when the server is started."""
        print(f"on_startup:{__name__}")

    async def on_shutdown(self):
        """Called when the server is stopped."""
        print(f"on_shutdown:{__name__}")

    async def on_valves_updated(self):
        """Rebuild the API client and model list after a valve change."""
        print(f"on_valves_updated:{__name__}")
        self.client = OpenAI(
            base_url=self.valves.OPENAI_API_BASE_URL,
            api_key=self.valves.OPENAI_API_KEY,
        )
        self.pipelines = self.get_openai_assistants()

    def get_openai_assistants(self) -> List[dict]:
        """Return the DALL-E image models visible to the configured API key.

        Returns:
            List[dict]: {"id", "name"} entries, or [] when no key is set.
        """
        if not self.valves.OPENAI_API_KEY:
            return []

        available = self.client.models.list()
        return [
            {"id": entry.id, "name": entry.id}
            for entry in available
            if "dall-e" in entry.id
        ]

    def pipe(
        self, user_message: str, model_id: str, messages: List[dict], body: dict
    ) -> Union[str, Generator, Iterator]:
        """Generate image(s) from the prompt and yield them as markdown links."""
        print(f"pipe:{__name__}")

        result = self.client.images.generate(
            model=model_id,
            prompt=user_message,
            size=self.valves.IMAGE_SIZE,
            n=self.valves.NUM_IMAGES,
        )

        markdown_links = [
            "![image](" + image.url + ")\n" for image in result.data if image.url
        ]
        # Single yield keeps this method a generator, as the framework expects.
        yield "".join(markdown_links)
b/openwebui/pipelines/examples/pipelines/providers/openai_manifold_pipeline.py @@ -0,0 +1,130 @@ +from typing import List, Union, Generator, Iterator +from schemas import OpenAIChatMessage +from pydantic import BaseModel + +import os +import requests + + +class Pipeline: + class Valves(BaseModel): + OPENAI_API_BASE_URL: str = "https://api.openai.com/v1" + OPENAI_API_KEY: str = "" + pass + + def __init__(self): + self.type = "manifold" + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "openai_pipeline" + self.name = "OpenAI: " + + self.valves = self.Valves( + **{ + "OPENAI_API_KEY": os.getenv( + "OPENAI_API_KEY", "your-openai-api-key-here" + ) + } + ) + + self.pipelines = self.get_openai_models() + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. 
+ print(f"on_valves_updated:{__name__}") + self.pipelines = self.get_openai_models() + pass + + def get_openai_models(self): + if self.valves.OPENAI_API_KEY: + try: + headers = {} + headers["Authorization"] = f"Bearer {self.valves.OPENAI_API_KEY}" + headers["Content-Type"] = "application/json" + + r = requests.get( + f"{self.valves.OPENAI_API_BASE_URL}/models", headers=headers + ) + + allowed_models = [ + "gpt", + "o1", + "o3", + "o4", + ] + + models = r.json() + return [ + { + "id": model["id"], + "name": model["name"] if "name" in model else model["id"], + } + for model in models["data"] + if any(substring in model["id"] for substring in allowed_models) + ] + + except Exception as e: + + print(f"Error: {e}") + return [ + { + "id": "error", + "name": "Could not fetch models from OpenAI, please update the API Key in the valves.", + }, + ] + else: + return [] + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. 
+ print(f"pipe:{__name__}") + + print(messages) + print(user_message) + + headers = {} + headers["Authorization"] = f"Bearer {self.valves.OPENAI_API_KEY}" + headers["Content-Type"] = "application/json" + + payload = {**body, "model": model_id} + + if "user" in payload: + del payload["user"] + if "chat_id" in payload: + del payload["chat_id"] + if "title" in payload: + del payload["title"] + + print(payload) + + try: + r = requests.post( + url=f"{self.valves.OPENAI_API_BASE_URL}/chat/completions", + json=payload, + headers=headers, + stream=True, + ) + + r.raise_for_status() + + if body["stream"]: + return r.iter_lines() + else: + return r.json() + except Exception as e: + return f"Error: {e}" diff --git a/openwebui/pipelines/examples/pipelines/providers/openai_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/openai_pipeline.py new file mode 100644 index 0000000..a7f97e5 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/openai_pipeline.py @@ -0,0 +1,81 @@ +from typing import List, Union, Generator, Iterator +from schemas import OpenAIChatMessage +from pydantic import BaseModel +import os +import requests + + +class Pipeline: + class Valves(BaseModel): + OPENAI_API_KEY: str = "" + pass + + def __init__(self): + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "openai_pipeline" + self.name = "OpenAI Pipeline" + self.valves = self.Valves( + **{ + "OPENAI_API_KEY": os.getenv( + "OPENAI_API_KEY", "your-openai-api-key-here" + ) + } + ) + pass + + async def on_startup(self): + # This function is called when the server is started. 
+ print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. + print(f"pipe:{__name__}") + + print(messages) + print(user_message) + + OPENAI_API_KEY = self.valves.OPENAI_API_KEY + MODEL = "gpt-3.5-turbo" + + headers = {} + headers["Authorization"] = f"Bearer {OPENAI_API_KEY}" + headers["Content-Type"] = "application/json" + + payload = {**body, "model": MODEL} + + if "user" in payload: + del payload["user"] + if "chat_id" in payload: + del payload["chat_id"] + if "title" in payload: + del payload["title"] + + print(payload) + + try: + r = requests.post( + url="https://api.openai.com/v1/chat/completions", + json=payload, + headers=headers, + stream=True, + ) + + r.raise_for_status() + + if body["stream"]: + return r.iter_lines() + else: + return r.json() + except Exception as e: + return f"Error: {e}" diff --git a/openwebui/pipelines/examples/pipelines/providers/perplexity_manifold_pipeline.py b/openwebui/pipelines/examples/pipelines/providers/perplexity_manifold_pipeline.py new file mode 100644 index 0000000..c985b65 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/providers/perplexity_manifold_pipeline.py @@ -0,0 +1,167 @@ +from typing import List, Union, Generator, Iterator +from pydantic import BaseModel +import os +import requests + +from utils.pipelines.main import pop_system_message + + +class Pipeline: + class Valves(BaseModel): + PERPLEXITY_API_BASE_URL: str = "https://api.perplexity.ai" + PERPLEXITY_API_KEY: str = "" + pass + + def __init__(self): + self.type = "manifold" + self.name = "Perplexity: " + + self.valves = self.Valves( + **{ + "PERPLEXITY_API_KEY": os.getenv( + "PERPLEXITY_API_KEY", "your-perplexity-api-key-here" + ) + } + ) + + # 
Debugging: print the API key to ensure it's loaded + print(f"Loaded API Key: {self.valves.PERPLEXITY_API_KEY}") + + # List of models + self.pipelines = [ + { + "id": "sonar-pro", + "name": "Sonar Pro" + }, + { + "id": "sonar", + "name": "Sonar" + }, + { + "id": "sonar-deep-research", + "name": "Sonar Deep Research" + }, + { + "id": "sonar-reasoning-pro", + "name": "Sonar Reasoning Pro" + }, + { + "id": "sonar-reasoning", "name": "Sonar Reasoning" + }, + { + "id": "r1-1776", "name": "R1-1776" + } + ] + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. + print(f"on_valves_updated:{__name__}") + # No models to fetch, static setup + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. + print(f"pipe:{__name__}") + + system_message, messages = pop_system_message(messages) + system_prompt = "You are a helpful assistant." 
+ if system_message is not None: + system_prompt = system_message["content"] + + print(system_prompt) + print(messages) + print(user_message) + + headers = { + "Authorization": f"Bearer {self.valves.PERPLEXITY_API_KEY}", + "Content-Type": "application/json", + "accept": "application/json" + } + + payload = { + "model": model_id, + "messages": [ + {"role": "system", "content": system_prompt}, + *messages + ], + "stream": body.get("stream", True), + "return_citations": True, + "return_images": True + } + + if "user" in payload: + del payload["user"] + if "chat_id" in payload: + del payload["chat_id"] + if "title" in payload: + del payload["title"] + + print(payload) + + try: + r = requests.post( + url=f"{self.valves.PERPLEXITY_API_BASE_URL}/chat/completions", + json=payload, + headers=headers, + stream=True, + ) + + r.raise_for_status() + + if body.get("stream", False): + return r.iter_lines() + else: + response = r.json() + formatted_response = { + "id": response["id"], + "model": response["model"], + "created": response["created"], + "usage": response["usage"], + "object": response["object"], + "choices": [ + { + "index": choice["index"], + "finish_reason": choice["finish_reason"], + "message": { + "role": choice["message"]["role"], + "content": choice["message"]["content"] + }, + "delta": {"role": "assistant", "content": ""} + } for choice in response["choices"] + ] + } + return formatted_response + except Exception as e: + return f"Error: {e}" + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Perplexity API Client") + parser.add_argument("--api-key", type=str, required=True, + help="API key for Perplexity") + parser.add_argument("--prompt", type=str, required=True, + help="Prompt to send to the Perplexity API") + + args = parser.parse_args() + + pipeline = Pipeline() + pipeline.valves.PERPLEXITY_API_KEY = args.api_key + response = pipeline.pipe( + user_message=args.prompt, 
model_id="llama-3-sonar-large-32k-online", messages=[], body={"stream": False}) + + print("Response:", response) diff --git a/openwebui/pipelines/examples/pipelines/rag/haystack_pipeline.py b/openwebui/pipelines/examples/pipelines/rag/haystack_pipeline.py new file mode 100644 index 0000000..41eb305 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/rag/haystack_pipeline.py @@ -0,0 +1,108 @@ +""" +title: Haystack Pipeline +author: open-webui +date: 2024-05-30 +version: 1.0 +license: MIT +description: A pipeline for retrieving relevant information from a knowledge base using the Haystack library. +requirements: haystack-ai, datasets>=2.6.1, sentence-transformers>=2.2.0 +""" + +from typing import List, Union, Generator, Iterator +from schemas import OpenAIChatMessage +import os +import asyncio + + +class Pipeline: + def __init__(self): + self.basic_rag_pipeline = None + + async def on_startup(self): + os.environ["OPENAI_API_KEY"] = "your_openai_api_key_here" + + from haystack.components.embedders import SentenceTransformersDocumentEmbedder + from haystack.components.embedders import SentenceTransformersTextEmbedder + from haystack.components.retrievers.in_memory import InMemoryEmbeddingRetriever + from haystack.components.builders import PromptBuilder + from haystack.components.generators import OpenAIGenerator + + from haystack.document_stores.in_memory import InMemoryDocumentStore + + from datasets import load_dataset + from haystack import Document + from haystack import Pipeline + + document_store = InMemoryDocumentStore() + + dataset = load_dataset("bilgeyucel/seven-wonders", split="train") + docs = [Document(content=doc["content"], meta=doc["meta"]) for doc in dataset] + + doc_embedder = SentenceTransformersDocumentEmbedder( + model="sentence-transformers/all-MiniLM-L6-v2" + ) + doc_embedder.warm_up() + + docs_with_embeddings = doc_embedder.run(docs) + document_store.write_documents(docs_with_embeddings["documents"]) + + text_embedder = 
SentenceTransformersTextEmbedder( + model="sentence-transformers/all-MiniLM-L6-v2" + ) + + retriever = InMemoryEmbeddingRetriever(document_store) + + template = """ + Given the following information, answer the question. + + Context: + {% for document in documents %} + {{ document.content }} + {% endfor %} + + Question: {{question}} + Answer: + """ + + prompt_builder = PromptBuilder(template=template) + + generator = OpenAIGenerator(model="gpt-3.5-turbo") + + self.basic_rag_pipeline = Pipeline() + # Add components to your pipeline + self.basic_rag_pipeline.add_component("text_embedder", text_embedder) + self.basic_rag_pipeline.add_component("retriever", retriever) + self.basic_rag_pipeline.add_component("prompt_builder", prompt_builder) + self.basic_rag_pipeline.add_component("llm", generator) + + # Now, connect the components to each other + self.basic_rag_pipeline.connect( + "text_embedder.embedding", "retriever.query_embedding" + ) + self.basic_rag_pipeline.connect("retriever", "prompt_builder.documents") + self.basic_rag_pipeline.connect("prompt_builder", "llm") + + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom RAG pipeline. + # Typically, you would retrieve relevant information from your knowledge base and synthesize it to generate a response. 
+ + print(messages) + print(user_message) + + question = user_message + response = self.basic_rag_pipeline.run( + { + "text_embedder": {"text": question}, + "prompt_builder": {"question": question}, + } + ) + + return response["llm"]["replies"][0] diff --git a/openwebui/pipelines/examples/pipelines/rag/llamaindex_ollama_github_pipeline.py b/openwebui/pipelines/examples/pipelines/rag/llamaindex_ollama_github_pipeline.py new file mode 100644 index 0000000..41e4af8 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/rag/llamaindex_ollama_github_pipeline.py @@ -0,0 +1,94 @@ +""" +title: Llama Index Ollama Github Pipeline +author: open-webui +date: 2024-05-30 +version: 1.0 +license: MIT +description: A pipeline for retrieving relevant information from a knowledge base using the Llama Index library with Ollama embeddings from a GitHub repository. +requirements: llama-index, llama-index-llms-ollama, llama-index-embeddings-ollama, llama-index-readers-github +""" + +from typing import List, Union, Generator, Iterator +from schemas import OpenAIChatMessage +import os +import asyncio + + +class Pipeline: + def __init__(self): + self.documents = None + self.index = None + + async def on_startup(self): + from llama_index.embeddings.ollama import OllamaEmbedding + from llama_index.llms.ollama import Ollama + from llama_index.core import VectorStoreIndex, Settings + from llama_index.readers.github import GithubRepositoryReader, GithubClient + + Settings.embed_model = OllamaEmbedding( + model_name="nomic-embed-text", + base_url="http://localhost:11434", + ) + Settings.llm = Ollama(model="llama3") + + global index, documents + + github_token = os.environ.get("GITHUB_TOKEN") + owner = "open-webui" + repo = "plugin-server" + branch = "main" + + github_client = GithubClient(github_token=github_token, verbose=True) + + reader = GithubRepositoryReader( + github_client=github_client, + owner=owner, + repo=repo, + use_parser=False, + verbose=False, + filter_file_extensions=( + [ + 
".png", + ".jpg", + ".jpeg", + ".gif", + ".svg", + ".ico", + "json", + ".ipynb", + ], + GithubRepositoryReader.FilterType.EXCLUDE, + ), + ) + + loop = asyncio.new_event_loop() + + reader._loop = loop + + try: + # Load data from the branch + self.documents = await asyncio.to_thread(reader.load_data, branch=branch) + self.index = VectorStoreIndex.from_documents(self.documents) + finally: + loop.close() + + print(self.documents) + print(self.index) + + async def on_shutdown(self): + # This function is called when the server is stopped. + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom RAG pipeline. + # Typically, you would retrieve relevant information from your knowledge base and synthesize it to generate a response. + + print(messages) + print(user_message) + + query_engine = self.index.as_query_engine(streaming=True) + response = query_engine.query(user_message) + + return response.response_gen diff --git a/openwebui/pipelines/examples/pipelines/rag/llamaindex_ollama_pipeline.py b/openwebui/pipelines/examples/pipelines/rag/llamaindex_ollama_pipeline.py new file mode 100644 index 0000000..0b7765c --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/rag/llamaindex_ollama_pipeline.py @@ -0,0 +1,74 @@ +""" +title: Llama Index Ollama Pipeline +author: open-webui +date: 2024-05-30 +version: 1.0 +license: MIT +description: A pipeline for retrieving relevant information from a knowledge base using the Llama Index library with Ollama embeddings. 
+requirements: llama-index, llama-index-llms-ollama, llama-index-embeddings-ollama +""" + +from typing import List, Union, Generator, Iterator +from schemas import OpenAIChatMessage +import os + +from pydantic import BaseModel + + +class Pipeline: + + class Valves(BaseModel): + LLAMAINDEX_OLLAMA_BASE_URL: str + LLAMAINDEX_MODEL_NAME: str + LLAMAINDEX_EMBEDDING_MODEL_NAME: str + + def __init__(self): + self.documents = None + self.index = None + + self.valves = self.Valves( + **{ + "LLAMAINDEX_OLLAMA_BASE_URL": os.getenv("LLAMAINDEX_OLLAMA_BASE_URL", "http://localhost:11434"), + "LLAMAINDEX_MODEL_NAME": os.getenv("LLAMAINDEX_MODEL_NAME", "llama3"), + "LLAMAINDEX_EMBEDDING_MODEL_NAME": os.getenv("LLAMAINDEX_EMBEDDING_MODEL_NAME", "nomic-embed-text"), + } + ) + + async def on_startup(self): + from llama_index.embeddings.ollama import OllamaEmbedding + from llama_index.llms.ollama import Ollama + from llama_index.core import Settings, VectorStoreIndex, SimpleDirectoryReader + + Settings.embed_model = OllamaEmbedding( + model_name=self.valves.LLAMAINDEX_EMBEDDING_MODEL_NAME, + base_url=self.valves.LLAMAINDEX_OLLAMA_BASE_URL, + ) + Settings.llm = Ollama( + model=self.valves.LLAMAINDEX_MODEL_NAME, + base_url=self.valves.LLAMAINDEX_OLLAMA_BASE_URL, + ) + + # This function is called when the server is started. + global documents, index + + self.documents = SimpleDirectoryReader("/app/backend/data").load_data() + self.index = VectorStoreIndex.from_documents(self.documents) + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom RAG pipeline. + # Typically, you would retrieve relevant information from your knowledge base and synthesize it to generate a response. 
+ + print(messages) + print(user_message) + + query_engine = self.index.as_query_engine(streaming=True) + response = query_engine.query(user_message) + + return response.response_gen diff --git a/openwebui/pipelines/examples/pipelines/rag/llamaindex_pipeline.py b/openwebui/pipelines/examples/pipelines/rag/llamaindex_pipeline.py new file mode 100644 index 0000000..2606361 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/rag/llamaindex_pipeline.py @@ -0,0 +1,49 @@ +""" +title: Llama Index Pipeline +author: open-webui +date: 2024-05-30 +version: 1.0 +license: MIT +description: A pipeline for retrieving relevant information from a knowledge base using the Llama Index library. +requirements: llama-index +""" + +from typing import List, Union, Generator, Iterator +from schemas import OpenAIChatMessage + + +class Pipeline: + def __init__(self): + self.documents = None + self.index = None + + async def on_startup(self): + import os + + # Set the OpenAI API key + os.environ["OPENAI_API_KEY"] = "your-api-key-here" + + from llama_index.core import VectorStoreIndex, SimpleDirectoryReader + + self.documents = SimpleDirectoryReader("./data").load_data() + self.index = VectorStoreIndex.from_documents(self.documents) + # This function is called when the server is started. + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom RAG pipeline. + # Typically, you would retrieve relevant information from your knowledge base and synthesize it to generate a response. 
+ + print(messages) + print(user_message) + + query_engine = self.index.as_query_engine(streaming=True) + response = query_engine.query(user_message) + + return response.response_gen diff --git a/openwebui/pipelines/examples/pipelines/rag/r2r_pipeline.py b/openwebui/pipelines/examples/pipelines/rag/r2r_pipeline.py new file mode 100644 index 0000000..1dbcc15 --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/rag/r2r_pipeline.py @@ -0,0 +1,45 @@ +""" +title: R2R Pipeline +author: Nolan Tremelling +date: 2025-03-21 +version: 1.0 +license: MIT +description: A pipeline for retrieving relevant information from a knowledge base using R2R. +requirements: r2r +""" + +from typing import List, Union, Generator, Iterator +from schemas import OpenAIChatMessage +import os +import asyncio + + +class Pipeline: + def __init__(self): + self.r2r_client = None + + async def on_startup(self): + from r2r import R2RClient + + # Connect to either SciPhi cloud or your self hosted R2R server + self.r2r_client = R2RClient(os.getenv("R2R_SERVER_URL", "https://api.sciphi.ai")) + self.r2r_client.set_api_key(os.getenv("R2R_API_KEY", "")) + + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. 
+ self.r2r_client = None + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + + print(messages) + print(user_message) + + response = self.r2r_client.retrieval.rag( + query=user_message, + ) + + return response.results.completion diff --git a/openwebui/pipelines/examples/pipelines/rag/text_to_sql_pipeline.py b/openwebui/pipelines/examples/pipelines/rag/text_to_sql_pipeline.py new file mode 100644 index 0000000..31471ad --- /dev/null +++ b/openwebui/pipelines/examples/pipelines/rag/text_to_sql_pipeline.py @@ -0,0 +1,111 @@ +""" +title: Llama Index DB Pipeline +author: 0xThresh +date: 2024-08-11 +version: 1.1 +license: MIT +description: A pipeline for using text-to-SQL for retrieving relevant information from a database using the Llama Index library. +requirements: llama_index, sqlalchemy, psycopg2-binary +""" + +from typing import List, Union, Generator, Iterator +import os +from pydantic import BaseModel +from llama_index.llms.ollama import Ollama +from llama_index.core.query_engine import NLSQLTableQueryEngine +from llama_index.core import SQLDatabase, PromptTemplate +from sqlalchemy import create_engine + + +class Pipeline: + class Valves(BaseModel): + DB_HOST: str + DB_PORT: str + DB_USER: str + DB_PASSWORD: str + DB_DATABASE: str + DB_TABLE: str + OLLAMA_HOST: str + TEXT_TO_SQL_MODEL: str + + + # Update valves/ environment variables based on your selected database + def __init__(self): + self.name = "Database RAG Pipeline" + self.engine = None + self.nlsql_response = "" + + # Initialize + self.valves = self.Valves( + **{ + "pipelines": ["*"], # Connect to all pipelines + "DB_HOST": os.getenv("DB_HOST", "http://localhost"), # Database hostname + "DB_PORT": os.getenv("DB_PORT", 5432), # Database port + "DB_USER": os.getenv("DB_USER", "postgres"), # User to connect to the database with + "DB_PASSWORD": os.getenv("DB_PASSWORD", "password"), # Password to connect to the database with + 
"DB_DATABASE": os.getenv("DB_DATABASE", "postgres"), # Database to select on the DB instance + "DB_TABLE": os.getenv("DB_TABLE", "table_name"), # Table(s) to run queries against + "OLLAMA_HOST": os.getenv("OLLAMA_HOST", "http://host.docker.internal:11434"), # Make sure to update with the URL of your Ollama host, such as http://localhost:11434 or remote server address + "TEXT_TO_SQL_MODEL": os.getenv("TEXT_TO_SQL_MODEL", "llama3.1:latest") # Model to use for text-to-SQL generation + } + ) + + def init_db_connection(self): + # Update your DB connection string based on selected DB engine - current connection string is for Postgres + self.engine = create_engine(f"postgresql+psycopg2://{self.valves.DB_USER}:{self.valves.DB_PASSWORD}@{self.valves.DB_HOST}:{self.valves.DB_PORT}/{self.valves.DB_DATABASE}") + return self.engine + + async def on_startup(self): + # This function is called when the server is started. + self.init_db_connection() + + async def on_shutdown(self): + # This function is called when the server is stopped. + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # Debug logging is required to see what SQL query is generated by the LlamaIndex library; enable on Pipelines server if needed + + # Create database reader for Postgres + sql_database = SQLDatabase(self.engine, include_tables=[self.valves.DB_TABLE]) + + # Set up LLM connection; uses phi3 model with 128k context limit since some queries have returned 20k+ tokens + llm = Ollama(model=self.valves.TEXT_TO_SQL_MODEL, base_url=self.valves.OLLAMA_HOST, request_timeout=180.0, context_window=30000) + + # Set up the custom prompt used when generating SQL queries from text + text_to_sql_prompt = """ + Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. 
+ You can order the results by a relevant column to return the most interesting examples in the database. + Unless the user specifies in the question a specific number of examples to obtain, query for at most 5 results using the LIMIT clause as per Postgres. You can order the results to return the most informative data in the database. + Never query for all the columns from a specific table, only ask for a few relevant columns given the question. + You should use DISTINCT statements and avoid returning duplicates wherever possible. + Pay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Pay attention to which column is in which table. Also, qualify column names with the table name when needed. You are required to use the following format, each taking one line: + + Question: Question here + SQLQuery: SQL Query to run + SQLResult: Result of the SQLQuery + Answer: Final answer here + + Only use tables listed below. + {schema} + + Question: {query_str} + SQLQuery: + """ + + text_to_sql_template = PromptTemplate(text_to_sql_prompt) + + query_engine = NLSQLTableQueryEngine( + sql_database=sql_database, + tables=[self.valves.DB_TABLE], + llm=llm, + embed_model="local", + text_to_sql_prompt=text_to_sql_template, + streaming=True + ) + + response = query_engine.query(user_message) + + return response.response_gen diff --git a/openwebui/pipelines/examples/scaffolds/example_pipeline_scaffold.py b/openwebui/pipelines/examples/scaffolds/example_pipeline_scaffold.py new file mode 100644 index 0000000..cb0ec11 --- /dev/null +++ b/openwebui/pipelines/examples/scaffolds/example_pipeline_scaffold.py @@ -0,0 +1,67 @@ +from typing import List, Union, Generator, Iterator +from schemas import OpenAIChatMessage +from pydantic import BaseModel + + +class Pipeline: + class Valves(BaseModel): + pass + + def __init__(self): + # Optionally, you can set the id and name of the pipeline. 
+ # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "pipeline_example" + + # The name of the pipeline. + self.name = "Pipeline Example" + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. + pass + + async def inlet(self, body: dict, user: dict) -> dict: + # This function is called before the OpenAI API request is made. You can modify the form data before it is sent to the OpenAI API. + print(f"inlet:{__name__}") + + print(body) + print(user) + + return body + + async def outlet(self, body: dict, user: dict) -> dict: + # This function is called after the OpenAI API response is completed. You can modify the messages after they are received from the OpenAI API. + print(f"outlet:{__name__}") + + print(body) + print(user) + + return body + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. 
+ print(f"pipe:{__name__}") + + # If you'd like to check for title generation, you can add the following check + if body.get("title", False): + print("Title Generation Request") + + print(messages) + print(user_message) + print(body) + + return f"{__name__} response to: {user_message}" diff --git a/openwebui/pipelines/examples/scaffolds/filter_pipeline_scaffold.py b/openwebui/pipelines/examples/scaffolds/filter_pipeline_scaffold.py new file mode 100644 index 0000000..cc08434 --- /dev/null +++ b/openwebui/pipelines/examples/scaffolds/filter_pipeline_scaffold.py @@ -0,0 +1,68 @@ +""" +title: Filter Pipeline +author: open-webui +date: 2024-05-30 +version: 1.1 +license: MIT +description: Example of a filter pipeline that can be used to edit the form data before it is sent to the OpenAI API. +requirements: requests +""" + +from typing import List, Optional +from pydantic import BaseModel +from schemas import OpenAIChatMessage + + +class Pipeline: + class Valves(BaseModel): + # List target pipeline ids (models) that this filter will be connected to. + # If you want to connect this filter to all pipelines, you can set pipelines to ["*"] + pipelines: List[str] = [] + + # Assign a priority level to the filter pipeline. + # The priority level determines the order in which the filter pipelines are executed. + # The lower the number, the higher the priority. + priority: int = 0 + + # Add your custom parameters here + pass + + def __init__(self): + # Pipeline filters are only compatible with Open WebUI + # You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API. + self.type = "filter" + + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. 
+ # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "filter_pipeline" + + self.name = "Filter" + + self.valves = self.Valves(**{"pipelines": ["llama3:latest"]}) + + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def inlet(self, body: dict, user: Optional[dict] = None) -> dict: + # This filter is applied to the form data before it is sent to the OpenAI API. + print(f"inlet:{__name__}") + + # If you'd like to check for title generation, you can add the following check + if body.get("title", False): + print("Title Generation Request") + + print(body) + print(user) + + return body diff --git a/openwebui/pipelines/examples/scaffolds/function_calling_scaffold.py b/openwebui/pipelines/examples/scaffolds/function_calling_scaffold.py new file mode 100644 index 0000000..63940e6 --- /dev/null +++ b/openwebui/pipelines/examples/scaffolds/function_calling_scaffold.py @@ -0,0 +1,33 @@ +from blueprints.function_calling_blueprint import Pipeline as FunctionCallingBlueprint + + +class Pipeline(FunctionCallingBlueprint): + class Valves(FunctionCallingBlueprint.Valves): + # Add your custom valves here + pass + + class Tools: + def __init__(self, pipeline) -> None: + self.pipeline = pipeline + + # Add your custom tools using pure Python code here, make sure to add type hints + # Use Sphinx-style docstrings to document your tools, they will be used for generating tools specifications + # Please refer to function_calling_filter_pipeline.py for an example + pass + + def __init__(self): + super().__init__() + + # Optionally, you can set the id and name of the pipeline. 
+ # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "my_tools_pipeline" + self.name = "My Tools Pipeline" + self.valves = self.Valves( + **{ + **self.valves.model_dump(), + "pipelines": ["*"], # Connect to all pipelines + }, + ) + self.tools = self.Tools(self) diff --git a/openwebui/pipelines/examples/scaffolds/manifold_pipeline_scaffold.py b/openwebui/pipelines/examples/scaffolds/manifold_pipeline_scaffold.py new file mode 100644 index 0000000..eaff91e --- /dev/null +++ b/openwebui/pipelines/examples/scaffolds/manifold_pipeline_scaffold.py @@ -0,0 +1,59 @@ +from typing import List, Union, Generator, Iterator +from schemas import OpenAIChatMessage + + +class Pipeline: + def __init__(self): + # You can also set the pipelines that are available in this pipeline. + # Set manifold to True if you want to use this pipeline as a manifold. + # Manifold pipelines can have multiple pipelines. + self.type = "manifold" + + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "manifold_pipeline" + + # Optionally, you can set the name of the manifold pipeline. + self.name = "Manifold: " + + # Define pipelines that are available in this manifold pipeline. + # This is a list of dictionaries where each dictionary has an id and name. 
+ self.pipelines = [ + { + "id": "pipeline-1", # This will turn into `manifold_pipeline.pipeline-1` + "name": "Pipeline 1", # This will turn into `Manifold: Pipeline 1` + }, + { + "id": "pipeline-2", + "name": "Pipeline 2", + }, + ] + pass + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. + print(f"pipe:{__name__}") + + # If you'd like to check for title generation, you can add the following check + if body.get("title", False): + print("Title Generation Request") + + print(messages) + print(user_message) + print(body) + + return f"{model_id} response to: {user_message}" diff --git a/openwebui/pipelines/main.py b/openwebui/pipelines/main.py new file mode 100644 index 0000000..e277d3a --- /dev/null +++ b/openwebui/pipelines/main.py @@ -0,0 +1,789 @@ +from fastapi import FastAPI, Request, Depends, status, HTTPException, UploadFile, File +from fastapi.middleware.cors import CORSMiddleware +from fastapi.concurrency import run_in_threadpool + + +from starlette.responses import StreamingResponse, Response +from pydantic import BaseModel, ConfigDict +from typing import List, Union, Generator, Iterator + + +from utils.pipelines.auth import bearer_security, get_current_user +from utils.pipelines.main import get_last_user_message, stream_message_template +from utils.pipelines.misc import convert_to_raw_url + +from contextlib import asynccontextmanager +from concurrent.futures import ThreadPoolExecutor +from schemas import FilterForm, OpenAIChatCompletionForm +from urllib.parse import urlparse + +import shutil +import aiohttp +import os +import importlib.util +import logging +import 
time +import json +import uuid +import sys +import subprocess + + +from config import API_KEY, PIPELINES_DIR, LOG_LEVELS + +if not os.path.exists(PIPELINES_DIR): + os.makedirs(PIPELINES_DIR) + + +PIPELINES = {} +PIPELINE_MODULES = {} +PIPELINE_NAMES = {} + +# Add GLOBAL_LOG_LEVEL for Pipeplines +log_level = os.getenv("GLOBAL_LOG_LEVEL", "INFO").upper() +logging.basicConfig(level=LOG_LEVELS[log_level]) + + +def get_all_pipelines(): + pipelines = {} + for pipeline_id in PIPELINE_MODULES.keys(): + pipeline = PIPELINE_MODULES[pipeline_id] + + if hasattr(pipeline, "type"): + if pipeline.type == "manifold": + manifold_pipelines = [] + + # Check if pipelines is a function or a list + if callable(pipeline.pipelines): + manifold_pipelines = pipeline.pipelines() + else: + manifold_pipelines = pipeline.pipelines + + for p in manifold_pipelines: + manifold_pipeline_id = f'{pipeline_id}.{p["id"]}' + + manifold_pipeline_name = p["name"] + if hasattr(pipeline, "name"): + manifold_pipeline_name = ( + f"{pipeline.name}{manifold_pipeline_name}" + ) + + pipelines[manifold_pipeline_id] = { + "module": pipeline_id, + "type": pipeline.type if hasattr(pipeline, "type") else "pipe", + "id": manifold_pipeline_id, + "name": manifold_pipeline_name, + "valves": ( + pipeline.valves if hasattr(pipeline, "valves") else None + ), + } + if pipeline.type == "filter": + pipelines[pipeline_id] = { + "module": pipeline_id, + "type": (pipeline.type if hasattr(pipeline, "type") else "pipe"), + "id": pipeline_id, + "name": ( + pipeline.name if hasattr(pipeline, "name") else pipeline_id + ), + "pipelines": ( + pipeline.valves.pipelines + if hasattr(pipeline, "valves") + and hasattr(pipeline.valves, "pipelines") + else [] + ), + "priority": ( + pipeline.valves.priority + if hasattr(pipeline, "valves") + and hasattr(pipeline.valves, "priority") + else 0 + ), + "valves": pipeline.valves if hasattr(pipeline, "valves") else None, + } + else: + pipelines[pipeline_id] = { + "module": pipeline_id, + "type": 
(pipeline.type if hasattr(pipeline, "type") else "pipe"), + "id": pipeline_id, + "name": (pipeline.name if hasattr(pipeline, "name") else pipeline_id), + "valves": pipeline.valves if hasattr(pipeline, "valves") else None, + } + + return pipelines + + +def parse_frontmatter(content): + frontmatter = {} + for line in content.split("\n"): + if ":" in line: + key, value = line.split(":", 1) + frontmatter[key.strip().lower()] = value.strip() + return frontmatter + + +def install_frontmatter_requirements(requirements): + if requirements: + req_list = [req.strip() for req in requirements.split(",")] + for req in req_list: + print(f"Installing requirement: {req}") + subprocess.check_call([sys.executable, "-m", "pip", "install", req]) + else: + print("No requirements found in frontmatter.") + + +async def load_module_from_path(module_name, module_path): + + try: + # Read the module content + with open(module_path, "r") as file: + content = file.read() + + # Parse frontmatter + frontmatter = {} + if content.startswith('"""'): + end = content.find('"""', 3) + if end != -1: + frontmatter_content = content[3:end] + frontmatter = parse_frontmatter(frontmatter_content) + + # Install requirements if specified + if "requirements" in frontmatter: + install_frontmatter_requirements(frontmatter["requirements"]) + + # Load the module + spec = importlib.util.spec_from_file_location(module_name, module_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + print(f"Loaded module: {module.__name__}") + if hasattr(module, "Pipeline"): + return module.Pipeline() + else: + raise Exception("No Pipeline class found") + except Exception as e: + print(f"Error loading module: {module_name}") + + # Move the file to the error folder + failed_pipelines_folder = os.path.join(PIPELINES_DIR, "failed") + if not os.path.exists(failed_pipelines_folder): + os.makedirs(failed_pipelines_folder) + + failed_file_path = os.path.join(failed_pipelines_folder, 
f"{module_name}.py") + os.rename(module_path, failed_file_path) + print(e) + return None + + +async def load_modules_from_directory(directory): + global PIPELINE_MODULES + global PIPELINE_NAMES + + for filename in os.listdir(directory): + if filename.endswith(".py"): + module_name = filename[:-3] # Remove the .py extension + module_path = os.path.join(directory, filename) + + # Create subfolder matching the filename without the .py extension + subfolder_path = os.path.join(directory, module_name) + if not os.path.exists(subfolder_path): + os.makedirs(subfolder_path) + logging.info(f"Created subfolder: {subfolder_path}") + + # Create a valves.json file if it doesn't exist + valves_json_path = os.path.join(subfolder_path, "valves.json") + if not os.path.exists(valves_json_path): + with open(valves_json_path, "w") as f: + json.dump({}, f) + logging.info(f"Created valves.json in: {subfolder_path}") + + pipeline = await load_module_from_path(module_name, module_path) + if pipeline: + # Overwrite pipeline.valves with values from valves.json + if os.path.exists(valves_json_path): + with open(valves_json_path, "r") as f: + valves_json = json.load(f) + if hasattr(pipeline, "valves"): + ValvesModel = pipeline.valves.__class__ + # Create a ValvesModel instance using default values and overwrite with valves_json + combined_valves = { + **pipeline.valves.model_dump(), + **valves_json, + } + valves = ValvesModel(**combined_valves) + pipeline.valves = valves + + logging.info(f"Updated valves for module: {module_name}") + + pipeline_id = pipeline.id if hasattr(pipeline, "id") else module_name + PIPELINE_MODULES[pipeline_id] = pipeline + PIPELINE_NAMES[pipeline_id] = module_name + logging.info(f"Loaded module: {module_name}") + else: + logging.warning(f"No Pipeline class found in {module_name}") + + global PIPELINES + PIPELINES = get_all_pipelines() + + +async def on_startup(): + await load_modules_from_directory(PIPELINES_DIR) + + for module in PIPELINE_MODULES.values(): + if 
hasattr(module, "on_startup"): + await module.on_startup() + + +async def on_shutdown(): + for module in PIPELINE_MODULES.values(): + if hasattr(module, "on_shutdown"): + await module.on_shutdown() + + +async def reload(): + await on_shutdown() + # Clear existing pipelines + PIPELINES.clear() + PIPELINE_MODULES.clear() + PIPELINE_NAMES.clear() + # Load pipelines afresh + await on_startup() + + +@asynccontextmanager +async def lifespan(app: FastAPI): + await on_startup() + yield + await on_shutdown() + + +app = FastAPI(docs_url="/docs", redoc_url=None, lifespan=lifespan) + +app.state.PIPELINES = PIPELINES + + +origins = ["*"] + + +app.add_middleware( + CORSMiddleware, + allow_origins=origins, + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +@app.middleware("http") +async def check_url(request: Request, call_next): + start_time = int(time.time()) + app.state.PIPELINES = get_all_pipelines() + response = await call_next(request) + process_time = int(time.time()) - start_time + response.headers["X-Process-Time"] = str(process_time) + + return response + + +@app.get("/v1/models") +@app.get("/models") +async def get_models(user: str = Depends(get_current_user)): + """ + Returns the available pipelines + """ + app.state.PIPELINES = get_all_pipelines() + return { + "data": [ + { + "id": pipeline["id"], + "name": pipeline["name"], + "object": "model", + "created": int(time.time()), + "owned_by": "openai", + "pipeline": { + "type": pipeline["type"], + **( + { + "pipelines": ( + pipeline["valves"].pipelines + if pipeline.get("valves", None) + else [] + ), + "priority": pipeline.get("priority", 0), + } + if pipeline.get("type", "pipe") == "filter" + else {} + ), + "valves": pipeline["valves"] != None, + }, + } + for pipeline in app.state.PIPELINES.values() + ], + "object": "list", + "pipelines": True, + } + + +@app.get("/v1") +@app.get("/") +async def get_status(): + return {"status": True} + + +@app.get("/v1/pipelines") +@app.get("/pipelines") 
+async def list_pipelines(user: str = Depends(get_current_user)): + if user == API_KEY: + return { + "data": [ + { + "id": pipeline_id, + "name": PIPELINE_NAMES[pipeline_id], + "type": ( + PIPELINE_MODULES[pipeline_id].type + if hasattr(PIPELINE_MODULES[pipeline_id], "type") + else "pipe" + ), + "valves": ( + True + if hasattr(PIPELINE_MODULES[pipeline_id], "valves") + else False + ), + } + for pipeline_id in list(PIPELINE_MODULES.keys()) + ] + } + else: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid API key", + ) + + +class AddPipelineForm(BaseModel): + url: str + + +async def download_file(url: str, dest_folder: str): + filename = os.path.basename(urlparse(url).path) + if not filename.endswith(".py"): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="URL must point to a Python file", + ) + + file_path = os.path.join(dest_folder, filename) + + async with aiohttp.ClientSession() as session: + async with session.get(url) as response: + if response.status != 200: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Failed to download file", + ) + with open(file_path, "wb") as f: + f.write(await response.read()) + + return file_path + + +@app.post("/v1/pipelines/add") +@app.post("/pipelines/add") +async def add_pipeline( + form_data: AddPipelineForm, user: str = Depends(get_current_user) +): + if user != API_KEY: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid API key", + ) + + try: + url = convert_to_raw_url(form_data.url) + + print(url) + file_path = await download_file(url, dest_folder=PIPELINES_DIR) + await reload() + return { + "status": True, + "detail": f"Pipeline added successfully from {file_path}", + } + except HTTPException as e: + raise e + except Exception as e: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=str(e), + ) + + +@app.post("/v1/pipelines/upload") +@app.post("/pipelines/upload") +async 
def upload_pipeline( + file: UploadFile = File(...), user: str = Depends(get_current_user) +): + if user != API_KEY: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid API key", + ) + + file_ext = os.path.splitext(file.filename)[1] + if file_ext != ".py": + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Only Python files are allowed.", + ) + + try: + # Ensure the destination folder exists + os.makedirs(PIPELINES_DIR, exist_ok=True) + + # Define the file path + file_path = os.path.join(PIPELINES_DIR, file.filename) + + # Save the uploaded file to the specified directory + with open(file_path, "wb") as buffer: + shutil.copyfileobj(file.file, buffer) + + # Perform any necessary reload or processing + await reload() + + return { + "status": True, + "detail": f"Pipeline uploaded successfully to {file_path}", + } + except HTTPException as e: + raise e + except Exception as e: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=str(e), + ) + + +class DeletePipelineForm(BaseModel): + id: str + + +@app.delete("/v1/pipelines/delete") +@app.delete("/pipelines/delete") +async def delete_pipeline( + form_data: DeletePipelineForm, user: str = Depends(get_current_user) +): + if user != API_KEY: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid API key", + ) + + pipeline_id = form_data.id + pipeline_name = PIPELINE_NAMES.get(pipeline_id.split(".")[0], None) + + if PIPELINE_MODULES[pipeline_id]: + if hasattr(PIPELINE_MODULES[pipeline_id], "on_shutdown"): + await PIPELINE_MODULES[pipeline_id].on_shutdown() + + pipeline_path = os.path.join(PIPELINES_DIR, f"{pipeline_name}.py") + if os.path.exists(pipeline_path): + os.remove(pipeline_path) + await reload() + return { + "status": True, + "detail": f"Pipeline {pipeline_id} deleted successfully", + } + else: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Pipeline {pipeline_id} not 
found", + ) + + +@app.post("/v1/pipelines/reload") +@app.post("/pipelines/reload") +async def reload_pipelines(user: str = Depends(get_current_user)): + if user == API_KEY: + await reload() + return {"message": "Pipelines reloaded successfully."} + else: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid API key", + ) + + +@app.get("/v1/{pipeline_id}/valves") +@app.get("/{pipeline_id}/valves") +async def get_valves(pipeline_id: str): + if pipeline_id not in PIPELINE_MODULES: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Pipeline {pipeline_id} not found", + ) + + pipeline = PIPELINE_MODULES[pipeline_id] + + if hasattr(pipeline, "valves") is False: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Valves for {pipeline_id} not found", + ) + + return pipeline.valves + + +@app.get("/v1/{pipeline_id}/valves/spec") +@app.get("/{pipeline_id}/valves/spec") +async def get_valves_spec(pipeline_id: str): + if pipeline_id not in PIPELINE_MODULES: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Pipeline {pipeline_id} not found", + ) + + pipeline = PIPELINE_MODULES[pipeline_id] + + if hasattr(pipeline, "valves") is False: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Valves for {pipeline_id} not found", + ) + + return pipeline.valves.schema() + + +@app.post("/v1/{pipeline_id}/valves/update") +@app.post("/{pipeline_id}/valves/update") +async def update_valves(pipeline_id: str, form_data: dict): + + if pipeline_id not in PIPELINE_MODULES: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Pipeline {pipeline_id} not found", + ) + + pipeline = PIPELINE_MODULES[pipeline_id] + + if hasattr(pipeline, "valves") is False: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Valves for {pipeline_id} not found", + ) + + try: + ValvesModel = pipeline.valves.__class__ + valves = ValvesModel(**form_data) + 
pipeline.valves = valves + + # Determine the directory path for the valves.json file + subfolder_path = os.path.join(PIPELINES_DIR, PIPELINE_NAMES[pipeline_id]) + valves_json_path = os.path.join(subfolder_path, "valves.json") + + # Save the updated valves data back to the valves.json file + with open(valves_json_path, "w") as f: + json.dump(valves.model_dump(), f) + + if hasattr(pipeline, "on_valves_updated"): + await pipeline.on_valves_updated() + except Exception as e: + print(e) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"{str(e)}", + ) + + return pipeline.valves + + +@app.post("/v1/{pipeline_id}/filter/inlet") +@app.post("/{pipeline_id}/filter/inlet") +async def filter_inlet(pipeline_id: str, form_data: FilterForm): + if pipeline_id not in app.state.PIPELINES: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Filter {pipeline_id} not found", + ) + + try: + pipeline = app.state.PIPELINES[form_data.body["model"]] + if pipeline["type"] == "manifold": + pipeline_id = pipeline_id.split(".")[0] + except: + pass + + pipeline = PIPELINE_MODULES[pipeline_id] + + try: + if hasattr(pipeline, "inlet"): + body = await pipeline.inlet(form_data.body, form_data.user) + return body + else: + return form_data.body + except Exception as e: + print(e) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"{str(e)}", + ) + + +@app.post("/v1/{pipeline_id}/filter/outlet") +@app.post("/{pipeline_id}/filter/outlet") +async def filter_outlet(pipeline_id: str, form_data: FilterForm): + if pipeline_id not in app.state.PIPELINES: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Filter {pipeline_id} not found", + ) + + try: + pipeline = app.state.PIPELINES[form_data.body["model"]] + if pipeline["type"] == "manifold": + pipeline_id = pipeline_id.split(".")[0] + except: + pass + + pipeline = PIPELINE_MODULES[pipeline_id] + + try: + if hasattr(pipeline, "outlet"): + 
body = await pipeline.outlet(form_data.body, form_data.user) + return body + else: + return form_data.body + except Exception as e: + print(e) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"{str(e)}", + ) + + +@app.post("/v1/chat/completions") +@app.post("/chat/completions") +async def generate_openai_chat_completion(form_data: OpenAIChatCompletionForm): + messages = [message.model_dump() for message in form_data.messages] + user_message = get_last_user_message(messages) + + if ( + form_data.model not in app.state.PIPELINES + or app.state.PIPELINES[form_data.model]["type"] == "filter" + ): + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Pipeline {form_data.model} not found", + ) + + def job(): + print(form_data.model) + + pipeline = app.state.PIPELINES[form_data.model] + pipeline_id = form_data.model + + print(pipeline_id) + + if pipeline["type"] == "manifold": + manifold_id, pipeline_id = pipeline_id.split(".", 1) + pipe = PIPELINE_MODULES[manifold_id].pipe + else: + pipe = PIPELINE_MODULES[pipeline_id].pipe + + if form_data.stream: + + def stream_content(): + res = pipe( + user_message=user_message, + model_id=pipeline_id, + messages=messages, + body=form_data.model_dump(), + ) + logging.info(f"stream:true:{res}") + + if isinstance(res, str): + message = stream_message_template(form_data.model, res) + logging.info(f"stream_content:str:{message}") + yield f"data: {json.dumps(message)}\n\n" + + if isinstance(res, Iterator): + for line in res: + if isinstance(line, BaseModel): + line = line.model_dump_json() + line = f"data: {line}" + + elif isinstance(line, dict): + line = json.dumps(line) + line = f"data: {line}" + + try: + line = line.decode("utf-8") + logging.info(f"stream_content:Generator:{line}") + except: + pass + + if isinstance(line, str) and line.startswith("data:"): + yield f"{line}\n\n" + else: + line = stream_message_template(form_data.model, line) + yield f"data: 
{json.dumps(line)}\n\n" + + if isinstance(res, str) or isinstance(res, Generator): + finish_message = { + "id": f"{form_data.model}-{str(uuid.uuid4())}", + "object": "chat.completion.chunk", + "created": int(time.time()), + "model": form_data.model, + "choices": [ + { + "index": 0, + "delta": {}, + "logprobs": None, + "finish_reason": "stop", + } + ], + } + + yield f"data: {json.dumps(finish_message)}\n\n" + yield f"data: [DONE]" + + return StreamingResponse(stream_content(), media_type="text/event-stream") + else: + res = pipe( + user_message=user_message, + model_id=pipeline_id, + messages=messages, + body=form_data.model_dump(), + ) + logging.info(f"stream:false:{res}") + + if isinstance(res, dict): + return res + elif isinstance(res, BaseModel): + return res.model_dump() + else: + + message = "" + + if isinstance(res, str): + message = res + + if isinstance(res, Generator): + for stream in res: + message = f"{message}{stream}" + + logging.info(f"stream:false:{message}") + return { + "id": f"{form_data.model}-{str(uuid.uuid4())}", + "object": "chat.completion", + "created": int(time.time()), + "model": form_data.model, + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": message, + }, + "logprobs": None, + "finish_reason": "stop", + } + ], + } + + return await run_in_threadpool(job) diff --git a/openwebui/pipelines/pipelines/.gitignore b/openwebui/pipelines/pipelines/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/openwebui/pipelines/requirements-minimum.txt b/openwebui/pipelines/requirements-minimum.txt new file mode 100644 index 0000000..559be6a --- /dev/null +++ b/openwebui/pipelines/requirements-minimum.txt @@ -0,0 +1,15 @@ +fastapi==0.111.0 +uvicorn[standard]==0.22.0 +pydantic==2.7.1 +python-multipart==0.0.9 +python-socketio +grpcio + +passlib==1.7.4 +passlib[bcrypt] +PyJWT[crypto] + +requests==2.32.2 +aiohttp==3.9.5 +httpx + diff --git a/openwebui/pipelines/requirements.txt 
b/openwebui/pipelines/requirements.txt new file mode 100644 index 0000000..b3cc96e --- /dev/null +++ b/openwebui/pipelines/requirements.txt @@ -0,0 +1,67 @@ +fastapi==0.111.0 +uvicorn[standard]==0.22.0 +pydantic==2.7.1 +python-multipart==0.0.9 +python-socketio +grpcio + +passlib==1.7.4 +passlib[bcrypt] +PyJWT[crypto] + +requests==2.32.2 +aiohttp==3.9.5 +httpx + +# AI libraries +openai +anthropic +google-generativeai +vertexai + +# Database +pymongo +peewee +SQLAlchemy +boto3 +redis +sqlmodel +chromadb +psycopg2-binary + +# Observability +langfuse +ddtrace +opik + +# ML libraries +torch +numpy +pandas + +xgboost +scikit-learn + +# NLP libraries +sentence-transformers +transformers +tokenizers +nltk +tiktoken + +# Image processing +Pillow +opencv-python + +# Visualization +matplotlib +seaborn + +# Web scraping +selenium +playwright +beautifulsoup4 + +# Llama Index for RAG +llama-index +llama-index-llms-ollama \ No newline at end of file diff --git a/openwebui/pipelines/schemas.py b/openwebui/pipelines/schemas.py new file mode 100644 index 0000000..caa0342 --- /dev/null +++ b/openwebui/pipelines/schemas.py @@ -0,0 +1,22 @@ +from typing import List, Optional +from pydantic import BaseModel, ConfigDict + +class OpenAIChatMessage(BaseModel): + role: str + content: str | List + + model_config = ConfigDict(extra="allow") + + +class OpenAIChatCompletionForm(BaseModel): + stream: bool = True + model: str + messages: List[OpenAIChatMessage] + + model_config = ConfigDict(extra="allow") + + +class FilterForm(BaseModel): + body: dict + user: Optional[dict] = None + model_config = ConfigDict(extra="allow") \ No newline at end of file diff --git a/openwebui/pipelines/start.bat b/openwebui/pipelines/start.bat new file mode 100644 index 0000000..248325e --- /dev/null +++ b/openwebui/pipelines/start.bat @@ -0,0 +1,5 @@ +@echo off +set PORT=9099 +set HOST=0.0.0.0 + +uvicorn main:app --host %HOST% --port %PORT% --forwarded-allow-ips '*' \ No newline at end of file diff --git 
a/openwebui/pipelines/start.sh b/openwebui/pipelines/start.sh new file mode 100644 index 0000000..e175741 --- /dev/null +++ b/openwebui/pipelines/start.sh @@ -0,0 +1,157 @@ +#!/usr/bin/env bash +PORT="${PORT:-9099}" +HOST="${HOST:-0.0.0.0}" +# Default value for PIPELINES_DIR +PIPELINES_DIR=${PIPELINES_DIR:-./pipelines} + +UVICORN_LOOP="${UVICORN_LOOP:-auto}" + +# Function to reset pipelines +reset_pipelines_dir() { + if [ "$RESET_PIPELINES_DIR" = true ]; then + echo "Resetting pipelines directory: $PIPELINES_DIR" + + # Check if the directory exists + if [ -d "$PIPELINES_DIR" ]; then + # Remove all contents of the directory + rm -rf "${PIPELINES_DIR:?}"/* + echo "All contents in $PIPELINES_DIR have been removed." + + # Optionally recreate the directory if needed + mkdir -p "$PIPELINES_DIR" + echo "$PIPELINES_DIR has been recreated." + else + echo "Directory $PIPELINES_DIR does not exist. No action taken." + fi + else + echo "RESET_PIPELINES_DIR is not set to true. No action taken." + fi +} + +# Function to install requirements if requirements.txt is provided +install_requirements() { + if [[ -f "$1" ]]; then + echo "requirements.txt found at $1. Installing requirements..." + pip install -r "$1" + else + echo "requirements.txt not found at $1. Skipping installation of requirements." + fi +} + +# Check if the PIPELINES_REQUIREMENTS_PATH environment variable is set and non-empty +if [[ -n "$PIPELINES_REQUIREMENTS_PATH" ]]; then + # Install requirements from the specified requirements.txt + install_requirements "$PIPELINES_REQUIREMENTS_PATH" +else + echo "PIPELINES_REQUIREMENTS_PATH not specified. Skipping installation of requirements." +fi + + +# Function to download the pipeline files +download_pipelines() { + local path=$1 + local destination=$2 + + # Remove any surrounding quotes from the path + path=$(echo "$path" | sed 's/^"//;s/"$//') + + echo "Downloading pipeline files from $path to $destination..." 
+ + if [[ "$path" =~ ^https://github.com/.*/.*/blob/.* ]]; then + # It's a single file + dest_file=$(basename "$path") + curl -L "$path?raw=true" -o "$destination/$dest_file" + elif [[ "$path" =~ ^https://github.com/.*/.*/tree/.* ]]; then + # It's a folder + git_repo=$(echo "$path" | awk -F '/tree/' '{print $1}') + subdir=$(echo "$path" | awk -F '/tree/' '{print $2}') + git clone --depth 1 --filter=blob:none --sparse "$git_repo" "$destination" + ( + cd "$destination" || exit + git sparse-checkout set "$subdir" + ) + elif [[ "$path" =~ \.py$ ]]; then + # It's a single .py file (but not from GitHub) + dest_file=$(basename "$path") + curl -L "$path" -o "$destination/$dest_file" + else + echo "Invalid URL format: $path" + exit 1 + fi +} + +# Function to parse and install requirements from frontmatter +install_frontmatter_requirements() { + local file=$1 + local file_content=$(cat "$1") + # Extract the first triple-quoted block + local first_block=$(echo "$file_content" | awk '/"""/{flag=!flag; if(flag) count++; if(count == 2) {exit}} flag' ) + + # Check if the block contains requirements + local requirements=$(echo "$first_block" | grep -i 'requirements:') + + if [ -n "$requirements" ]; then + # Extract the requirements list + requirements=$(echo "$requirements" | awk -F': ' '{print $2}' | tr ',' ' ' | tr -d '\r') + + # Construct and echo the pip install command + local pip_command="pip install $requirements" + echo "$pip_command" + pip install $requirements + else + echo "No requirements found in frontmatter of $file." 
+ fi +} + + +# Parse command line arguments for mode +MODE="full" # select a runmode ("setup", "run", "full" (setup + run)) +while [[ "$#" -gt 0 ]]; do + case $1 in + --mode) MODE="$2"; shift ;; + *) echo "Unknown parameter passed: $1"; exit 1 ;; + esac + shift +done +if [[ "$MODE" != "setup" && "$MODE" != "run" && "$MODE" != "full" ]]; then + echo "Invalid script mode: $MODE" + echo " Example usage: './start.sh --mode [setup|run|full]' " + exit 1 +fi + +# Function to handle different modes, added 1/29/24 +if [[ "$MODE" == "setup" || "$MODE" == "full" ]]; then + echo "Download + install Executed in mode: $MODE" + + reset_pipelines_dir + if [[ -n "$PIPELINES_REQUIREMENTS_PATH" ]]; then + install_requirements "$PIPELINES_REQUIREMENTS_PATH" + else + echo "PIPELINES_REQUIREMENTS_PATH not specified. Skipping installation of requirements." + fi + + if [[ -n "$PIPELINES_URLS" ]]; then + if [ ! -d "$PIPELINES_DIR" ]; then + mkdir -p "$PIPELINES_DIR" + fi + + IFS=';' read -ra ADDR <<< "$PIPELINES_URLS" + for path in "${ADDR[@]}"; do + download_pipelines "$path" "$PIPELINES_DIR" + done + + for file in "$PIPELINES_DIR"/*; do + if [[ -f "$file" ]]; then + install_frontmatter_requirements "$file" + fi + done + else + echo "PIPELINES_URLS not specified. Skipping pipelines download and installation." 
+ fi +fi + +if [[ "$MODE" == "run" || "$MODE" == "full" ]]; then + echo "Running via Mode: $MODE" + uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*' --loop "$UVICORN_LOOP" +fi + diff --git a/openwebui/pipelines/utils/pipelines/auth.py b/openwebui/pipelines/utils/pipelines/auth.py new file mode 100644 index 0000000..2b09820 --- /dev/null +++ b/openwebui/pipelines/utils/pipelines/auth.py @@ -0,0 +1,77 @@ +from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials +from fastapi import HTTPException, status, Depends + + +from pydantic import BaseModel +from typing import Union, Optional + + +from passlib.context import CryptContext +from datetime import datetime, timedelta +import jwt +import logging +import os + +import requests +import uuid + + +from config import API_KEY, PIPELINES_DIR + + +SESSION_SECRET = os.getenv("SESSION_SECRET", " ") +ALGORITHM = "HS256" + +############## +# Auth Utils +############## + +bearer_security = HTTPBearer() +pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto") + + +def verify_password(plain_password, hashed_password): + return ( + pwd_context.verify(plain_password, hashed_password) if hashed_password else None + ) + + +def get_password_hash(password): + return pwd_context.hash(password) + + +def create_token(data: dict, expires_delta: Union[timedelta, None] = None) -> str: + payload = data.copy() + + if expires_delta: + expire = datetime.utcnow() + expires_delta + payload.update({"exp": expire}) + + encoded_jwt = jwt.encode(payload, SESSION_SECRET, algorithm=ALGORITHM) + return encoded_jwt + + +def decode_token(token: str) -> Optional[dict]: + try: + decoded = jwt.decode(token, SESSION_SECRET, algorithms=[ALGORITHM]) + return decoded + except Exception as e: + return None + + +def extract_token_from_auth_header(auth_header: str): + return auth_header[len("Bearer ") :] + + +def get_current_user( + credentials: HTTPAuthorizationCredentials = Depends(bearer_security), +) -> 
Optional[dict]: + token = credentials.credentials + + if token != API_KEY: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid API key", + ) + + return token diff --git a/openwebui/pipelines/utils/pipelines/main.py b/openwebui/pipelines/utils/pipelines/main.py new file mode 100644 index 0000000..5d33522 --- /dev/null +++ b/openwebui/pipelines/utils/pipelines/main.py @@ -0,0 +1,153 @@ +import uuid +import time + +from typing import List +from schemas import OpenAIChatMessage + +import inspect +from typing import get_type_hints, Literal, Tuple + + +def stream_message_template(model: str, message: str): + return { + "id": f"{model}-{str(uuid.uuid4())}", + "object": "chat.completion.chunk", + "created": int(time.time()), + "model": model, + "choices": [ + { + "index": 0, + "delta": {"content": message}, + "logprobs": None, + "finish_reason": None, + } + ], + } + + +def get_last_user_message(messages: List[dict]) -> str: + for message in reversed(messages): + if message["role"] == "user": + if isinstance(message["content"], list): + for item in message["content"]: + if item["type"] == "text": + return item["text"] + return message["content"] + return None + + +def get_last_assistant_message(messages: List[dict]) -> str: + for message in reversed(messages): + if message["role"] == "assistant": + if isinstance(message["content"], list): + for item in message["content"]: + if item["type"] == "text": + return item["text"] + return message["content"] + return None + + +def get_system_message(messages: List[dict]) -> dict: + for message in messages: + if message["role"] == "system": + return message + return None + + +def remove_system_message(messages: List[dict]) -> List[dict]: + return [message for message in messages if message["role"] != "system"] + + +def pop_system_message(messages: List[dict]) -> Tuple[dict, List[dict]]: + return get_system_message(messages), remove_system_message(messages) + + +def 
add_or_update_system_message(content: str, messages: List[dict]) -> List[dict]: + """ + Adds a new system message at the beginning of the messages list + or updates the existing system message at the beginning. + + :param msg: The message to be added or appended. + :param messages: The list of message dictionaries. + :return: The updated list of message dictionaries. + """ + + if messages and messages[0].get("role") == "system": + messages[0]["content"] += f"{content}\n{messages[0]['content']}" + else: + # Insert at the beginning + messages.insert(0, {"role": "system", "content": content}) + + return messages + + +def doc_to_dict(docstring): + lines = docstring.split("\n") + description = lines[1].strip() + param_dict = {} + + for line in lines: + if ":param" in line: + line = line.replace(":param", "").strip() + param, desc = line.split(":", 1) + param_dict[param.strip()] = desc.strip() + ret_dict = {"description": description, "params": param_dict} + return ret_dict + + +def get_tools_specs(tools) -> List[dict]: + function_list = [ + {"name": func, "function": getattr(tools, func)} + for func in dir(tools) + if callable(getattr(tools, func)) and not func.startswith("__") + ] + + specs = [] + + for function_item in function_list: + function_name = function_item["name"] + function = function_item["function"] + + function_doc = doc_to_dict(function.__doc__ or function_name) + specs.append( + { + "name": function_name, + # TODO: multi-line desc? 
+ "description": function_doc.get("description", function_name), + "parameters": { + "type": "object", + "properties": { + param_name: { + "type": param_annotation.__name__.lower(), + **( + { + "enum": ( + param_annotation.__args__ + if hasattr(param_annotation, "__args__") + else None + ) + } + if hasattr(param_annotation, "__args__") + else {} + ), + "description": function_doc.get("params", {}).get( + param_name, param_name + ), + } + for param_name, param_annotation in get_type_hints( + function + ).items() + if param_name != "return" + }, + "required": [ + name + for name, param in inspect.signature( + function + ).parameters.items() + if param.default is param.empty + ], + }, + } + ) + + return specs diff --git a/openwebui/pipelines/utils/pipelines/misc.py b/openwebui/pipelines/utils/pipelines/misc.py new file mode 100644 index 0000000..e0c1e1c --- /dev/null +++ b/openwebui/pipelines/utils/pipelines/misc.py @@ -0,0 +1,35 @@ +import re + + +def convert_to_raw_url(github_url): + """ + Converts a GitHub URL to a raw URL. + + Example: + https://github.com/user/repo/blob/branch/path/to/file.ext + becomes + https://raw.githubusercontent.com/user/repo/branch/path/to/file.ext + + Parameters: + github_url (str): The GitHub URL to convert. + + Returns: + str: The converted raw URL. 
+ """ + # Define the regular expression pattern + pattern = r"https://github\.com/(.+?)/(.+?)/blob/(.+?)/(.+)" + + # Use the pattern to match and extract parts of the URL + match = re.match(pattern, github_url) + + if match: + user_repo = match.group(1) + "/" + match.group(2) + branch = match.group(3) + file_path = match.group(4) + + # Construct the raw URL + raw_url = f"https://raw.githubusercontent.com/{user_repo}/{branch}/{file_path}" + return raw_url + + # If the URL does not match the expected pattern, return the original URL or raise an error + return github_url diff --git a/src/config/settings.py b/src/config/settings.py index 10f43e9..80f05ab 100644 --- a/src/config/settings.py +++ b/src/config/settings.py @@ -61,6 +61,7 @@ def llm_config(self) -> Dict[str, Any]: "base_url": self.LITELLM_BASE_URL, "api_key": self.LITELLM_API_KEY, "model": self.LITELLM_MODEL, + "model_kwargs": {"stream_options": {"include_usage": True}}, }

xh4 zQhNSK+X9_c(=yrt3hM;Hv}MF1-WzPOoK@12ugEIo`i3xG)Y1K~F!2Wv#B@i=MGI#5 zY_4zIJFYWPI5&#Jhuj$3J78N$0=-Q;R2RE!uh8NWno4+$!v-7IXT6g)D2BDrqHHM&@sZ7Yv-g|PYtkksYR}O3$Rab}F zy63w#OXX`=s}>ji*S>S6flVe#q@* zFKBM=l&ORlw1{GV1>kw04rk6-t7vpBmLQspac=q$p;{$*wT zwy3qTPl(a5&)F@mU0(3iGaFsxp>!q#a!_}bX(58Ej~p^Hs!l5_M|u#gsT`~xdzLHQ zN{w~CBxA@pZ4d!DxGEAW%bG2-Zhz(>bjm}Vrus}LfQV~LaZwu`>E-jgs3R3CYwNtl zMeRCg#Lbx6D@#D5y4%zG-f`|(h2OiIX;g2{E$i%03VxDiQWB<+n!M%%9+1Bkfvt?* zJ3J&Wc+IryY^-Gj%cQ>uk^z9AGLG1zfN@QpJ&@#s@A8kvEIUU1Hq*|}0;tN5u6sv} zugcqVudJfK#RR@sR4RE>iZMhkSV$Lt?B)4e53khdJ(Hu6v${G~rvOd5*$qqbRe_6D1=lI;R>=*zf<8B6I``<=N#zv_ z5YX7X=XG9QDgfEU`wJl7F*LdYdfMY9_l%ko-jtLu$HM0Tm!WjFx!IPbcvxLzSzTPS zI?!}%XNri%>hkZk3IaXL@K7fi*`aoVuh;=W&5l-_R(95XX+2r?x~}BgvFoH-XgbVz zE}~qlY#n#GxUxH+P$~szFQ2~R7=3Jek+rvk%B=w(W4C%=w2rXqco0uM9`00)P5BP< zgWT@g3Em+J;AfuX zEoxJo9eGs32hzBm*Fo!?EMYs?n=0z7lU5<=gZC(3f_|`mG!Pr#%p=qzpz;DyD@PI0 zS1<`8a3Vrj)712dDbe5l{IN0@M7drq$DQ|rKQRVP#KR!I^NrpEJkdzv_XmJ`y zQtbVve-(WAxP#I<7kjXAaTz))PN*igbIX0ix9Bigh=XNlTk?GvWf1-*+jNNjr^q7< zDhyGSb%vHP{9F1!N<0$`%-9Km$6mgO){H?C@;;2)dyg&yX;Yv)SR6si*Anr7=gK#P ztn5#c(E!)Q%qFuWY5!MY%Zxc?_&SGHP;ur+(VzJsO1PHw=_-xs`Ms8`)F>%P;Ra19VM5_~%7vmJ!OwBCL3u27sj;yq^j45kjc|jx=4t zNXfU($h0(h#L+P9p)xsn%#`37A9tYtRiLVNNI9OgUjNukq3MjK%*1PPT$wBL@rNHh zE-^SJQMYD6rdnyE)`K;3=d`8#7OU7gt=UT+e_bJSN?+L=vB`o=5DSIMf~pa5{(vvQ z@$6z?4cePP4K3mqHq(MsUuS5GU>e85?|xZ25@h3rbactf*JB_O=Uzn(-e#9>_!lxi zJo8}*I<2iWBV(+xzm1z6`I~4y?g51?MBgLlr#Nf$_9Zl2nTwkG9j1l&br}t{+d?wl z)f~>qP>d9!T&<>Xotk}!3;sb`6y>as7Gijv+2wrp3%W+!Ny>KmuuNRnWRpWWZfPRi z{bBW-YHJF}P=?QC+S{S^l)#f50rbWGqxh$bhoXQfvse7y6_l_q4;_-3;WxiIN|H5h) zgUY8|-jp-t>0R%5+}2kO!TDJ>#&>VtLHCIIB83Oax}IVw>oo2%>5Yy>{5yYKaW~4r zmn~udNOoq9KxF#rm}R}l0vqr30z%wzWWja6OLc^54&ucsP-VrJz7nlIT4eS8&8;ox zB7NZIk%sCOHL}O?kX0@B>v#Kz(>T$XKn;6bAb5tNU6?zdN+BGmEEe~s8)VBq%}AD- zXasHo4rnAv#(rY$mqA=HM}K|YO_dE{8W5v=m|)~5AKE?oxdslWLX z2Amb-C>ZN??Ogn3mBDH94F(??dpqlundV$hYLu9}rBn=NHU3TY#yxL z**_X_hGq0s2iE+Y++^>RJ*vL%Qju}Q6Am>+-eJvir=_KcgOCkXd_Cnb0}G4}2cF*y 
zHH>2BN0{#-<%E6fRi1p_op`d^PQI^juE|9Y9=uSJy}5b5IK)`rVoINXx)Cju%aGbv ztre+qwe4}gO%oCL57Rr%y|?Gyc;bjOMK(UA86drf&w%kRi~m|JR!hesOn6U~%{s$| zUdYycDpzaCcC6(EagxY_J%%R4f%sJK9CV6~hp7upoR{UM4{bIU#LVZv{lkpB6Mc5b zISE7rG#49dR2d39syH`dQT9D0m|vR6p&EC|tL&7oJ8@DA%NzB0{!HeMG_AQW@@9Vf z1Qa1czkVCMR`xjkq@aZp>r?2>{40|i%Up+GtcHp0NA&UR`+V@su`-R#heE-og!<;Y z|KNjS`um#vOu1V!@9Z(7KQ!a(O*6zZ%6=_XVWo|(8K$~V?qeL<zzeRQJ| zU#X}Bj?L_fp86x(ciLAY{6p%z2?OI%D0XuCBl$`}j6Iig!!|V|d{t9S$vbnTG6WK# zEnmir7ea;n(NB?gs%)&HTGTWESr{j({H0A~0;385bYhvkRa-B;Y|8N5W^p9gPpqkG zD689w^Q%{I`-KQ>Q&z(EQpL{#J9C**#qTMoB|}qx|0u#(SfbT-a~AG9I5Gh<6nOZd>BuN!FLiq}C3gTf#2S}%SYpdQTfDgq>T2-^G#hTS zk}kh+nFitsby8H8SRa1#T{+|=k>UD9mz|B_X2(P+doxbaN1a-+ie$yEfZ_p9zk2xW z{S>H_{(GglsgGt{a5k}xP7?N;m!yy>{vWruB)Q0O33;&w8L^|OB{BPLB^rp0sIt(# zhu*e-il;2Me>lcSDA*Sb`6o>-{>f+5tADIg!arXEP+Id2fU4|>ssHvL8e1Jj-EP`G zg&hAaHvtHiy50Qut0`VfnDZHb7ngyx=WJN(z}eN+uI^rO=iV^18|VqNejID}HNJu# zM;b`hqZr!3lrt(g z9I_=EOWU|Z7qdHp$)9U)Ec01mF_c92#{PeWAP2cje4Y}MUUp4hNl@+TVjP2gCujdu zLt2~6^{>hNA*V!=!Ibu~5C8N#mV;&X@}xD=YJI-fPk+q%v{HQ^E6)c%rL&CHO0=HL ze-rx{cIGU@U@@6NDK$A!A0(satszB?+FtSZter~6yV1UQ0C3lpjiFAv(kRB+$VP&+ zhLrE$?QR9F&b3DidGhP<6)ly6fbtlXZAbg+(v&ew9elsPRF3ZKd*<`9VlQV~B7azm9KbMR*-&R}hJ32CaAY+w`w=l7gWtA{H$H>C zg1NN6bdzzQTh`2N-Xi7FOtPR#09c*r;a@;cZLy5VvbtbNbMhNmI#W$qt&}qTVN>4O z+y=zy(#>;dyG z{k~Q6ekUZ}5*SbT1-qypixlQmR`H5GZhLA+;Mn!s+-96XDGn%PU^G1b_TehMI9?;U z0<+4^$8c)DacEp){_0v(C~YwqsJrA`JO`)6naf}DbxW%&J%b~x$9l{_D}_gj(Amwz zpF13$2gEZAg1AC({V5Q6RnZmYI(KG1DQrl<5q1W~Dqi3J@~dJ-T?Bkr9mkCX#wzm7 z*-Ht=va^KrHQbJ<1qLK;U6<^=?h>gW*u@v!*XN7DB+lFRZ#^D$B>pR2#1zH!1&pyz_p+8t-yX|$hih9g#ipOlFEdR(3M2~bp&B;eX1^q*`ZgfA>6g|@@$33E%}%b7;Sjk`4&Fh5%dYHj zgXB$%zZimi+9?rt=s+*arR%gVpMt&rs^KB=&<4)bg=?fXvvNA!;16aDI0(Q^qX*`cU1EjI zeBQO;8P9ox-Xnd4ha9aId{C`eawB|e#Q!+aLV%elH#4xph9mW#tPx4*Nax-*U;XD2 zSEC9&?|(Zz0aT{-^&d*I19N7hZfxN9hQ*zcuY@6%mZIJc18ERG3AjM zSEcB75e>~#9ys=HrBvbknbKabYy#0@-$XtiD18} zzI<)gP9#3O2n4VIIWJEFJu@=FntY!lug+C_@#~rPO9m8(6^`|VE>6%oq0BK=B*~GF zbjIyI2F7_Wpj3w}hW)L%T#z 
zuGk+xRvvxm0MW(mOrvt-74+E;+ksr&jxAUA#1u?@`NUf9L51$`R(#1`{@zkF#LjP> z$W&UjvE^xpwgTcR{Y@vo0|)J^K`Fux@w*l^bdFaw8utv4ubUPPr0*@DId?hwA;yy$gd%uDW6AIoo z#@Yw1Ggtwqrdm&kH+>7+jnutLhHIp%EYw=J?72cl-!ySUqZ>Pce_i2SvQ=%oi7C3C zPL#PEr}pjI%ppCV)6RrV<@$riIrNX$b!UQ;e7ag=icv)iF|u1j}!0I1#`|7Zp&{N5M|G6r*XVBM)Q6Sn;7dI)v|nzldi&J3|L6*0e_%e@%AHW7p$A z2++E159aYvez<-X>s4f28=ZbIH!PgyP8pC11SY-ju|9@nUYNYqU}~j5Mzi>pF@qM7 z9Y2E68+!!@HATw&nR?>w*_+A(=hK?EhC9LE`!wghpsBF)wR`LW(d`CEFvBra|93XU zr27y>WU&@s4!6TddsV-ky5myA|FgJ=0ddUt>FuYHyQwz{jD5$K=(K`*gCq1?x(}-y z(2|{afPQ(p$nDsnPS0uGGZTN!&fX1rDD~fZkX;J<$ilh zz`f_--DSA?(MeShhN8LY`~0*MBCKD13GhvWZ`EUNIYb}Cz=qW8X~l@Iz8m}vAxgQw zYkwdRTq5qxJFni7zp<|B?9Oe|e08{)=4y#ikkCWvtOB)`O6L^RAM7or~qBeIBq*$3%=@ zRt-z{3OGFto*%kaNb%G@Qt_os0Q6jNjh8-rnubI`m;aCVXeCzrum8-Y(KjZ7!`%#q zpwewwn=<}?Ezrm5W&%4Vhri{LDoq@%22jPFCDEZJ>YB$HGMDm&b+T9M3sln|c z3_^;SJo=ndn5phV#rCkq_E@c~LRSV1SeHdNA_>)EXrXSZcS{f{B+Sjar?RAbPG`jp z5JX6<=bG%Ji0C-obc4X`Pm=Hqn~2!2>?npAxzVQY&q1NNoSvwpiYjtP`ZIVz^dAq^ z+_IVDRB{uBbQ0Zn^09J5&f-UfctX;Dpuv8*nTxO!(D8Y^3qpyncUa81%k2E2)X#PK z%8A+;81A-iJ&eQMSW(MNE^JorT5(4Iz$eMNm&I&Uv5`&VpZwyL>RN!ky|cUXYxb-5 zrgh$j7m(k&6pW%E+PHwqIVN9ze?>H9)oh0sg%x>S8#ri-LEVE=UX#`=hONa5EqZF( z3fNI4sby7tUyeDha*G6qP?zwU?Rxz-mT?BAy^OM{~y#ms?h58LOS8acn{5l~8TD zW9R0hET^iQYf0Tf^zX~;7=ify(r;~I3U#&b?xfHQy$SdAMFf?5{{6zcif@JIaDU6k}7K!&*FuT zdGras-)rUlXL1h4m9?VXcjp5#_X~RUf*P9osQaavuyD22#iMR#rDcCW$#M`rCB6*u z42*GG+DIYlu-{a(bBkr84hE(exu_-B|2UiC#_H0|g;DS3sm8>_z6tRTmhRvds)lpj zK*r$&*bjzv_gbl61hFzGS=YOd-^hf-Q%fy*ZHTaHW#9_=afO%CAa40gF2x$Wq}>PGmBgDT(s+dA4>9n{T}%ZuJxuVbTgx z*yOYb)#7k=t&vv`bwQ%w90|?_D}@cu%e7_6zHBpC{yzMWbV^M^r;#6}tY|I00}fZE z37@Q^)(6)-V02U0p*Uqkm%N!8y^;409G5v};i~(I->Q6#FV_0C;OIZ>4$@9R<F zF`V;67Ophg+tpgq3T&31G-2UiBL=iC$x)-M?VrtF(sfzk_a{foXQ`v_7V%amR~6-X zP|A@HcTs+I=EHzEF~%;8%nNG**%g03;j*QCw7R#Yrjkvh9zfM*5#+mjDCYyroO*mY zS&6prndzm<bU$vK_m*tY^&B8ztWDf7uedmM6qDROGKWT562iEK0(}+aDWLh{0uT_b1V{ zwKGLshUuGuLgTfBC6mIUYsXhQ1#4JYdR-+4Jg?;jY 
zVKNAs?rl)?;~w#>KuI)WC_Q`9ghoL5E>XU$oRKtFZhoSDK?47a|JD`RZHaNsZw(yx3g_=1haxw>-(Mr=Qm%CMa?dJtXESq?_-3M~T7N?9LD}n|kBLMPX7;8MPG#h^#lYW*kb}N(=#6??{R1P$nTQqt5U*GR% zD4F}e?(KEjjHKIc#gJhTs|aU)9kuWGr0t0s#T@K?NJ<)XUQX-MOiB6F{Tfaj@^?pX zPZ8oM2F$`HDM#?(0h1rFO|s)=hYks`TqT zrCNq*>4iEH<0V?d*J-yOqb^SbkRIc|ww>=xNZH6uv3HOC#rl7UVx_vL-FXuyk&^h} zH7;WU-U?>#(zOrAVR>!~;wrusRsEzx!M9&xT)0vrAEUqcmks;`@Kwsb=+QJwO9rTK z`_u2}tz1FJw+4d)%a$*}<}od#0h^Izu4Br*G-Ewb%N%!1{;7X1e>dRr&#I^}{IZH{ z6A{`2WlqN9-k9g82kDsF^Kd?+wN6~u?*Gy8?)hA8rSuX-ysM1Zgm?Prc*nbY3G9tV zD=wvd2iq;64Mb8^T3^#Q*m1VByT4Is3o%?MVH}!3q4taSJ!Lvg&|sfb^t&D76|9$SoSCTOMD^fQGdP8#ETTl6&ww))lek6xm3TTO5-Rx*0I z+S@lkzEaq+-&IK*xnp=u&8b5zrl(G7UYqk1TebteFx8r)B$n!4Ol3AbP-<6`&l2aM zTx{z)--jzm&Q*|#N<~3dY_F4#>9-(H!BH;e;%31kJEBFW6bojw5{$zM>j%5uD<5Pa zs*Hgs`-)?H))UBrp8p&170lD@QX}||YlCt%%Fd6%m^wT!_f^o^g9%b|Tmnwr=Ymnj z0-Y(Xwuxo+oadRAS?i80qv5}Y@5_YIL3zp+#$K5pvG@Z|3ZUL2dV2)?Lsj z#A5n6y`|9+^jHssXe2N;jr$OY;X%4{c3cD(R9ad?GNIE-iG)%&8_G4IhyPM9xAcT% zhTw!t6RtQ49Z(|%kGpLrgd@!nZbk7;Ays~q@5Ke)ZA5qZNd9iMD#(m#0zHWWxJ(M!|jgj1y%Cu zF0%ZMKx{GhOu)Sr&DL1Y@LBN8TgH7gh2LH}G>5>_yw)!~>~a9~@RAEy6XM&SiMat1 zGt2vElEw2cS6)cS+_tL)JlCk?+tC=Zc)JS6K(_)FC9B!}c}kkm!d26T&aivh{{&=k zk77>xydHT)6PS#gkR>FJG@4xM7R6gB?la1nT=q=HCXhw?Bm#EE9hyj_I`1YvHKN?U zn&hWK)Ylg)E{k{)>|JK!{_cfnh&*1&{tIa97~oTiSPd#o!E+#QY!Mw;Np4(d_h5XY zaVxI)?w5gPI>9KGiYAjEe&2!*0-5h}X~owixr;h55wD1;U`K$5AV2PydRf$)qc>NC z1@B#Cfh<}ursMW&|JRY3q~3ag>94+{$@ma)Ub+#|egJSv(BGx${tMOs$!E~L|I0{i zht>37|CvepH?z!Ftn`1~=>Lm2wfaz!qIOVhimPz3;7j+|AWyrM7O7Un(99PO>*ua5 z%_U(e!hnx?E)x8?R)i=vG^5IH%1^~j1@?DAJyx!FOL_pdCzdy5+uEw1cR$y#%D4+o zF{l*qrBtb5D)c#0vO8Po($rse!aoYq#@}xQ)uf{-xjr#)b7bd$+(Cy)tPMWVz469Y z8Qb@oezCW&>#UJEz*RmYd~A`^!1FDwLRL(d^T~^6h+%^`3?zaxOR%rLx)?L_xNY z>mt|m|J>r3lxy|*Wi%r4voMCE;_v@PXeWYK>C0v=%>~>bXH*z3=Zh9Iy5-uPna%>+ zSn(eBJn!ezPveNzNWf7(KbI4N!e9ANwe{knyY{O)oU+fBEes}H?++h~wg0ITXP&g2 zQ%5?;dQ2(Hl}nafTxH_1$h6X1FleDYV%uMdgw#8QAq3Wk+QUT$WJGcc}fv< zHEg-sKi)MVu9jO8fW*0K85kva?zex1gMZ1U@deiXc5kCzEUtZSnnJ~9I!Z`23bo5) 
zSTZc#Yj`nEw=n0|*`#5xHKSo*x*VP2UFA!c;VMm<-OiU8aw{4ytJ~G7o;(`}JDB$@ z#t=hnE@~}x{=47sFl_0(03NyRp6D##sd;M%%IavqHmcT=f-A)PpH}A-d1ZH_RYsMhuV17>^x-1yrsFhY>LZC zr}{Dx#=X3h(VIAdF>wC{W=v2rR;NL*xy$BpE(0lFkk@+A5eeRcFqF8h-w@V?6m)=!h%0mtG&R=nd&arW}W4wLZDnXhP;*crmkoVmTMV~!e)gU%5 zOfbr{Q5!4EjWeydG!9^cw6FyXJ6pN)TZk|2%dDD?=4yZr>`j;MtJ0a{6+XhwuOM7^ z`|EChZq0M$QoB<%sH=6C(cQs5E(k{lBHkn1ftW}7eLC102?vc1wNYL6VCNoTr*(Vh z#HM`#LrXuZ4KphCZCJP0BVumkZqb{$*H(kHOHZ$Gj!cB>>O8^>x2}ZQ3tf0Cm}s(h zdU%?=A0^CMRC;05(3lp+6Tc;o;a2L%%iFpE*ftJB#;wGafIxVCtfzAAn%eB_Q~KqB z`AKdK*>J)!o8x4&x25MG5~nxhE4gL!$dr=bIx|KT$AJ4cRh zuZ}0kc^V!RXU_K9Q4>12=Az$uAU?+Syf@WKO7-Q7z#&$@o6l^ED&v0=?#-n-nSWuW zKrjtfX=uMkD+50yr$=7q7Br{8GYh7U6XE;EJP!k0OiNW#`9OnA6`L@2lP#`IX4Vuk z0}R7ZGko(rk8gbI zTi;*rdhZ{1-F5FhwfArDv-dvt?lU=w$&juv?v9i$+n5NFYH9VCY6H)Y=B*0N5eJG+ zWLBdTc=B1=KBwRr!99#BoA+nl}BNu52}%S&Ls()Gn+3JRx*RTs(c z(J(V&p0BSfq{=DEM6!CgG9urZUqMCUb1ol`QjgL8X;ueKjyW6J7^jP7NlHE5#jsiQ zT}9506yU1NBo=il7S;eQ<~aduv+LKUHc5NKH@rO>O~!}l5pb#V@fHa$Zd{@&#-jNp zStna<19`*5w=f?&E2aM0Y8Jve1Qmtdl9_RSLaXb1%jB=>NueMRJXtLJx6|ualzU+N zVVReG0X&IqMD_(sd+?^FXomv)K_=6eYdTYP()KPDJcQ$Dc^sL-pcM&R1_iAc@Si=r zZ`>vAUMAZu$(pfy`4jcel>c~bkCE1vui0q&5L?t87=#n&nCwAt!!fPJrLh|Ih@7a! zumGI-c$j=*_kipS3$!fOye0#oCR?Uf@PP9!ChQ3Pv>K%;x`ZC<@K6alp*FCW$y>6_ z2)#s{sttSWrjtlafy2I_l}R}jYxqz%897eqnDbFJ(>&COADbC8&IN?*9f=n<^CRXs zGkc0gI9seUs*yGi+ypY(JoIRzHKZQe+nnekm;p;CA$g|MYp!$*Nt=#AqG|gyW7NKY zP1qEMN5Ih~Yo|`vuvwBwW$-yiZFEeujqXvkSd#e7DUYLneJsYsb5#m$Rc$l;(Xge! zbqi=aLn8qVcJ`kdT2Fh;z^rwN?TtP_%U>k5Mg?M+#Jju<&~ef*zFthsy3Z&IR(Q1% zV++hpC?{Ph@v)5M(=l`LG|k5hnu%VOfUXO(aShbEpRa*or~9JHH||GtJWc0eQ_`EA zvOH~v`^z>r_OBDvMrKItu`rS*tmo~#;lbf0yB@0f;fp2jk>fBUU)Multd$l+R7LH` z>;mCfo9v_>ks2AC8xMZc@{G&J8aXjS46?wsC=Zg}gzL>U9W9=7Vb2mBwm~I~I3(bLp^|sgXzeTM9f~(T;}#!1%k$SP1i}RE8nLp?5 zfd2TnvgcB4>;c_oY z zI#Fl}@KMoEkJ>%o6YvCok7`b^wp0x`x@qR? 
zcS|MW0LI*b71vF9`b5?u_kuQ`>0+G37yI~Vnw$JlaBE8kP=~(FSx;TKoS35wg>kRzqhOW$$5$XPFO^DD7RI8r0f42IYeWl0-`#Q7D zje4tp3Q&Gtg4=2{dFyzHZ?xOlVRBZER2qw@16X$#De(&x7U*7PQc=>A6_j?#1JwnwkpK3=8xMcNVO`yZ3oxXualC)?9& z6#;B9$I>+w7+ysBIalizcP!rkAezP5K#rou_(6pc{-^P>UvuG4$J=V%S2gc9TJ0HN z>&(WEXM)EOrZPiG;nR9~Gx;Y|(%#1>F{kxckEcXxQl9kBacn?vshQs&#Hl<-9lUc< zH!r{Gk^&miUTAd1_|}y@K_dpem}Py;K-$8E~wWW z_$k+B9f|9L_K?@%R(H}(#MczxxwT11rHA2++>c%Sxbby zf?Uox(*~i=Tcz_?zCDpw){O&-fZ7E%r5Z!;G!-rFL`y<;&|KPF6&p^1VY21!{fNxy ztseHo$ryjc53#TOh3>ERBLhP3lZSRv$z{2F8WCS5%Qu!lWPRpbrOi5ar{qFK{LGA0 zWI_0fx*tBKizbPj`r~V})+u9)RQ_tPU8;VR!DQJ5$ zQuI_WKq+rtUf{YKS=}Yhk@%|0aF_uV&4s*VS)8RCfn67^VC{D zZ*5*zRtrovDD6J&Pf^_!rvJQL&!0fD%dxjD)N-UGs{gL12?N1>(jA!_*;IFR)Ln>< zo?(fXh6LZm7bQ*7I!u*+`Hg!`=^fi$# zOc$Xmucs{pkJWvHZ6LmS@bD+BrXi65>PY#El5c5r0P0mY?o^a#(SO}uO$VxVB&c}T zcfbQjYcDPu%S@yJjUpiOfoEQjNt3eECBCFat)PvgANcZ%&-+a_)|aktUrm*%0v3M) zOEda4oK8O5k!);B#4N$&$y!zSoVY3tTk0Q+W_Id}sNF7P^;W;?&s!E{qf!;B0&2k& z#2W=d_<6}u5OP!t->%R;Iq+P_6S zC0Lwx$?9M$wy_Lliq|^qx48_vDl(X`OfDwF06P2OoSoRgjZa0+2Gc`qU8aV*vfE~z zy(45*0kTfb*~sMt_aVk~@0_zfIfC?I?Qe&3Xkh2wab;5CE`j}$Mjzi~ROMa;`No&y z*-sYuQ%h#5zI~44K6`~Tny{wyfSSx52)7hM|hV^6u5u~17x&UsYb9IA*#fgT1lffvKw9Y}oS)0{1<(t8iW zm{G!AVTBdO3~NBPINx&jlQt56$q{EIYka@)LKL$uJzsbV)1cZ*`3A};BJ)eznEG0r z9X3%$N+?WsjG)%wXb57Z{46Tyx$I@yhCI=O(4j2?%{GECG{QrH`$IK(cNSsC@7-zG*K;?F+-WpfASD(Gx8KkfW!cl~EImGzE@8pAaOsn+7EvW4U zPLWd3N3K+-YY3 z2u(hL*C-jCk7&&Xsu3-XhSmnt;S9Ja6XthcOGzXn`0s81DT#5Am5p)mi@LfPaH(Y= zjI1pPxP-MYCBn%hhHg;#LO&v76HSQxE1pe$pW^!|c#bZ-^ae!MkMP!Vtk#E;*zZL| z)DnjUHPX#bu3dGF@txP`7;3ZtncTjn6n$GTs>{jwGMm^H8gZONT5F;dQOmD&*#+>n zBaBS6_mkr1rXMG9W*cJ!U))upQ}Sa_S~MzVjb#7{bskljAx0=E%ixPb0s~__JQESv zIa8J4^xrVMqdkV%w;oM(G6#bhiIQa_*Ag(WiM^HMCMu*lNEZH_w(p`S^$mlK2 zjd8%wA&Qw8tEoH|W{$+VYtLPNw@dLhoU7@!Py1Tcw(OSNCVY-0Hk^rQHO$JF9m@UJ zKIfXrX}ws)pIw&a;{9{67v+H0|IFG%ICApk+#e^naZK6i@nd}_hfOhSXF81& zyO0E=_PU39Rur$e(unb!9jDix0S~ZFLrH39_Ayj6`Ab9@ zlO)jV`m;bjGbxakk&d>IrZ;83y8y?=r#xqanIYTJ%OnQEC`_bKQvlxFp;mr?#B~z~ 
z{6Z97?IvZDHpWQas3!Ck-q6eOg^C|J?r^4@MM{TH5cgJmGL&spnOw1?|Q%tf% zc@XQd%#wcRB_2sr1L6VrcFh) zsA4~T zQ=54OV3WQ+x8^b5$`D*Pft??k`S?g(^xpev_2b4z)gOU+e;r` zwu-v$-u=<8;AHfFdM&FReSz$I zXv9xxu)a>sE@wzN;}gXH2vf(6k#rsxS7f^gbt?+KM)6c3tJd z8Tc&f8_0i^u!xWHsGSS*z=A@XF;%|N46?0jGK@q@alt`XS{gwF&OSV6EqC0zX1#?j zmZFX&_^laG?x*^v)%d?NEC4m0v!2cuv?9(e9hGufQ3eiA_|DL7q=!g@c=9mt@Np}u zrPhMa%^?-^(gV@oX?_<@ZyDgihu45F-JF#Jkqky4r@cV&woWI0$Fo0CQj5RcjUvZ{ zR;xWT2zWDtfAQVL58%!_-}u_su=PIqQkve0_9VE*l%m|=VxnLQWW>IqTu!bjB zt&QtM=#vh4<^kxDhxd;h)B$pf&hL&iOzpI@b#sr0m2@%l%Vp$HB^7@@Cl911#Q`8w z;vx6a^$AU=l~6N7=tHT6sC3nv*xV3F_2{m7`S(!OsIjxs@jU?imf&S!*0yG>5I*vx zTD`gX_}-OV+m#9MH?uTP|GmU}vqhpYgBaJF@U_B*^*E-5V+j(5hRSX`xxd&aM`>3!F3*ZIXw3vN142phg}X(!qUNx{kfG%( z{BHx+%3;#ifeOcwE^h)eJlVDH({|(7-QfKN!EBbR z0#m}y%lDCA-nBRty(^p#=`!}oS5{h~t2j^|TRL%!#nnT{@Q}nvB~audL0z)yMcOq! z|F+~j1$E9@GdD^4z;iIRuro?gXX2}`RZT(8J)QAqt|H%VdwzDZ|8~HtDbM^%XjCY1 zA1lT1K3w}I?q`O5Uiqx-PFD{(4}g41E9%tW+LR-@s`f)9s3mrzp0dEH+X^Ki80w*( zqq&s996ojfx}=eU%<~r?in-3)wiN2^fJ&m-_3WNT#WdG%TFxH1zl8hL2}5pPPY2om=`Iy~odE=Yf(t zReK3M%zsVd0D2Fp5Bmai>cNwKw`t{;_tNPfnbe4ZgUH&|rYeAyVBySp+YQJY@u(yi z^fsQe7k?8jlWTZdc~KctT!#)tciiUBT6?|}R@x#bCIOH)mk=c0*(JBk>9j4KP)?`g z1*AiKw|tDo-!%Eqv$Ha+w_tZ$abP2S%QSnVz^S&brSlS>v-T$dUfvVh|J^(T$+p&b z^As^Nn%y+zcOOFY%{lSo6`3m{Rb%7vxt>b-1>xGZqN0+q-JAFm=(!&9he(do-rHjC zk8u`Pn$q-Swe`CVt6 zc7Jy(w?6s?NiF!ouvIZX+Aww>`^lfj{BGH})WZ8VkH>UA(6>rPbWRnm?LXdwBe)Gy zL&N!egf66{>@gn%acPhSI4{m1Z>vsvTpyRV=nU*8UAjp%#ZsBtWtWvjC7LL8%Gtco zNvt}nsL^aFb+n1!#zY=ZGb`TxZ8fMixIuZY2Wq$=e4n~^7L?N+6S87NFXXwOe#PhZTsf?I%`?pVW014Ro7SNAvhPS1Nk&-SetC@7 z+!O@3)Aq#_qyMtiZx=9jM8@kp^kU#}W8LLX*CvHOQ%N?#wx?>CG-UJJ@jBmfjnL2g zJQ-T~k+?;_(`Cy|qUHTC|12?I+lx=`ZO$;t7A5aqI=d|2Am^c%;wBxSY0pUKc9pR+ z&?(zm3wXgr@-#FZKoN}wJoDJx7WaCy>15!V!zw_euqD0KlN3bq^0A(#;c9BS3W?;e zh5RAsWf9g^?`p)?_&JR~&`3xvBMAe63*GKxG?kdAowqj$#C6QdUbFQeN&xz#aLdA> zqVEGVWBT{6J`x-MTrl0)9;v1)F5^m=*v_u*?~BE24K>qG4}bOz@{M67StZQDtVw1x zmRput*Ez4Rlf$ld#6BIW5o;RAmnnFIFVWG-?6YAE0?vpBPz7f(8r5wCDS`uET9DyJ 
zzF>zXhDGPXa>0>QDh0WUdEsC83wo?l{hVQ-kFir}{6(MCqN0(~!~e-|AoQ2Y1^<5b z);-GwM(X=ZR~kI?%^|x8t5|bVsb@%Fm#KHMwt?X*Dh{uKes8(Hr>_U1-Pa3u@}dY} zYTe}xf!?dxFp2wDL8OSr2Z8NW{hydV@%yQgKt#Hqt)V#CP2|7XY!Ov&i9BC7KcaVq z39qRUHgT4>dm0u^xj~MBD;!fEaTmc=)es>%UnEi;Jo>?=AuS_~;CH5zVGkP)87YfP z$rY0EOOS4d;mtF)f`#AzLF6(B(0(xjOk5}@lX@$aCeJvSgU*{6_whR>y&HfxjQhey zdn(%VyMck_iZv`z7dXs*F6Ef#j0ybuu0y`Y<1)cZAf2f8R$D{d?4{~Bgp2?Z*!Y6R z&NQ0wB#Y>7S3nEC!wjgqCEe0e^Td@B+OR7Zt#&+8TNOPSgAFBu0T9yd0Y#BbT)F7_xc|*DljMfOujSRd5S3dSL#DBG10jy zwYwF#Ifw#JaI*Mi;@A)VzJi2T7x5Aw3v7nv48c^&d!NJdqf|}9|&HwFeyp)HE|JQUBx=Dp3(ogQl(s+qP4?thF!T^kea{t_yZ1+@FQ&& zs)K_&M(Sfybd~wr_?b-KGBS*7PmawlsQCDBM5#@#RKb2teD4>tm#w}UYeQ=?aIunK zf4*^KtxNOvIQ_vqeIH`EWDdImN;b!!aGyc^GAqsLnD?=^{4PV9jt>f~U)DQn(H;&2 z6PgWzt9x97pWcD_Gd2!;axSXcn@QN4zBc{=YRRilB%>VCDzaQ@F>jQVpHE~O-s20h z7znyJv1VwF!P|XG)#B+~l>3@1Mt&2Y|K(afk;v$WErWNMMenDj(jUq>x#A=9nyk#l zA6rsMz1@66=+!*W7SOsd4hISixdP>HM>2jMx=dA{+XzY;D$~){6^?4+d+ptqDa?s} z_OiakopK32a9ChJ?Nga{c&K_NsU-{h2r#h5CdvQi-!!lE-z9mw*cHA&&ZwqmEXFml zr#~fTs)`Oqp1Fi#$So8v@k;FO{^H2IezhQQxciJ5ks*Q@`%s=PmxC70b|AfGV@)jt z6(#+FV`Wy-pqFIZg8_tHoNJDlIlfrhp`I%>C)OG_@h_i_*2@0 zZ&(-KjFFEeW*3EbzEs|Ua^>qGP0~aSI3|LmXTt^g0@ntX*O47-9up&OMW0c1d4YtZ zZi~{Nb6q6|_OFWAh&K^)0VjG+mLH~-c!=QD2K69GB-a0P>dCT1TR+v&LS3kn67eAT z?9(K4=r|J*c%vmTOUaK|}USp2@?8-FtQk3E^${bXgUm;T0%(V(ln^x8+!M_iRv+?{CX>!8xAM3Fp3D>GE;+PZ z30~t7mgoX7qWv|uVr1kilSKvaCYg|AEL{`tfOlE;vNaEbQL$^I?wBX^Gb@2g61kb# zpUb@0J4-r`lPydPW!5g_3x5gw*Lf5y<{`TakE!nqxo6DxMuGUx=@sv9ks{!2V_>olFC!I5X`dVfxs+H2e{XHcI z_^<4Y8I`wAR3^Fx$yheOP&rg5E#ve6M|8e>Fj-ac$n-F1O)H>tUB987{%EMYRMbP!M(`$PDJ zNk`+jV8+dus#Wux=e$ZK9P6vy6)=uuSN8fCAEsGk(^e`5W^m^3!^N9vJ9$#+!!RB ztemr%Zt&>p;}5~_3#Cki`G5Z5oRLsI^ZTwWc|6uy6mKHzyq>U8br)-SG0aieeK<$G z=Z5cVR`T=Y2(&-{c&l65*-HQi44VzV0BNj%(KztIal=|UPkyvut&TinHu+{y#EB^1 z9+u5wd#$XtE}r@w?GtNMi+P@8m=i~=6#O~i#izP&rR7vF7qM%g@|q^oD!^y&WoQz_%!S9yyK4V|iYj`b36f>E9FFF0-FCnx~B0nEF|5JRs zw*8FRq($b)qb_pxgn)KQZC|yVr2`}H^!->$?RMHiAoFOba8GpC 
z0`!%Y2%=t<)qgb1ooM()4y@PHL}|KANACN()WKCKU%R~K{APcB2k;H`YB#(?2{tPm zU%p4=r&U8?(SH@P7ak=s^@$$eXh(IU5by4s*YQ>Td*r~48;4**ohL2pa0#O!sc)6d z0;Bk6RthfxnZCo}ezYScH`KzJl~-PBYmP6oC?@$+j0XD3u86azt`&BAwzGb9e(uC2 z+xxvY#6Z$T-)^|q$+z{iV8c}}hll%H&s*s9zV{_sx7t?6A*4u@`3L&|;3@{)IJ65i z|8b$Ju6d=jm^?(Md-*6G{?5XRLZuhYMSNxJv==&OBL`u2Y~~K%tKey;NKhR>!CcE; z&#UbX52+s|zp_>JYWv9Iq$rF}!oDt{{!#KXY_*d0X44`LzTEg9wh~Xvy$T4B&n#md`w>Y412%C zc55lWoy^CN4F3=Wcoy>L?E~EDQ_%O7(|@mUr|x^tWLo~a|Mmf&jKU+Hzi+)&eni$y z_&>@#3AuOte^|u-c_^kZA}Hr_F!O6k2Ofp2#`jgK|LXMDc=1*3^NA{Pj8tQospDo$ zZvsN$Nz|Otkufir|5ycLp|w4I#FpR(Z2Z&XqH=V9OvfX5B`6}N(3mTX+xdC=&w>Y*Q`{@0mCU>60zS-VOh7%f&y+Y%^UPpp}XJ;U-@fAX@)tvopX_Mo7!rPg>fD)4UMXJ z1{n9%D8au>IR274F4Kb0W=%(+U=Qu{SN?dewy;>d5TVm`bt(CX5}(5DP^Soyw=bM8 z(mc6V)t#sCC-SQgPCcO&8=@5wG==jUR5yhvRUBlCXR}VSV-Q}ehcW4Q``i)5*#pN< z2{+HDTU+Ag_tf04?jl{2^#u60jNmZqFJIO&Q)M=}RQU_k=nV3I6;)wid$TpI0WQ^;WbdWRujQ@$ZcEvEgMr!WK_U_f6| z03a;2D|spW>vsjavEu7mD(1Eb_Z36>sT8g4eCRKd4CmZS=P`LP%z2R2iXMgbz6e=y z$bSN$Y#ev_!T-0YaMT31G0vjQTvS0DtSQevPV;vO!nvQ zDbrwwh1)Fv(MlWw{e>l&m!fmS7B;xO(S@f_T_vkVLOm-kt^#f9vy}ihFV`9SG2WwP zwCXnakYr!)AP?18{6?%~&sLJg_f|dPx3jL^{gbTSMqS{VBj_)aAdOrZT zG(J^iiHc0`25vJ*E-AwjJsAzMR~Jn`^b~_mFZuQzEQ#m5gyPX?jVFI9Cd5fc!F|;2 zdtSYf2nCCy^AcN$)w|ljcNe;{3mw>PWc{$Wx)wD0?#=zh%LGW%~Yz2LZ&5jn&a-(RbyPjy|W?@TfV^HxjuN7>cte2U1cxUSQrZ{k>p**pPtIkt7g-PE#HzrY~WTs%4lqA4KL2WbdS^RC% zt4TzbkKFBza@jwbyV-W>$Ktnl!x_e*?&0%z8}Ui&oqaELT_Oa1OnNZ>EP~QHeHSuM zm)wV#KBBZ#PD%0gXwKl8CNc-&mtq&n(#Lmn2KL-yB6jcOY^Ci?{}{>zXwRdRiu|PG zScW-Y4#@eUE8V9I_(#l!V-Am;W<%tI8ixs z)X^krNdOPtIiJ$?Za~MO4jL)t2wrFjZ6fX&YvOC%*HMoA^6xa0?D`V?NIL6BE0C2V z;^g!Xpds&7#-8~90O1uqulhr{)UNhtUk1EK{9 z<)B*M57*T70P1-}gPS`C9Bh39%oVmYbH&K2%-x?HyZ!;B^36IYi7uAM=u>~A;nW-c z6HVcbbcTJp-xhaRaEFIgGYBDiurV4Op5|6g5 z1UOua!z7L7uP`84j-+`Jb{Or?n{xg;~^`bD!e_RUrcY10N&eAT5o5YJ)+& z#{UdcQP^LKw|id)D5j`?Hcxv}a0;JckT50m(h=UQy#ulRB;ODWS6_^;EDfMTIzIf< zk@-U=V1m$*^PA3z#tU_!zj{PBPWr3QX6y~|+8yzWrNJ-gr5K5O*fqxaEe|#}lFV?& 
zHsK`5imB8&o1+`NdW+ihI6)}g2zo!7&V{$s+#;3X8jGV^&F?hKnsbD25v#$d%PijMpoCi7`G8_xkInA0JwFTbYJvldl4xwlyyPWrhMgP$dj$I zH*%ro22VlPf_+E)AZ|4ESgiM5nw`)!m8|mo=Vcid@)`)hcl@rCUYQPzXfGb$jLjdG zE$^@|1xL9on*TcqGWk4CBRe3fkkhrO(_6=5GUPy8V|8V>s+FZ6IiNaC1i!X~##H_~ zo}G8TEYK}E9AuyKOp9_rh>Ll^=W}Xa9(j_KJusAC7 z=eMZ^ft}VdZh9}ikg?PKRZm4Y^^|$c&#o)I*EQe?5l)jH2~IbjysnL^uOGe1feQLn zr)acaQW@F$9Z=MlfkGU?RHy{Oj*ICjcPCmAs(tzdX(uPA+8$8urPW#8r}_hfcZ)V$ zr9i1OMByCt-h8w5DPLXPs5Ri~l46B_N@p(V)`Y|q19h}8*>k~{+B5%HgE*^C6(^B5 z_@qq9IYMe`r1Si7JFyw60c&ZNE4V15;CT>fg;cG7HL~PfYM+!SJo6K)hw8(D48NfH zkw*ta{5AUkHF_pNM^n+c7~mJHxYuYY{xOfD#oFX=%wBw8c}ZkFr^sk9OkkoNJQT^Z z(Pkknm(*B(|D<=2PfH4*3h1PohXlq2sM3MkK!8V?D_{KkDs_d%`dr_F!2lZ40Q>Ty zzvx!Rp}Xx+;A%=}MdP!%^kQ(`ID>ZIt5;vIYo|5ZbMUijprMIR6qk$E@OaXVN(xJz zX&*k__s(D0u=U}I$O+9*y;Q3o`PCZyyTUu|Db|IRxTLrcl*e>anc|8J|!XPVlyn`k(Hx5%Ew)K+K0O~o4Lc9n1yX5peE_i614af-`*tfqJfazcV%C)w8%`Z z9(%f|(f&lKM#I!BYCuO#kC$4+XV*lGtCb5wkZ9W3uXAy}RGM?T@)3G)>^58+#ffc# zs@|sCOIntw3+~73A*V`OEk5`(8)Rd^v^Th>adP19MqX~C!sscjr?e-EEO=umG*Dk* zvzSS$u4PJ_>|t_o2iiPY=~T2L>-TCc>*ED~wX}nI-GD5nf-HQMYjQ$6<;OcO41v4< zRgvMNLMk068cx4Ir=OZ@9TQ90+b>)g#w9m8>x#p(gIL*kp}x{Ilmjp4>OzCkT0I?$ zIyVee#xCw;JSd3^7+2G|-U1N?IO}u%>lbw2bB)TkOZ8p(=_w<7LBBsWQ)NKxjPpdc zbgqTON&IT{zFi!fRhkn*c?V6*lo=dsOEQRFd*VEm2qrr3Fv(q|$$tYLEQt;I{>{%# z)HS5g7P!;_hS#sw+m>NMEPV_H#U({+(iK^sU~VS+ps1bXPgR;i{{|@TcxnE}CXS_b zFj+;~C)kR@i{JO)j?)a$A~Lajk5H?ad+tyt3~NKVW5-&|(YxIR^a7Rn0B3p{8g-eAZK=|Hl^<^X=d!)T?P>9~jsDu^ zsK|GHD_U*&dB}rbD;zY&`DXP`{uQ9r2zL}w!C#;2!LyelzGVN`yuJT3pVBzjF4fwU ztrLYp@i|O#B3JonsHs1G{CMm0>8U4tY;3SZ8Q|QIHa~BST#et^+augCmsm?7`A@Hg z{4##=H3s){WK3x2{ae<9sXhG(9D^k<9C76v=2ZOr{H9NECHwpPI}`I#8Ba~4Ki4-l z(%>EefnruxR^LsA%(RHS0@CaXKf=`}3j_lDV@4I0rrO)v ze;vyIr$pVX;+{$oNPeEWE6^}Eh-(l={$?kTtD{|QN!QHGjP2>*;9y>5CGUpXCQQ6g z$O!81J1wQ@cK7|3ce9T<>utHoGdjsMWIF) z7gcd1#^p*kH-F~bAWptr5CKIP)L2K+1}x6cOW13->+kFcGnV+a6@5=jqa&~#yz%U9 zgQbSzh&MYo_X=0jRJjR`1}O;~dey#2YXU+-I~Nz5gW0;x3YIb)mFpa)LeC6zbt#)} z=T2!@Fp&M#)&XSQY1EnRNeKyjyu3JaE+uEa*z3wdeRSODPsj#OOiYv-H}Z3CGjK>Q 
z0B==Duf%j8co$e^Hl3ZFjVJ_b$FM!sm%DfObRaJ~I}mrMi_+v36lm$||K1!-!{H&T zu~Bp%X(=cy{Muo%bVMa+etv$WmJnqBrg%X0e{p58ow~gO*U;&E-_owiL3l8-00nMX z@J{nh;2X_+AS@sdatDC)zuMfio-8+^BT&#n2`n-|IGjm5)6&xHY;Ch}&2MgQ@<~#( z%_BRnh`J_F7;ZBt9VOCY0681TRyIftr<3#Q3TAA6>);RypQ-Q8F+wi%7@#gZWyVl zA1ow~ActF2t54H-He1H(8XCf!lX-iK82_X1*YI~i=N~BB>Fy3G+^%)jbuIKWbCrTJ zj<8zbcq1SzJT^a15ByZ7gqzdT#Ii``nRr_qo@#{OJGKzzPR~$9O5uRS#>q)EkqL!D zM~J^w9eN-)2OqmYw{^>n`^rt4-`@OTX8IcriW}2m>Q>9YwfLLfwU>-bq3$i1))7*` zpjO=>{^6$%6Erg;gAt{2cKp8C?xV4_M854F>+@Jd42$$%j6NCAVdz%ty=6*@J@uU# z&3J&uTcURHw}&V!+a@Xmu+V;`+fi$po11(0Zf0+9uPe0HB3U!5?fu9uaj3#+EP*v~ zM?=>z0n}f0IJoNm@xj3Xrx{e@lxLC-f$|}KubBBaxOLKKAAFAUK`nrdsf_D#K)ZDD z^Ei*j#>VW6sU#&c*9YM0eb><=!*~P=_z#646b!r(|Zvgd4!CMz^dnGjdxY9P8P& zQb_+(#@!QZxJ4+`azSaSBOLEY3^eU)Zf@TF#*~x@b+Mm>oJN^n7YRM`4>!SyRAGBS z>FFEX=zPbCA4_IA+uA;rl$2z-%AkM{WG%VcJ3EuKV%j9np9J0QSStIg`i&*aM+H<~ z^Fa0!dCXLC>dcn$c;mq`O1J?=ctlG}`#b8W`Ow0OXjRx{k^Xqcng7)8Ax1roz3-<6#X;X}l1-9Sr!GBFWRNRVxi@Y`=5qgp!72OYj=H9;Q7 zs3wb_OOVI^0oT~1yyb1HFq7^hFPC`WLZqJBNLWe_8udI~Dw2MBEz=v)ef zYQtS#e((G!M!WU|9AqW5h=k(@N%tjXWJr}?{g3KqaMfjyw@CVPN?mR<|1k*ohqH-! 
z{OwTV)2vAXQ1T66WbCmM`RBi3PGwI$tN$&``Ce}$^58FP-+m2O_6 vFTVb-sq?|Z)xWrY`}N`5|64`A-ryxJHW9iH669Uo*>-$$(@ ze&0Xu4|VTz&pGGJoNLxP*0GK;P*z&x0Kz9U+Ts@cK8p|ryOJnFQ{F@%GIW{C-X zP;}JZn|Dxst%Ch$d9hUSLy5O=n>Oji_l8St{X@JE@`)*lu{OL6CcNHkIgF5wDVIYn zw7rIJ2=CsF==EMcXDn?U*;}u7x(J+eF|oHdR<$2Y;!?pN%4K=$^GXnb-@iNYI{!6?EGIh zt-weAKMaReIAb`+^7Y@o4r8i3-q1^j?ij;53K$Gs&=s;`OdrJ`!Veau+Ud<2{x;F_ zSfb!*jQUF~yb$LAL~o{>`r+-6t(VW%#;&a&d$+(@OIl??T^ag_%I|J>jGyHNmcMIwH*4eZ3SD(%} z8C6bFow$* zhnR#HEc|wT4y`*ATp9Xg(wFBJeE2cYqUi5>1~81xc2PLu@BZ`BQnRNbFa*hjuoYH> zrLtq~L^6Zvf7OS~bl&hs4PA)$!EEt-MI)z&C9x{vdL4T2{Prb)|b*D)7synl4UrTDZK- z2_Fld!o}Z_I}-O9g}g{JrvLG!P%0oZ&Sj8k@}h*X$|6DyX1%T*$j?X_KE6eG5%NCc zk%k$Kc>!HX$wYcZK%;RH2TnjhaEdu4FY`{p6FY_MQj(B{msp5>)(i{0W&BiRf;C4c z-1)Q>Q?u&fGYi7=mW8VgVF(Azh=~c9Li|+|gfe3%m(qW;mdZuU_Ld_x?bDlsO^+>vDh8T#taF z^7tH2s)wj8>XS1XL8#9AVng}{QoN97G&B+fd;8;BZ+(^TD*W&u33-W+u55bxE-zBv z!TbP8Tisbl{CRAVrb*|3n~#Xk>P`Zo2ELhgJwn@MxIm5V`cJ)vgiMK8<%T%01Tw1T zKi#*}xpoRK4-OihpWV7Y;c?ifGzs04OlFRW>BrCYcT-%Ld_Z~;vh+!oQ#nR`WNP+D z&xAN%TmH}ymA0Qyy?uwh=Jv?cvaU%2^_%ntOXCKe_l$Dfd=oM11cc@#O5>c8g+~a4 zp3TcbJ7=nDUu*`JeiEHXYaP`KrMkCX{erMOyEx1`aJQtGRWNemZ{*xlal6At zj>`j=j0!vD$5h1PO@|^!chg7{cMWmsY2EXHqR^WL+@5R~T)1%5}KS%_KW|#L++F zf~de+CNXo?gD(C|bf9M=qo`_p2^`r*bRKjNyt=xwy~%%$7EXN8db*xBS>w9S?R=l8 zlZ1Xa*l@cb2?iV(X>zjpovo*-Ke}5?w0}nl!PPeL4`tQ0QAe2MjVabgcbqWkYz0(I znEg;LEkfB@Tn31daoJNyT02-Ar#PRX;&*~hq&gIH=DxA-JIklZyG5@1iUx#*71>(8 zBw`QsfX?O_qI-a93LSHtXsxdXiGOr6w#w z=eln8Q!mVb4OXSub+T92)>0X1{#Gp1W$!(9LqtL{*(|(AerQCcOXSdRwi*?S#{D+^>y)~NJ`^v0|H6I(d_3{vSb{k~tZ>Jr-SAWauG8pX zhx&5+0rUCF#EAC!YKK!^@}Rn0Y&SDM)a_2sd3_BtZJj$m+*_5EhUs?_@Yty1VqCIW%{GcS^@Hx$yf(>==1MTKZHh+TtI6C` zSU4TOm32yK)0VKM(}{N!OY)|s4P0H)mC3^jiRZbce9&m}O{~gH$_5VWIQROpsUO%H z5pk-Bcp}AyzE)Ur>TX--4qy_%Qt1DjpuHs{uDFXA% z^KUAdM452Wsuy$=;;i_hsO&&(ZefwTrHvJagdMFRsWaJM$A5;;;-AuLJYqK`=?=Ba za1H===RoV9ha`dB4>P~72UC9cbrX61^(yxEm1}+BS~XO6haQPexUbuCh#O6Si=@?J z^*0L2fHr*{CP3M{E}p&R_Z1O(-n3^2`aae#PuWD-hbYLCF 
zKpN_)!Dua}VxeLR*m1-D*TcmhEY6P@)Xh!b+TotxZ}$1cF_0rN+ff-zMkkeu2xo)? zGh?*auzF1&m-@6xiqgOj8TjSQ?&@1q*BrGf@=d<_H~|MdkG{m_``ZOpvvJ&1|HiZJ zd0*hZ;D1^qZscoK_z1t;iOVM?35v;Is%(Wl*`kAiB6p8PxJax9m}P@06!&UiudjFQCXnM|Tqk%um;(^y?rJY1Fs z6XSstnOl20)w(Z;`?D2G!=xUO5PG#6?8M3g!-u);ub(HsM`jNWpCnrHx}UrTJBY#V zVJQ)%p}4HV;Mhy&%H|GLv&P=Lqn{i2NdN9y>16)Stz&`VA{o?~@-ICVyH#7W9-N+@ z3^1?RuK^*U;*4uYa@Pa*4t=Tc@%5(}j2to(3v{_`*d9xJx!Q>wa=A5@)^9dhG#E*V z?c9*!-&oYR2Eg#ukv@ysVm99`N@S6>6TNnA`hl0>S(AmgUuYI&HpNGL{ri(-UZ+rH zIy@G$jyA`zlXY=w&l@!s3g*vCRpQ3wQ7veJrt_7QWt+5XvA6=ZU;|7PYWK~I2XVO@ z;(I^4*l5cwv${tK0oH!Y>*|wdcHxM7{^cps+S5Om;i70K0M|2IWaU61g_EW!yER5~15M_s z6jPR3b;!~v@-&usMyFHbH}`bGvZHDEGnu#vKxbxoS0CorN-IkhDM;NSzqv&n^!F}B zQ7L$fxIgfooMsEf;{_)(Ds|7z*IHCXd?2{B(*$8n_6EJjI)&ei2z+L|cG$Bm!5t`B zSwQ@G;1a8k`aynDTa($u*m3s4nApN>OcB+eb60YQak1g@Tg5KB+;9W!twT|^N|^Lr zjUhd$->1VmRgc0ee@dChs!bO%&i03*g9n7yrX!yVwVK+ONhzr`YM)@zL_PgeAWEXn zYL??_EGa(Pac;U${s>^GWa-W#JM5Lcq(G@%m63x)EM%f7f6seoo1-BndPl95k@Y+r zajhMm3*T9Sf_CQjl1Te!cIYYou3>PpHs1+Pk)cTxwttp*710KznjZ1aj*ZYA%oGuo<+{OoIkA z8(pR5&xhq7pN~?G{Qk5WXUl~Y&*qly{MoCerFDBMi=nVa!uo8A8gj`KIbyK0x+;-p zqCY2IT{+;)C`%M zMngdXoqEAjG&C#aurV%$XQf8nnRl91>bQIQ{GtLb#}Rsw4?j1H<6#jX(ZMasX19u1 zuJqVn>c-oIA-zd=zJ9lLYFpxZacBRGfIU)Sv#0}Kz-U0H=c#9L0K%`n>)t&(uz5DW ztgOZN80H|}&sCVT2sFWP$;w3q3vH4>BHGMgutbYE(Fi87C$?ysSf6?7NB^|iQpuM!enrFDeVLBZM!LNKL za7oM>A>uw9-(J$}}sd{&;T$wUOEUVdpRV?_jD zjA7YdiZ(`)qM}qHVW}&hbl-fEP*S3g`nrdgEOvN$)L$AS5&Oi!>8$j+gw&xvYgVTzIl49$NlSPXw zX@{g6&=TZcMYW+Bu1F@cM*}IBYsZnQ(b0~?6Cx4H(igIq)TV8KQAMEzu=-?vP`IDndX zHDTWOC;&mQwS`KnmUF!)W+j#jsVlsSjK%#r`Nt1gRy?VQB64b_JVo7h9kYe)Br-~w z6s3bHbbwo~rz;H@q~n=Sm^pD6inefu>U4*;YnCAK#l;+XjK|yyop!g^0_!~$_&C8< zF+WZl+xl~?#+kWgi7z+TYHioKIjrW%JsNrUW*VRsP54=g;$hpT*bGPYj<^jEqn!N2 zWs%tf%x2DK$+(E!F?2LdeOKWa*$diSSs(ypi$$vE3dF~K`)0Cr{%j=EM%IB`qmnj0 zARxeSTYzUw%g0xwSohjkojs>z1@|K^2njR~^ZjMm42Vc!Z)o3o)kNNuId;g}R&$m) zueuveZ!&m`ZpJ0Fv-rtZ3VoQc5@;Duyo^33pFg2CkV&eO-0e_O<29gqdRkq2J2cCK z02Y9$7W#(!4SHe-5&j!FC;@(sJoJu#-5Qc+bSf;){zbD>CPfb!@!XS` 
z6>_f^3nt%ZrmmNXX%06t{ zSr=z(C*y%2-fHN!@zs1$ow`{e!`bMuPCXfj)!zQ0k7O&p^T1u7Zgh-~Kh;s14o7Wo z5JiJ4HL?Hx(4=q%#k6v~YH3aHjnDCJmaA2HWS%h`Ae~KWhGjC4I^#d6B@-#KxS%B! zjuD0>ek^-B;_MGP9yhdgj04y(bQ0H7xrS)`4^2nJADDekyNYN{s8wy zxYCK`%I7{_6-ITV^e_O~yy5zy87|ARiSYdqpN>&~IuW1@S1_<~t|H){hmKTPar8vd z57eF~4!4U@fnn+YxyVI77}vc#fT!rC94Dq>C5xixduDqwrUnps^j^*PGLuEU zGiSa$sG7{NJR7WiR_~;7?}QfOLqj7l*>N*=QkTM&a`VyT=L@LO{K?XTR!t1cyZX`5 z`&`yrs$PR`rx#iYm0OY*n(f`-cKAl(PqMPF;3f;_Gv!863P{h&67kwLugx>=5VOs&)nS+E8<=|}sf^go1=O=s=CQo1B!hOX zU=aH}A}Uh0%b;geo+>J|QwD=K<#2Ys4i)F(zex^A8=LNnXn@l8%q?*Eis9eaJD+Q-9KbCpQ(?C~ zxgZ?ne06k3ZePh>Zl$cy{fA*aMn?_hjaI^#Vu9>yeXj=xBHnb!>hGSb>gn5Yny5Ec z1Lm*FQm{Rn<^4nuj@9e{Ir1bf5HTQoZ8NO-4@pL@VgXviEeg4V=SaQ=@%{5lH?FT& z)!jn=tHL6OsQ}N`I4tsL|Kw*6fO3x!pB2znd<*Z&nH>BQrp^9@PtJ@>S=xxNgmwgs;W?g}`B3OX4SgLb1N;LL^{W2T6Ib!LK>gf$(+_lR3-yj zvPCH0mX=;sn$LzvROa_3@P}jd<3(`U370v@%+))I#DB$Fs`sZHOlex$ZFw5C2jvLo zagZL#mgr3*?G!UIF*%uGHt7lQ^?e#19$szO+tX)?QEhWgc73{T417L<yXb?q#l^)x z^}J`2I7_{!Stx7(+_K+aNL@1%3$23au}pw`%epTr5n1izv@?C({KkrWzOM1+9u0U_ zy}p#w=idGy1|ZFau`n3f1GXn?YU`WLtsPm_ehrfH#w_L z#o?|-%?{g7m&*HvOu##1&J_*7ATHx))@_zdlf{&C*V#HG5Rm0>GTxHf*!HwlO674v z%qeSZ>zIvt7<@kYB)T0O6*D&OPa`sfdH1ET1xjhympb**3w{*D<*?z#9m!rj3!Kka zt9XKhgcK}HI%<5^QJL&A)#!vd%Tc5cSRPu{`Xq78-jm)w;*yFAz4g>nwQAjG*@Hl$E|(sXjZk;6$?lxY~NyFgSG)R7YOA*ym^17kufvw_sXWD0+O8q8&@E_=ds7zJSy!C8_Vmh#w?> zPIyO2^z+Y;n5lf+Tqer&=2IQD>iYu1F`5M<(+$+$c<{&EG2v@y;Je3PlWK4jy#07n zjz~0kNoO*!RfoO$Y|j8nID1)gbm1ggQz}Ne`$_>lTXQEZ$T+w7eE$80f2J0z-ERTV z5z!&%g*^P0pD^y;v7p!uqM_yW3J4Ho&U869;w1ql=uR^&593jOYiDzf?+VHETrsIO z+Dg@TP9ZA9b3i@rPAA_!zAGHf6QL9iPRZ0hOICkSIjd}Avpkc{+GyfFD!(gYqlNZ< zz1uA^MrvLUW#A^LQjHY+;q58PPcC9NEgh~J~dsYX@?Z#W>GYxJhD0W?R zFqSElR2J@c-xg^-qpUeULc;74srZdK-LMG<;F-yGYToCmeE81LRCe6StW^%qUt%A# zE$?wS4OyWUQ$2&Dn?q%Tx?bKN;Y1S$&?F=z0PKs>*`zn@Z=x>JurxFKczBSEfExDG zvXxyjo;wlBZf?}JC5B9VO$v94br%NWSMkPT2<^QcCXf9Axk3jr*uyX0R~Dt0<#a+h zlXW&|m(J8s_x2Tz|J;(W7~c>4_ISwn7~4qk;ab)GA%Eb{T?GD)rYUw^$D@!ArKiKXwAt#i 
z`K?^|Q~Sxc<&QTux|h!g2==Je=0bjctLr&DD)~5@qgkR^s*?w!xJcPG)G)GN8+>ie zOJ(~U)wJ5J$BH*y3c&_&Ig(C8`2{xpw~8!G|FEQQaZ6VVZvKGWU}MWud24l$bhb4k zt;V1GPz|<`DdDKDg+)kmGM#ihi~bn?6u`a}UzD<>c8xhrx+SX$ExMyyqd~mMRcUUQ zP^oq}eAMcHK?-^q%TW&BQKETwZ=bvOdA<~}uyDF7qCGHSoCbpc@4K`~z~x*eJNxVF z+Ei8loqq?q%);p9OW4BlSD_-8^wLazwW#T5<7vc$2n)FN+sb}bew$wB6vEt_^1 z#h~(?#e|n}=OrgMx0mq8o3qeNiNu)Vqob`8Z%@xmv1C7OKJEx=1Lw%eGZJK6{*)Lx zy!e>LK*TwWNG?L2ZgYA1wkI3Cu?!zOkhiIH8ei{nn3M9ZD4`dX*%@<}TLk8ru_tht zb@l$#jpRXwLDW%-^R7f5O31F_CP+toeEm9k7TSX_(`1V1llV5fbZsFsaix4 z9xg}wu&CpxS)KD_AO3Mli33!x(sB|0y`#^y?!)fRg%qE;dH%7}*^_VjzNb_egB|=m zF;rc-rz+DfYf17GzeHxA?I^_~gawc-Iki*?0b(iuWvtWqTho4^oKENc^M$XwY>W%5 z*``hU78V7rIC*jf=njW`vZbB^nNx^Xi&%E?2PG9dt#98l;jCOzw(ByYW3em-*PJJs zNd$zPCz{|ih0+d4R~tL%t;JPwgnj6a_ymB3YU$2G6HfD_d`s%@(d94#&_HS8F~pxU@#0{~wCsMzLg zd)&9wxM!(ah3X@;kq?K>{>AAAGz<)Bs^MPeWNjdq=tiBEjxL_fBHdO*dXV3Hd#*tM z)$P9PAmSzPSD;06W~zG{q>kv0c@wqP-m>>@@mIWL1h?(g`y^(QbeF@!X^$m@`x|=` zz$$_P2xbf)ovsb{-LF#;2{?&^9$_>+hz8cWgMNlGcSuWXNw`*4#N77xp`lt$q_n)j zY#t2h;#ntRjWJQy5Qtuj>ZyYbA$RWFVzm$h0=;j5;woIdgK?%sGAf0b(w=dLid<3-vPHU!DLTO-l^IBfn{G^-NT4jjOP^JDO-s8ik}~a)R3}5cE;{_omdASrz~q{kAF&lw}tI&AC<#fO)OsbcyE^rVN>-5CZ zw1a9uy>iHnDtB@Uz#7)86NKV1xt?YR=)i5p0E(f8ID70jz;k%}d5mU`j!(i2#8DrA zrYonwPSbwUxF+^Tp-`*N)VBT+GmjAkFSFOtlbtcjuaf;@+sCfN8xJ=aBqSt0K0dBd zp3dKF*ToZ}xW&H@e$?rS4uZNZL+a&<`L$}xziywsEG(o4m^a9jWs~W8Qzx!UmpUaS zHdi4JPw8Fk=Fl<*0q@;!ECumoZV#n+IIKUL!x(q^(pU90jVmctsbU`a{_ZkjyIVh_ zO1X9+T6{v`{1$jgEZHXe8{Xm_4+HSoSz{ru1lyQwP9zT&FDd-)8@|hWnK^HL8J;QM*i}Fb3v|gkk_gGIzOh3|lq;$y=>= zIQ*HZVs*k9X_m=5n#a7qcqfy0O`^})Of^vtd9Wp!bLM-SYIjG6nO=I_a}fh#ta$a%SXfx)rsJ;#4tslQkuY5=P|;_M z?Lg(vf1ytc50rX>@%f_Qns$5m1xh_wKv(S`)i4zZA-&7uxM37%(Qz*O{6K5;cuExo z<)SZzOKf`5&RB{>Dq&2#DLNW+@qGK&UUtqaol48AL{}#}8Up|zY%V*v$XWO%QYALf zUt7llPeP?O*T2RICMA6O>t(e0IVg;YuK}V&!Jl1xyP`U*%85h~#xX7*g=0+3X6X+W zLXx;@{8=b3i**;i4y2r}^hEE%BHLK*ehp+Zr8Jm-z~=<)RPqpSJ}x`M&6V{$@#MRs zE`*e)ye!w6XuaebSg?veRP3wQ@8Qov#MzAxmHRF@yso^!4{c7R(FNb>=_X}KJ$?Fg 
z__i+X39m~>?YIP&y*0jC88^*6h_5j4Z@yUn0@RjZevR2=K=S6R>9tJ@0@2*DERmTC z14VTFh@-M{OfU}rw>)hTz&H~MNG0}Gm~o7^F62$U+5Mrn3d%xsMM@d`6Z5yIoRA#? zqzx&RfmFV_iE!$V&novBJtn`1!z%A4j|QDyW%pefKsz&SiaVJ?GP+eGY$V6!op~2y zp>!#&P%KLKwC#`YbP8YdqY{(nDUN`gTS$k%St84Nfl3us*DCdU$ID$t^CO3Ht648& zDZC+2zV}1k`B+m*o~u5sg*er8u;y;*Yg=;J#g&!Rp9H-ZIYbHRN@T&`9{rW3;_sKQ z*`%=@Mi;J7@r>7e#0LbwTMV= z7N_n-V2gbTz}T?5>e7!MJWv7MsTQb$~LzVOfaJ}H#cxqJCT>SWMWVG#RP zpTbLEIjr7oyNSE)o@RqitMS{Oyu!GhZf;J2uj8O%YV`g0Ltk)i!XX$ zryMUb*2^W2PNw6qT3CK?BhI(THvtrn)mF~WbuJiJoTpL5{;dS$~c*m3FqwROL4Nx;L5*GdHQrha3fP2q`MID#R0EYfX zbZ~IM+Ddz#kDpH|$TSEzzao1Nzpwpj8f|g?0~U9WTW2{zg{~9T%4PpO-=7kL?2_`* zbi=^(p6f^k8G7+3>(!yO)gHr!qP(t2->z^{o>7y5)s6i+*!;addC1qb8hI++k$x5;g3YC*4K1vd$cRT=o zh%F0VuS!v9RFv{a;f=GIN(4?J8XHK=^(FGk#_wUmm?GH6ZDPPW0eC=-r>K^`1+(Cf#S$0$61uuli?$x zBbub4;@4MKQachX1kL~niQRNwwo&>I_KEAGKRo$uYkDGx}w z;xA!3LD3o-l|s9fHnn%kv}PwY>;@n!tijaZjnR7Y)R)op4nb3^Y_!Hqz#WGxaHAjp zoq>ggIr_dzSw%_OK1Y9z%_)2IZ?=`A;YjebV6@ifC#PpMQ8q=b7yB-%_10Lv20a(; z(L5?B#{Ee?lkIt9IpYLjNU=HE`%03(<-BLA-LwMi37d?rP%*lRH&0`4Y8Z4Hqz{Rb zLDqzNpo9C%na%nb-6B&s`37kTk4b+pPa9ZHt{e7(9y@_V>%c4nq z;Nd>#TI1k^WR_O6OiaAs(ZJ6yAFg1?ICE}kAqDCFcY3#sVO_Mymqo*$B^#%5yf1fB z4YH(-KH|CH)6(``A^~G*Yo~rFDCov~js8K@@wJ?dWso!?jRf6Ar84zE4is`A_^bae z1Qa12FD3Bddq2NObD6w2Z0H;FKj|7weG$DINh)GrnGfAt98E)RnYBWW3MfV^zpM!J~Z|{%gRce*WOnLkT92Px=xl{w_ zJy44*=I{>0QZZ81`ZXGS;ekwwtV<1&!?3ZP`m(OTSGG2 zMQ;A8WGY#o)2ObSCiOjQ>>oC)zlzwZ9f+6VN^d5L>aVl8wFnQ317B z>a(mTyD~|<5{dSFu#!@Ne(7#6K-7Ve!<#C=V%|{5LfPrbe*Yh1dd`T03@0SK9S|^v zMuhgsFp8+5`5TFGuy97;0CONlQW$=5SQg5F86EA=7rNrg@;@Cj=w>u@$}z*Zw{LB% zSpHQgGl`r2kaVpXdnCc-z|XHEUcAuRiJj^nMgR-*POx(?2b&al@xnDz# zM;`ofi9jBd&>=XXhCQ71AKmeaUF^u4_$%97_!!#*SJ%cU-TT{L3ky~3&G|41B){~| zAD6w>mbbNgQgi;!uJRE)(*Myo2mYfuKK7M;z8*OXvrvG%c7F`U2nJks&$)!wREGK1 zSp3@vH}JM|VUE~ce`eKqyz^l;X)RyopPuZ>u}-e#$ol2+1$a&WThnU00nrF7jfH|^PZ(}9|4cKwWg=k@-=9#NS8rllmgMg@#G4;59MX2hcKl;owT~jeB(3ocTzr%7(n369)m?}^i$H+5h$Hu`d`M=`BuSa;j)r1e;Ko)Adl&E{04&kV*bR~q}@@so@J1+jU4IZSSsE9F>7c5F2CIUPQ_ 
zazfp_pd#>@tWHP}pTihwTdX87i@2(a%C-*iAog&(Z39P`wE7D$7rA*V@PGgr2|>`Q8tkJskLi~-BeI4YZ+1=$Gw zKeGeUSFf;y%f8}E({&X@$4`~5gDvOZCJVv;Xs8E8^_$PyypG7q{&x8H=R?1L zf0LB|a6a|3{w6#w^X5P5`eWmNH1Mu|3&z&>7KIZ3_UQ9!;~%;Fl_WW(0Y8&fZT)|q z79sP`J~pAx`fwQ)`X9l3TjD5Ah;T+N6Hw1sG5+Mh8OC3M{3D)!Z_;(C9`-~yrIkAWe5-GqT)jfKjhaH06+~0A%SKQz&SF2>Y^KgW@oKW z1s%u-b6M8blKf*w90pp0#&B)l^;*tX@-*|Gmm&sgSprS!PNC54*w|p|RAu|Vg3J6Q zv5NlcnA^@~KpudEJ=N8>?;AoVr$eiM4JKe8@&??_!Gu1ze~%^rZImN*^Nnb-Csjjp zxys+^k!eINyw5zCcS~_z+mx}kMgYkcAeVtCCfYu^k(`s$2h?j)VmAx-XN>fAXQ8$H)8H9Kb!>S^m{Atiu-3SRqS z1p>;p9egLJCe1BI6c(c<{i_e~R*zHXD2}=70z00lP(oE{&oUJ-Y# z>^*t^P+!v>KN>C4=nltwa^4D3U66J5)JC3cM$`~YH9P_uB-nfC6EmNksWdG-i>6hJ zBH$`bq{W9#Wyl%O=sC)D@#sU+p9#eFvH>^F2;Mok%4u^#d*8JCh^aGQ=CCk-wiq)) z)kCuzm&3O2HFvq$Cs2eZ*{De3bc%@|rC3?g4fFQq`}piaE(PeuVD@!vQwBHGWGvh; z99QZOv(UE9fzPbz9Q6w(c~Q=5yZ(t9dLZ*{MW#f7wbOlI0VTcNYQ&7phh`vpMe;*giHj3T62sfwXf@&M7; zOv!lA$M=Ga>&R_QW+^DPhq>)Z-zYb?Ao^|?O^FS7Dh`JYo`4LIMQJct*U(VYtNlxI z@mW~q{FCm_Zq9$)Ra+Mw#5hWad&CeV6coJ^nWkdLW%DxJ>r?mpc<{oak>1izm%hV( z@vlQ7<}mV_&h+v5q|b;`Tsam&UBCxU?iltamHG!olE#nB7+B;2difPPR<1kVzpiSk zTcZEzz-o*-c>Bv>!;uHL0e{pjaIcTPBon~h1uZ0Zm<7u&6=WO|<$Uoau;HOPLR}_5 zvxB@m6-WizE{F5Fb&E9XJk9K59Xn}q&pW^K94w_7z-rUXm|uVb`NBK~uM+8e%-J!Xfb+t|z?P~q8ga$N>;o4-?x z*i7=rrj$;}vN|l>p*PYW2V#`?yQVLg4&?YQeL2m>-j%Bq(yQS*U#+h}%jc+sS;FT% zJ9>vg`mEMqzvXjeo3)pz3wL! z@OV%w^L$u57qz{&q0E)~tT`voTe9x>9gDgzq?dA4YYWV!UBHZesKkvX(*8dNrPh1{ zygythO63l{^5G)VAMefwNuB$uX_39S>c8||mBr>jLtlK|`}sTN`t91=Gr~LJURZcq z={@V34C~i$Sc#*qlHVY!yr=`l%aSdXX_37EV#v0@XR&M`FTdf~ok>D;0)2a(e_+Qy*5Tu5C-!VNGGj4m8dwAeDv`+K zk)$+kQdISlQ#fRo-R8VUE#)~c<8~sa)4NE@kaed&@XCd%eGFwD8J6=2`42AC7#(4T zyBif#iMjnRtk<@fC}i+@C;H8}Y|oy+4Z;&G*+JhWvRie68DdaM8+DX-^P)Qb0S!z? 
z%Ju3Y3~|t)(mB8uM!&hx@hHj?TfKq(KGNRAV!TIIhK(|2zc!7H6w8Z7Z{ngfO79!Xk<1{8}RhAearL}E7eXHP}kQ;nB+`s~!`Si_}5dtbP z{|Xp8%HCo}PtyYl87JAoZlyypM_ti>@E&YM>T9ad^NUn75!wtVF%&K~EV#dJQPF9z zH&0Wuz)9gv`p4}Z67pQg7C6I?7c*|n3+=&N+;z1Hv zMfZmK4=Ihx<;_i*$d2>viPO3#EX#p4aX-e%o4Re`2xqE(S%KvN7`m$%gkxzoo8lG zZoq4ab+xN{e<^Ro*SIiAtz3=(>aS}X1V+rh?SbEC zG3j;P#OO4ldV)oJEKQN8)O0ZPL$>Tz{oif#*DtoLPz8O9FYCHRC@2ab@{&eemW~YLP)L+ zT6~7$T)@m(vin`jL4RkWAP_vr(2h~)RjK{5o@Yw|xonUW_6qRa@ zVrkW<`&zs8zVN00p2o2;?3nBoGpqc(U4rU-ZO@C^Hs7dPpNAB zuEQA5KGaQMEi4-IhT;I1)b)GZES{h3_~+FuIyyni(6RxpfVlF`T#^LUz>3p;@2Pa&jdzZMp+QV?8}jz<*$2QELYNy%nF^dGCoxSc+jAs`r={T)z4}!?aeZT#3FD zM5VN$x9_IzvVOYS2pdqp>);p)?@3TftT3Pm$aCeV<-qpsY*@uX~eitXh*VY!D z$1&A#h#(S{ym6Il=i}#>>yJRvsH>L563G)w!}eP#(=>y7t#3J<2z@ZPSfP)9MlI70I=;w9`daDtk=DRTV8v6$P~#uEr67+`&Z4C1n=3LNAvA zC)|+o3(C8Da{gwDj@Y3@JRdSTEUxsfemXw7Rmy{|*SUY3Ds_5^IWN$tdq2FI9nWpY zFS0861tcD+wF%?zPFjZ=qv+KX=b!$Gw@Z;sY z{63rG=t>PiIO9=+T(s7^mC?^bU*2bwys>k3p{>|Jf^@mN`A0Y*w`I%l=(YGDf)mdW z7cbh1;ViRsv;IU6A)zhN$QgQV7jrjft~A&NKPE96Jn#~RAN@7>OqO}e0RFsbF!7W3 z4Mb5QD>x)tdERivNc2f43UzOAjmuzSF3V;abcqQP3u5ia!JNyV43P&{Elz=WW+TS! 
zI@|MSksc{Dp3WpfrF=&hfMSe|qSf~KYPw1(a`p6y0V58hC?nUjzJt=|>d>(Z_8rR~ z_csH$AwR`dZ~|m>zI&hBn6hpVqlI&uTjZ}){0(ugwBWUJ0CM^0LhkYT{!FDjs!WLk zTE5QlB{vp>RslF8A28UG?1E}-bN$`cNIb3_+F(p7p_0#JiXj+~IalizmaoR#=_2EV zOCT*2CH=Uks2N8CeHMqW$p|QG7Q?0tbXmU@gecNo6Nr8P%Q!Tu3Lq1=p|(C}vbfo+ z!prf%e*2bmy6i9jMrrWV0E+?;!vgfa1~je+XyL!IQoUQ#(y=!**-63UJGmBc9`F$L=@MJ@;QOm259g{`3PgCp+mg@KN}ey{|LoWZX1y(-K@_gxf2)&aVpYg^ybbr_c= zWZQm^Y{V-3oU1)?_%kboU1yKG8*~hG%gJg&3cn*o@-1L{kcqeo|1jZXzS=ZO{|q$n zx4pcfUaz1usiTQvXfL#YF1ZYPyKqRe*zItF|4rc2C9`A7LIy$a)M9*f@7iEu5Qcvh zRp)flf!2)e@tpe|kz)d{Nyrz>>%|rw$xxl%jh9>=U$DUxYiyU_fb%=jUkO|NT|1Qq zKO#0RWabQ}`p`lb3F;4%Rxide?s!e3rtOq1=6BkA+^;KDC@nR=KQvU^?UP}2q9~zy zWc#6Bht8_V#MKXg8Cc)$1WqnEY=G0y0pKFl=JE19ew1A&SZ@=Vy`LV7mOJe$fptwH z$cKZzEJXt!2S;6UwzQvpE5KKVO}puN3c*}iXaxBjy4zvg9PE0+NWqW{y^3`1`JFy@>FeZC6uem^f9!5!;6a*Tp8vNe069#n~Wok3R9Eq!5~eTP;Bkg~jY)1IlIPJ}oB#-mj8;YX#si z3LcQBY;;G$L1G27M_CRp+h5bj1zOJEW?gw1&uxPLkHBzb4VQq+S&(lp+4+R*+YBU; z(naTBt|B(oJtfQX;SCY*QJQc;hjA4jI5up`vLTxkZ3SF2xGYe5`O zRdKU-^gMNIq=bm(2QZG+k-O0>@i}jBGiW zvCS>fUNW!m(Un1{Ke&nMSb8~SZF_FeDk2y5)DxIsb9jcUK zk9S%9-;c=h+>}Tz@=bCe_N2-<1?@H)$CZ*e}Fv#>OD(p;zdJ?`KTkO)e z#~kqqhfxV0(U}a?KY%qNJt=*uQ1}`56a7fVRc`Nz{_d@tNCv94c_-8px7%wi-ra~>gZ zemN#8m-WFrXX_a}2DPaVGTFX;yI;OaZ)`XqWv6k+uQPNA$Kpn9%Da7BGA;c(wkBio z?;g4ojWNob>O}l;adLQHc}LUnqBnvY@EwR$_n?hI6Uwo?q`K#1~LW&$ui_ zzr(RYL|P|JWJIKOIKk_P8uafRKji)b@r*8dL|}xGOq7vEiPh|nSH04<({xKch(^`+ z_;!~%lC{@}M$br&%wlF{R4`nlh+N{;7xJgr-3-It;lu+X;(ZmKHh$}Vl*2GQ2xL*4 zr^$(kwE}`B&5OvZoo!R=Z!7I-bM3{A9j%~-S~@D@qgG6j=fMCGvvt$jIzD!(?arMg z@y&*hcVOlTlCAG zWp+>a*sge^xc9PfKJMy>%&5nGKZvq5KW)F&8C~`Aa9vFbmL(EUU^BG!NVv3ltuTM; zRt;vVlst4-%_+P%>cY}dBj(7J;*b+Sc4<2h5fWpR#paB|>P=MQV$^-xT%gw#Qv9R) z1uDa<>qbUK`QbcBi%x_R9mqmW_;mTUQ>D!bsXN#n(E@kUi`i+DVo0hdYQ|Q@?2106li| zXzO>+TytW3KGFRupzmxc7SED&#vJ_~8j52!Abd=I=D6iogvfqI8=tivH*~&3%1S2V z^Lv!NUJ~zK?OR}z^Q+4Si-WJDDwTB5)URx+!_)K5)29h>Q-htinF#=Nl{xLG!ia!d zqRgSCykRj~E3BzYM-oy0%odYE=v9XGf}z?qh1qJfI7vud``r4Wg!CqbI3{RX)ag 
z@dmQg<({IL-ARkBCIi=4ZWJsIx3|;mIZ9RGASMZkx0be%(Wg-JLlB9rc}J1WIM6Q?SNH#u*L=e8ZF{Iy+!nw zml9aq52@v(%Z*uJY)muFY0g7t&!tm4ZbRwdGp`GB!D-x;!r7K!^#;?Qb)tKE zozZiPSzPmNaYAr-gpJ2MM=}&di@~(gSQRq1Ae)MUjaf3)7L6v0#=@qVn`c>wHHddxJ2wxq79A68* zwq)$&y|p}5@(9GnmQW7Sd~*+UBYLsCPU7w$iZq=RyD_nGeMz1AkZcC8EP&K-L{ z@zi=M@-uGfrhP=Xv*7C_wQBOQ_6VpG8u2jSj7~J z9C_elPcLpbS+DJieO?OUfD=W^s|f<*@yeh-;V%hlTWf|a&N_L(SgRpvr+$0Stu>{C z3ki>-lJAZ81I>#jBCp$v9S%F(Ke!z%B};5ssN`uxfGL+(lqYT(wbq~hJvoGo%Z>=N zCNfuBTbH55~U>u}6 zdKu==CU;shQAu`*Cha;I;7r=7Qu+sGEh3YC(@97fF`tt0&dQb6Xdf}_&w?fvs1f;| zSCHGQ3}5uSq&DK5DPr~U_00y|h0pxYN&1S#q{$>>9#}2u@o!PyEs6^2-deoX)wVud z7ROnXp09I(0ulqwzCo2Af6gQ=x$oM4lU@|vUmuKxR56f4OYr^32w7PN+S(8)xX7$e zxP?Pm9ppmz422|5o@gkSngjlbm2gSK+}zX@NpY>GxvO~zGk#{Kb%!j#hCM~3o{i`% zSAE8^nl9LXQ)SFzVHBoHQu=K_IRZYMh3f`IN2fFfeV~gC?JecN@sJ*XA55OVoI}} z*G|&J&-90L=~Pw298MVTsHyRR@_|x`b*9XZm}hs+|EmaocMY2X;ron=Ysae@nS@-f z-)qC@fr^L@(&@+Cold?z`r2oHDxhhKG^SItSnA{fL1Eh=tr-MzDs^z*RMY+WJeAS|iQo6Khvd>JKIgnk6gVAm zuMClq^Ibx|avg03ZDeH9DD7qh9xQL{dCoR^=Nk7IlMRzlgTj(sud}_(DmI#H3*>XG zyMMMA94u?7BBm{7uu{8}r%!#y6vh;SKyKitaweT%OceBU=mMGd%a?Dx*;>P(9q5^=Yd z?Ww|X{yG^A1K8 zliX#0!k<*-`%6S~g2J(0=l7t}dU|nYy8!xt z^+~%wNoM<=pXNKFyk`>0`^wHYg!IBxb9a?g{<+$BFlZvR zNNt+Ul_7lb~)3*5+{zrN?fns@xdbQ+0`id={Z+M`8uaU|A!$uqsyO zum+Ys2O_qTM=mdpROL%0dEvCKwJ~)MpKI3%G~aU>Ine-`z-hi+`!{)*c&c7oG0y`! 
z)sQU@KY6>TV~`lk(<#JiCL8T+m3IWws_>WT-#>F@a#+m?rdA5n>=QV9cm~qH^(`)ozBDGh{)wTcHE9_P~Lv#WyNUAhghqyWRq;HN#(`mlld4H zJG;J2N)xc9YZdbxX2&adU6`^$-aSbBsKBDn_IpTn<3I-6ak0D*P-z0DAr3>v_5{!r zt==y)U}4ARScQ(5b|bHeMFN2BnIC`58aulF!lw7F<)Zb>(u!5jigmi4SJ(KKI=|+X z;3=Q$XS&W?oehJSXT*71(^4>Th>`RriFf>&xl!tJaI?rrwocf{%iY~2VadO2F8clf z#~JA06qFP#&gVH0cV#v?-TCmV94Pa3n`FWV60xWuZCjEzasl z-Ul74OxF8=d$G6<=f{*_ZW8Z!&|0e9)wC*mm1rp2$2H;XLbAj7!t?vXHVuQcMiA@2 zZyvvEwLg3posgY>u4$?}*+`A}jBh5-ZU5ZI;*H=NDCTJz4w-b$Q!;_nUnuVPy>XIz zZZ*yOB<#_VMNyX9-BO=Fe;yF9pPs7^p&r6#b*8adW!2jlJ>YgagVqv ztx}Z=msBe-?QiMX5v>aL%J z*cORzjQx1x;z%W**43_P^)8H#HZ-&cmnX4cIjlf;L(mnE^@mK_`k!!05NP_uu|Lf5 z4omi{`Y7>4A;;}JJQwW>lhKVBfPp9G%a8dNyS(Mgn|9ZpYI9pgixT5n;Bi0jt-1km zb(w^3@y(JSgjYkXb&0(`X2RPTWIx5T?AO2@1~_koaQa%R_>>4zXPVnpv8OhMNahf= zEpcgbxH)Y%y8bZmG1xS5uWxLGU-seMb{>l1y%5SZ-w{SYa_42z*cXAoQx|Z{GtXPT^br@LBblDOLBgo>R)V2~pPfTz_Uw`;0)XV+ zRR2zPRyO>EvY}$gLg3m@yp0$jk#f)NmISrUr<<ko+=I2#IY ze-`5hKa2u{HPh>Ax0^EFR6+U`%Z>IJwA4TTI5lGk)M))Vs@mBt=P!wM7j2D?2pZ-{ z(={;gs#AZNGejNq%W;ZB&sKA=3C)ng-Zc`6SjXL6#uXtU$t_!BwU<0t=M$MyRZnqn z2+)am8UaxNYo*_S5d zFwd2EWB*z^e<*&yut!1R=g=cO#$~011nYjY@mQNH6DbT|ioChZ&~A>X2M1IA{)(5^ z9l#D^p?{@7xxzKbasjJzP+T}iRb{x!$bI*@e0HJZoqIUv7)C~qip><;d{)eO`FcEd z&l~TNi^aZ5^_wzRzrrCPWn_C5O4FICSS2$;-b`dLEIa+bK%Q#hih-+c!}D==zc1X1n#eJg}V)zFe=6!GrH{8IhriC zYY$gPK?G?5_`W%!Aa85TUxvpCyUdK9Ug$!daT1cUZF;6WN_k=cE1%SKKJD0YQ$Ml6 z46;yOA6G#v-12D(SoZ>PS^TV^)Dq*dVi$#b@O1>CT zseTGH+PIXCg)k0pq(3odvb7svh}sp)m#W>85KJ@E^z$0e36(dKUYA3YqL%Zb;UG44 z?$*hMBtYaJr591bulNqyy+lKuBYVX%WOU+Gqnuav_R0-pO4jPXm6bB1O11)7W1g|3 z%Oz>wWm_H0ZuPExDIU#f#7LLSX=f}}VHO$0XGZsd#mo~PAib{Gz6w1d$Ae{006k?h zUQY#>uN|s@Ev%Q(VNN)Y9RuJ-S(&M>69oO4QV~^W{s>S~czPDbT=%LDk$ve1<0cx0 zpa@TQ&iy8{=HvltZ-VHGW>3-goQe0sHyT3SRwfc}jO6RitR9S!2mM-vtiY|~raI&h z;6;YWJwwLZ0$E+GX=jv5wi5Dkf4)oP)V8pF-$Y@}+o*f3S-kSm(Gt$ozND~Qq45|t zNFTLD4qUps^E~H z$sH6lo#yO|yQWMGbq-Z^KX0xLfKWfw>Nyly?%`y69z_$47J-p(s0FrOu9clNK9Lo7 zRrWSutiqc*EpQZuVa~1{g(_5!{Z*(x zh_n=*AMX{zcb7=7=u4*}3^_?`>1`3!!(U1j1P;5E<%x?P@l!pS7CgYul`6KjfON3T 
zijDK<32!vJO{iX1T;ES|8?u7R*E=~5^H*Q2mJ&r8eod_rbKAcKU70>S)d#EZY1}52 zosUf91yR>LR8!}+2G%+TGucw1>S?{z3(uE$p-Epb07!sOz04vA!ROI@4D7|_ku07F zCY^bN)V$j60riLQMX@!jg;BBcLxKhfJPRCf7yK}0Is_%1P@FJ0FIF0b_RhIcfNlpYoQcNETWYfe4qrl_0hc-AcLE+}x4PX-+7 z*YL&5=ey?Nc;nq_jEF>66NvN)P#`MR?GmivRmm$Z)Px!p;?`gXDJdc3R~h+~78J26 z;PQMm+iWvGltB9_|!}znKK2zUZ(qlK8MTbvlbh~Vnmd;k~B|u)4SK={22eUs-Rwwn@Z{Z z3cIalzEAhk^MFj=5=?pHHYy?KgDO|R$a_<)8v6RECmlP0Ak8A*?vdc#n>h3c5s_S7 zFYO&7Lb<+Y?Q8~fFmbv(fQR|rA<@k@6W`4b*Ke6B*CZDjFcYlyr?t4PvCS-PNwi?S zxz;|INiUO&lq#{=y!M+xoOmiD7=3lH=mv#&bc$>g-SHV8Se-?~A@#1mr+UZefp%x~ zr**m!1bwhxNc@FS(}VmB@{AI@+*FaJL+jPy&^CG1DtS-(At91XiR|vidw=UM6KxtQ z)z=4E3^nhdKUw{Z+_l;-Ngm|Vlf`s;b_C1;uG1ynsyw4!nJP{MU&~Wsv@1wxXFD6Y zBqvrK>hq;mT~T~EIn!=!auz48Hn%a>eSrdisE)vrm&*$Ghf>9*@-8)mJ-v&RjrFM_ zbT>2l6mjAX$k|8oSse43HNkm+QIEkuI#qY9DP(`OCm^v|f=OqUFlghgpRVtk#au7L zc~uoSWKxaUu4^@_UF92}e6U=y)j3P4*{)gwFWo)7T^F;>DejIe>Sl1AU|SnBV;jR6 z3Qn_Pciw-%Q(mG6S5A_M3I#hFx$Aa0)be1kerCYzacJ@r@F1EiBU-?f$OKNUeF!Gw zpxQ23;7A7EF|BYuru@N2k0=8Y@S(!UVIbi=TG!RUw*|r|06Q|H7+X`dP*7AXvV8{B zr@Wmhod-75N&P$%P~vR6`XI6Zb>Z_V%coDDD&*>y<(q)VNPFBFlR)(q+>-3QwdnbY zcaG=t_}AT48TAJJ3ymi~=7tC2!O2h$Y6vox^lbo4+S?Dg$5HqTZ#xTn-MSZMwQ*{q zD^Aq>*+ClNBQ`P?70#IaeXqIF0H_XuJ`ufpYvYaM)mHR9CUlaI>FHH(TAr+;9jpeD z*=-VPw};1G(Vmm_7|)H_Z8Df$#%bG~d>1%l{Xr(8i*vO_4~hjW72vRuf|;q#(V^R) zi2AR5^4T*vvR4*M*=$fo)mT27qS8Ua;&Q{{>@>U^QvkA!rNt~KM37oMXM7jYj{Vr_ z(764=hLJecE{NWY4(cWsO*KZ8AbZ3Ge^!1ijk@rF8!JPp)UN(zvvH*}`&lx!7JuJ}_~Dt%IY|?HPF5EVT6LIUij-XKiS&`9X-!n4~qoeW}b| z;4#XQo@>xX7n*g9{1J@vr-6Cw}*12R7_QOQeEuZwLz*kwj#2-R^W!W_EHV-;WKHbP~=y zvBQR2lWI}VX&*i5+a|9VSXJdkDf5DlPmG`?EphBQcn#eP&D5Uyl;~w@7617JD$TP^ z9b>BlWFq`E?u&@S=@Id9uKA@k0xVcGsTj$15DHw&EzH-PE7vJB90`Yry0d2H4JK8n zn-7HxbmmZKW?%baasJO_8(mH`0A52Wox<*9cOjyi>hroW_>M3XR?cvg3MR9{o$|>w zZS3g%OS%N8WS)Cub!1d*_Jf5$L-&&l#ZrCB1?s`s%?T&=hTuJ;(WGl-d-Dl$F)5`F zt(MN}65FfQ*fP~Trf_NEg^Ru|Yi$zt8x~n_4F&)i{e22Q+U9s!MkZS}Vqa9P!qRFM z(>QNv*b;Tdg=m<)E)Vs1b31y03|199UQOoY7?A6Yxe$vKd?UvQHJedie48*S*A{6~wdxO-+VARFjt%!=N{b 
zNT*e9sWavbm5A}ahzPAJF8v&Ed=ts1p648a%wW9K)%$zM>ia_bH;0>CZfFt*eZ((OE{mlbvs%rv&18G;?a$eB#;2zg#iIEt1UJgv~JHPnX zj^oY_dgVsDBN!f1s%B@uyAXxb?6|f?XZuMISFU%&^A|ur!&+F~`6IE2kIkP1uKl+5 z(c@myi}GaRl@~V{2z<%sUbpHqN7~0}>C{xKE&2ha?d`t-HBi)2S_z`exBVmG32!QA z3qx0f&cV>}GF35C+AsGhr5>x92EF7#baF`}+P%#jx$N@m>Dd8~@HC!hm;0@cgZ!=$ ze|g4}p>_u&=@#{)tK=}kOaympLc3V(&Xd6UaXktrr3|}6#SM|4McwLVM#-k37pKGq zEYQ2+{R9m!_J1vH?As;>^fCY}mz4f22c$rmMwDcO(O-V$5h=c_l6m|F2tgicx^B_; zLX(Nt6FypoO3eE9Y;l9UW&|(YZ#VbnY<~Psc6M-)+pXWNPxF>~p_xx<^e#5J=nH~Z zX7)}t$>5_lJlasxO8%EQNtRpOOOBuGhL80rQQ(}O)5`*mMSw{|%?4Qg82uA)>s!+0 zQwiLW_kZdK@UPPKCfkr!h{(m(*tY;`gm?p{M2F$Sm9Yw@Hz-T;cGuMPjNT}GLHGUo ztppeC%*TLPzX(S`LE#5S6%Wo`=gyFeQ*`(qE%Q6#8#^m~cv`w{cl>*yL5%!S4Nio* z9D=y5Mg?-+V!01CR@{^^>a28d=M}4Lv7<^h7e{@^7*{gy&j!iDWFYYVx!Xl85%q5- zOlAQ`V>KJ=h;lU#W%(#1^a^xhjrNuV$dPLOdH@_uDF) zf`2U8b(5@i-TtU&X9pYKd2XZI$|b$i$oD_qe-8Smp0}@)VxL#N;vsMRC&Kik4+t5& z{>X?Hr{(zlE_vN)DR>x*4TeF7kK1qBo{thi*o9N$AVTkH#YpEOH$`{6aL}970?jY1pRP|lHcTCKd5Z{-*!Z>(7v`{ z5TL=NApHG{3*Y7srvD`zhlv`+*NygG=Q0v8&I$~Z`cDRUpFdnHQ1PnxZ3?tix%1@p z?BwE7a>u^qUvr5%p)$GU1 zs<)9no~mX5A?zJ1dX~%Y%sbrSj8VOQFk6I&DhQ`7iC79}zrR{$&!qpFM$KYl;yv)g ze1;|dkW%NS58ho=6nQ%k;(eM_|FI`riTMoVu6IzpB<=qFnD*(f&j(H6TxCILe<$QP z@b%EIFpO8T2g_)J{J#_XJ8pWs5f-enr-pVwbs7Qxx;)(p5GAS9bpch}lNqVkEBmY2 z+Iji?nr231*aV2{&ISbrc8TOc;L+*rqVKO0XfAcwV?_dhGX>5|;I|Dd)tyeRxMq3= zJXCU%*)}Wa)MGkcdh2Div%|0VWuG12<|CdLS}TJ8)VFWHVT3B?^gY_XcW3d9c`4Sf zA^rDIZb5v2hNTvLAM`>LY58nbt&N&T108X`paJXoDP?r2S`^yd zeV<=I;Mi&40|B#P2aKz~J}3#0vx#^r+>h199_S>g7D2_C=W8UtfAa$$TxUV;s=M@^ z5~7O`aV#lE+}|LlI)!|1kk18EI9S}jC^=NrV8g<~l2mCim`6F9uqAkq*hqvHy6s4) zeMyf4Vmr)-H}*ergE{r0=`J3RZJ^_?r_Y~e!sGx1byC6M~_1v|Q=LY(6lG#+eXe^_I?u}iLo^HOMe(p~ zmz^WGgv_nU3uNo%ei4U`u2>!+08)Aa7Z4On_%Q~(&Fj`uoJfluO0V;OMTy+9#b?2Fr?JnfKsHvMHu~k zpQZC_!E}p$N_m`xu?~JehreQt+J>aAAy2cKW4x-)FvxO52SP__k#dK?0YBxqooSI! 
z-i)gCUSC$CIsL;b<(2HVGMudpf5WK>CGkFCX$~y2aca2TOCAAp_}rx=A~+p7FR#q- zyRVc&X(HRE>n@Hp=8uO^hX0b+J%{#QeLi^g1oQu2eE8p@2V?dSB9SXLj2x)R^#7up z!dQ0)%AIHgvmB2MCNXiHiT;|W`=Disc-**ukuMAN zrK(o``Ucn0@zj-E?!H@1fyTwu`PmkEWhEU&`FdN}tMiMYr{Zy=5~rvu$7c1qNMNPJ zsN1d$Jo5S#=`+Q=a|}cwBZli}xX6g)(co3bVV2QI&D9Hlp^&c;tgYQ%;=gVCUw=!L=O_i`)S0ds`BSVYk|9~xuQ z_{7u)GpA%k{>4zc1^1h4S`@{W*158FRmHY@kt>W~+H&-$VE^f#9 z=I9YV>0j#CqN5pmN~L(fIKQJWwF{5amL1gRK;|>(OJOlMIyjiiyOLfV%5aBr6u310uCMpb z%{ABY@$jIyg|avYZp|$-tNii5Pqz2VgO~GZBiR-+a`H8ewEot4+E4Eh<3dEY)}Okq z^ypD?|~k78BavR&`rs3ZB6IUJ6*I{$PfR|fA<`dA z!ol#5_3mVpFpi{}Ak+1&5%2hR($Diw$lGFiMf8aZNj?_!jhB(b&QEw=%H`|4 z)f}7SEdJ_-!kpj=MI$Sl*jmXj9@NW~=?qjCCRi^oBl}mhE+5`?%(ZBJYgkKe;%H2B zP37i~*y|3O&198Zt;{B`8cyAnnGAewVe10dLSgGmcZ7eydhomae?1{5gD|i%{j;Rv zBn4ewe*EFBmcx2=!xS)KN~qCBLk zEwzw_mN_$9A>J)vd>B`mmn0MAwvWL#iAD)4CWh}HRe}~0tW{Ym@mS8bAvIi55gffB zTX2B)iKx*uGWz>)mmk}O(yKMbx*(TXEfGT}Ii0i6A_NWCcU3n0Xbe?2gr;+xqYt|Z z`>}jBVK9|!G=rAg{HGHC_nPF~+&;{HX?X?S;`49AczNVPL&-vJaJ3d&2zBUG}tu2!u60kB=uciF|*+=+Shxy zkJVzi?@Q~|h?cDu>W_RP_g$M=CBWYCom=b}b5<$+SRRp+AGtY|a}QUNJl2J8)9L_g zepf}};QeN4h+(5AQ40R#m)m>HT5{FjFMe%Bhxjn0b_#5r<=W_5Q--nbv1h$5cIRN! 
zAjK^gjhCMsE}nNf-lCCf2=-=ZbED2UauZt`WT;#B@x>e6AfC%8pOdp;7Fs<>P&|ZK zl5M0K)}nA~#Ms2DP~K}WBWg%UBzClLB!)~Kn78#Yf#1rw^D4nwxt%+CW>e1aR7r$(Rs@o?slsrekQe z0PR{XQhN<{Mi>@Fg6$T`k93I;WxRl9mblzB=`OdEWSOPIgMI6#?RN_W(wVumTyC`=Dyk^q~ zk0JQQM zNPj3cN}EN4&tv$UDdpnNXwllra9K&wL4kjiW?U!76I$Psa z5}@a{u%j0WY3&)a+PZXst~}a<`z`CEJ0!$Q^ya$__~NnL^_LeO!$os^euR^C8)J4L z9ea!f1^em!mJ8$KE&5B<3wkKeOT|8800pgH-{QvV;!%4T7s!$(-8+Fumt%>GZy`?#RV#X3XFxJ~nnNp{PGal#8_<7Lt>9XM{hWhL7@oZ4L|410U+T5;=mBaDhQ_`%9=T*l7t+~bb*z@zU3u7Mex)7fK9Jx;J zmR$MZD+6uX6iYOy!5{}3YR?6xAQ?Bi=1hlf<%qOL7Q7rG$x}Q%BC)rc?dJfi zo8$3=d2lX7_%?uHN0V4|iqBSG1@*IL(QDHSZR(XS&futOae8ced9*|Y*yL=LDHQ$w zQ(ABN7Zp7OR>LQQqFeVaJ&KCZ#!51En)X{(H|^meXiN5%zjMbkj*>7;&gc`owc+M$ z7`@6%u)d}PV4&l$tAb&yLeS+3^fIq%(cYw zJc1T-do}_5a6<6U7&86A=cbykZPCOaM$j5c+n6Yn631mVZ%*cRS*gszlx8+rSymS8 z^K%6X%73ERO3F`&ER!XPe@SIfrO`gI3E`BP<+YpBHC6h2P*pMRdr|7TL}%B_(V*?o z{B}XOfPBI04$5_Bj`D@u5+&%3RpDF*x>KH=)oe_!GO*U=c?Qu98=#61B*MZwidLL+ zl>9ymrInR?zx=f%Tv`}Sq2hf(F|{2a3X{bYPnR!MVt)kiXYaI)ks3`|#7OH|cIQS? zBc)}?Bq!wLthl}n@xN_jX?}Ws@L+?h$oXU!(R8bvgBU=^KVkHz+4m9g=+2I5Rgn6{ zT5I?dM=QpL^Qq_t;Th5)$c;|G&m*5*D0Q&Vc<{bveocw-#G43|B8a!r&}0Mr4I(o? 
zvcH@Yw4qewK6UMfc2JBEY#{=(>$Y~dJu03)`x6HZpn8j?xBfE};1m!KT0jEVX%Von zx%ON!|2fjDy2X53(SwxU__i(elJYJoG0a^YY!v~#G| ze3HIhMSZ!osYI%u3H0BSHRpWs;?%VLi{W6i4#;iys{r|uAO-bc&H5|n)R0Qx>RpupF1yaGXO7&?1R<$Mw@Wss^UnsHizBQ zC;DAwy_7KGD!Ko1``>z zXMV3v5-8$X*j^c3M=2z!W8>)0V%UsP-|7qOr^%Xc+ml6?XLsW+N0pc-8#Jqw=xoQw zo;WOJlDm4<-DH5|Xtl!Z>z`VDZdd?&cNG=fkG-ehEtY5G=dExdzb>v&c#*=(414 z-1Y*R`$51a^Iu!es3F3Ufg4yasG%STqbICWGEf z>3yFf`mX$Fz3Y3(Rd=o{k*iwn7peh*1UAx`=QH!cUK~~_@!mcO1_ib<89km&9iy`s zwH^$K9&^xiE+T(mWu$_y%E=~Cxvcm9g6~b<0id}$K+Rw~{-8-BI^C6^u_LM^O|gP< zs&JzdtWcVP#Dw;!zwt@Y4J|=5ot{}f1e^H~)I5d?L)waLCBF-erUC&c=`9!28!a(= z14r2-TcCn=avmoVUR&O^Bh$ zVqg20gui%0RWO(7!_$H~hnqD=j=qpPzMIde>)Tr@4cak6Y4Rv#$2V9YZap?fH7ysOsygIAF!r;e>$}vR{qUh)iX20jED7z3OIV0_jl|#lHZwPiAj zO!fn!a6VY4&jiKP|NOX2PyURr zFw2A{yi7)&*TMDk9&DyyR`e3;W6NU|)V`5+KcUUo4!JS3tz?tWp70zZUW0_9P5!bT z+Ae+$FW=^}-H?tIpao71C}iiCS`sPTdvel}wdXwQ9at}03`IieEW7(e}lb=M8 z(IsLM^E!Av!T+>J-x88BRBF9Kkfp>TYv+OkDNYlBD1RbZye*q&VWZtXaB0Ymzabqh zYX&+%`H0@|c9Mk+5`Ek1kHMpCy)isC{s51ZFEJ%?HA_5P3^18sVe*X z5p(2&s*-;ohJ3qF)f7WUcx3zx=Hgl>#RH$NA9YXbs)ubFv$+`Hd1_**xaSvPnONjq zz453P_{p-=m+Iiew?i&V3tJo%dzqrp%sRnyO*B@3#~~BCPXU+9ABZUAYakeB163cQ zw-T7V&aJK{WcCdVe1+HG{-xWT?Se484_XpMw;Hnn0+P<|P&!Lgci9}+A1(^6J@)Ckr_nKRHg0o{;{{ICM@QM<75$XdT}S9mg&A<;&$$gI}6`SeGeZG9MTzZxFhN95i1PVXZXF{aN*@5I#IkI|)nEo{K7 z@Im1Ri?m_TqL73;z~5q{U+h(&1$@3XGf(s1C+_ubxP-J@^ZT(eXlqaQKDC+eF?$`j zir?|?ru^hcEn)&yl|1ayCS7Y>s&M)@pH7Eqc7}y43pS>T{B6Lggx}M@qprUB#G+Ny zra<$gl?7Tj2u_^Wjf{PWFZz)wyW?1hLjcvH{totGMXe`GA@=uxx5_?T75V3%k7QCi za!V$1_X--f3#MUUV^A*_`J8DPSeg46NNc2BJRTzXJ3Duw`32g9BbxSkcsw}%_Zv@v zb9>q;0omigH5r4)VMcZ|-0i1_9?~hkYiYPn8LHo2PcN%yn2HC+WIcpV=FB(*cQd~y{lCh0E}!GV zHjFdx`1@3k9n>f7dP+om{d!v@D^zp^i}%0z@8wm{t>Us(XyhiSqx{sEbo z%-7+C@hW@fZ_;pQ#~|QeIj!2BPdvg44Ef-FLcCM|{`JWh$^ZVj0r_#9+27R!e>CXC zn;!QbW8nYZJJ}oPSU$ffQ_CVK*;`t2Ei#0^UGv=Doi$B+Ki(M^=dm%Wap&%TCL)N@ z{JnD0A6zin+1}+pS`H}37kGQ;(*1UT#r#`KwHonAxvV?kyhI<(B_xzFh@KuDtRJ)HLp z=kO3o0rmkJ?0hFS#1L&S&vls&{y(l_o?o2v>P{)SLtX736VUv_hl|g 
zTrVrJ8YsxkuPi4x-^*rtc233Nx?Os$&D76VyT(|%oKemF&xiUuFwZ3CfV$JAy(O&} zF1EuWql^lq+}sk1IZsS(VwUe?CwqIdU3_`8Qv7#kAMx|s_ZLtYd;k;P4m1%83Yr4# zI=Ak}X&V1qu0&k&X(Lb*cCOYZ?Chl%#*nwo-pP+zcV8DcxwJ+sWSh(kxcmXM`MNxKUGgbBR4ZEqYXBAtKG2YPK^bXni#W(os>G>xR`Q)T~ zWiP&u@1ywD%Skbbk68)dXEfZYSXp;Xe=^=)TVVWMAK-LAmF~KI4PM4i)R3s|EqYz> z<5ZnZ*GU!|c~q!5U+e1PWm4ot&adT#mcjuX{(EO=@B&`GOm+LH9Fz9gg}2pJyL34n zSobtFG)iwC4v+Doj<+!=n2{^EU3rlFs8ql9xTTyeBxDN@&vTVY@9E5hU3`VhBUxo- zNnlT2S3H6?RWTu&34Y2}ZQM#?x}V*Ro!Ecu!sJaSJxj5+4= znx&;pH_spXpleP-PPf)geCfjnze&+A51WO6k(}FDsKn(H?PB{hP6uq)vWhJ)*?%#O`!8}F2y0x~qMelnbhwI5_g0<>A z@CK<{@dLU)r6oz82OGlgVUD4OsI5v2v00*%ZXw(G;djQPm1qVlAO81h{MYu+&pp_J zWW!6-4BqH!X}Kd~>`LKK*F}9uZY{Dy+*+znL#O*M$9=7ib>@W6El$lUuj*3&w6=y~ zIFVY_TwtY2tb3#i$^OqJU+FNYUyuf;4t4xDqPh9`n`TA0r~9jZURP&#QpGR6MwMt{ zV&dPz?c&7aIAxMKF#HI`L@0J^we${1I`n_zPZO*u@pu(vk6c5H)>Y*^pZKykt>?$NDCC&Ple0#dP?tt{t))rmR?-JGNh$tAN zNFO?ohx3@_RgPpbNrmkUjFSWeM*6}0?2Pq*?cSiJGpEgyCJCi4zudB~BfWoLq;MpH z#~ZhEqsgW0P61AKeN7Krqr)K~yH`s==$v4?Nn%-zS~dO%2Zv@<5nd)@9i-RFQ0X~f zC*&yC7m2uOY@~lxC~~}!;(9<^Z2G}>E$m9r*qECa|LRsG@4y|j)uC6zmmX??TT>Es zpQikNO5|*Ar%O%f^IBQ;=g1=$8J*0CYCk}l{?mhvapkm5iZs2irq`XXBi+9*_ozb% zv7}y0ho z7k@@aZ4GYXPSaY|_Ew`lDcMx>$gg|L+vis1*48f~gAhyDQ&@gK#7O^$-wqxM`}9Ye z^-Z*Yy1Im3Yd?rA)b}8EnG~G()}?-BCe)quhO3fCpY(w+%V(#9W>`{nIi@1)<)>aX z>0QP+pRXsL@p(7Z>T}2S-j&Z28$diPBh`NBjET_im8h z%hbM^I(OvdQ%gN%pcq{#SNd$<5Rxnrr)4&H@K_P+M`$(+myoL~FDLhv8@tmccLLqH zx6`{8`_!woZhb6^nK+SB*9-Dd-lNPO=fBHqUz>=_6~%O5?Nz+YGiwR<_tJeYj;ng^ zY#w4+_Ri&6?-=`BXfs|OGhP3yzpCuJvQe(s$4z%|d`!8*tGd=llsD8hyV!x}%wk2{ zdFg`3M{|zDN2BKL<0Hz>#ienvO4hAskn40Nact2cky}tu-=d|V-?L76?!ZVGMB(~* zVvO~tz}Zr+)tY0bO3{7b2!A5qLGRsEY5w|W?#_w9Jvx=@??2*-AD`D^Dg%d7qk{Db zSH&`!-c7XCle&y@U4$ZpRvg9Jbf1oNm>3bQHa{x_P_!wYWGP^r%HP zvq9rNGo~&rFP|-O7n)g}{Va$kmhXUAo*-gn*EEQ1EFPbNp5;v20Jm8)?!B5kXiY zKrNEXn*8`pEKZjvyEf{MSA{(#(e3H$AV^4&XZb?J zes|(o@b>>KTTIgzYb#Q+=hscX3orPX(${zvt+_RQ^HqWetO1(k<(UXQN2=}Ucu0?I z)AU^yIN(jRH|<>B@!pD#iJ|F~Dk=UyjJ*duS8pFb{53*ZQT8f|l3Df+5z5F&*+j_R 
zTau9xk`W=vD4Rm|9wDR1-h1yop3kA*{k#A7^E|KTIj>&Hw{y<-I@kDIpZB<~BU;+U zowGV^?v$(Ss**kRiZK|2!*xiX%cS!>Jl~`;F;ZJf6rhaaVmBOneAb}8EPD8&g5l?A z%d9uwTEBcz$@eC|Xl|2g@GDIQ?Kpk5#oi62|0T*nkoNGizV|V%SwM)?hBQemv%h~a zX2=(|yTu8n7mM0jTWdC>o!~18uw7kq2fNk0M%c`)1y^Y;>P>YmEt$4R&UkMkBO{_` zhR|1k16zXAY$A0f^A|b>mM4l#;vF>#XRedTWOmkgpy={kSEl)frdKlm7d88aFeuvWSLX# zDyAoYN=bCFSAfOM(p2 zi0_a$OS{AiG=h`>`GVl$^Yd^b{UCYF)^I8O2! zmD_15Kl!N3X`Ve~ed~yZLf7DycIm|5n4!H=a6RJnj?3PaC#iMf3CXt?HuGoJ#&GqI zZO!9vtC_NFE%iN7+4g{G61ki?!Zy*CC7SobtPJn@o-h2UkxX74ZxK1~!XdZ&)DkC^ zWGE3l*OlY3No;no`;f0_N57@vLay1Wrcl?XM7OlA;<5vJ;A5~Nq-ow)SP^Aq5r(^d zdAKf_`nqjAIV)T5sM=8R^ZSnd2n(m}!(4KnwF`K(JB;xzUx-Y*JSop5Q)X|vl2K5+ zBNwr|M9GyFC3S5%9BoPF+LzKAdbHbt*<7Tya@#5U=(n^%{Iec^bw8FPho&CaE#pLrz3=4H~tmzUr2Z0+uL88JLe^^40y zH&uvm!+{9V!H_@ow;DT=+C>|s<>R$m5H=!BAA_%_x4No|K=p_2`F*9?4VM`^mx+nl zM9)w{A91Cr7n=nE%vL$dVoG&RWz4Yg15^$aANfXA@v6<*E37v5E=B3okBI%C)@3#I zhAr>SZmyX-D<|jLdfswsmqQ=kE6WXt-@44uW=Y?B`SUh zNIbaz=v;R{^Owr1f^!!cZ_eWuyp@1PK%sael1kHiUVeZ1y*aRVofPKYqSD>m4%1s4 zz|3ATCZ5!)L(oLri@nC!SN~~w|BJpX7RlZ;3uYua-@f_4)UmLzz&s)0WE!_c7;`ly1%&Tu?@dJEfj&D&^=$C)h^vYBKdppTJBO%$dL%ySK%7 z0r&55AGQnOrN#CiMJ3tqxP4R`Vu`uMegS@2GZMR*%H6QI;2fB!_yzuO!b7#h7m{;e z<_n(L^ny$(?ztEzhI6UXBQJ)t2w8_U!`3*f9)r`R{TmSBt0Mg z%wyCQvOWQg*9-WW?NWhQ&K896bHVeR-p&j4emo8zZ^33X=8fb%1wbiZI0C{Su^-HZoOT=L_;WNF3bHQG z+n7u{w)h3}lPx|b*%wh~HWVqu1<+)W-K-Yfx4Wp@Xmq^&fD5a1kjz{VlYx;(o4P;o z(6*<{9D~t=QyI_q**A(Ehb8(0%K_t{J!5onkG(t8%M+h!!-gH`M+>Cz#xTTQFm@O( z#cBLWlHR_Qq+1qh!hSUB;QS^$><|7xTyf$bcuhbp!&w%q!%VXZI+N+-X;p(DR<#elR z$JAH1{Ikg2sjqKU31MgZGqBH&be+Z!o@Rs9x-L-zzpPX1zBxDQIf$;_p~1-1dR= z4^p?5A>HoI;%?}crDx<|w0Eo~YpZ{EEAM?E@NeKJdGtN|5g}@pf*&S^njtkb_BOvq zqanFx>l8#AAUNIENEPZcn?eH8X zfbRmr#Nl6!ZY^Yn#U)+9*mNj3myW*K8DtAMi7}XNXyhA9yRq$Xa3npsMilMB^S&{NXA^Spa;spb{fR6 zm0|7grtt|a)N{aNN+pqW+B5BHL#q1!1V_mSB#gx7>1pWSQqRGoohoZ7^v#qoWIYx{bP(+dGr~5cxKwW z9jH1LkNdHlWICbYi$GyE+-A<8j?GY- zJ<-maHZW_lw9CT{BBZ4yUd3h`iL_)@*x%TCb!N`%xIE-1b&xgQX<6s5Gr+g(1TTpnr1Np5&KO0AoS2zbs2?2#@nob}>+9JjbQ( 
z1rsImct{q}30s@U{C>8BKsP=Eexf_e^9d`*S+jTW6EJk>JF%AemE8G+UZo)A?DE8p6$rxAYb!$XO|)CSDh zZI~~_w_hf~#25DNolm#9cToLlM~UEtS25mtN1>x~6*7~my@txqE^>To=9es`^ncr2 zTTscdU1R<#%xU^P`m^WyigD5^G5{l~MfL;>j!LltD~_==JO+J)T?zf&0TWuW&FQ>9 zoacyS*}+;b*9kBe+#(kqejBCjh6dBt%LlXmIa;wfo6}uZ4i0Y!VFN)pm~CISY(tNz zJ8o;^9b_AD>VC{?S*JfDNQiN~)==rVH;UuEsW8-9lVt7UMWgR=Ol_~jtd7MrIVCbs zQba3~$wbl@?|yuaI4SaDU8QG-kr}P2HwG}d2)YZOn z7vV1BH0Ya7n_Lb&I@G=s+wBF|DYRq#zO|j5n5v^ALOng^R&a7r@d*wNb|XKjDy4T< zGp|~<9*dgbbw`Ev#gQhq_6Y5W9^0Jvic@JCo^?*_qVt(C`IccY4BS zM%X>ES)TZ`J@uYGd%CgQ-0`RO_~ErLVu^|K~ zvh$uU#Z6la`w!c9x(-)7kEkS%)T0copfjb_&TJAw>Vt-Ic6M%TX?z(Mm#O4gK8lrB zc!5h_@bnq)JMs_7_q|FJQNuJTrRDYJ?|2}C|>5e-KW*@Vc%%N=t?6Q0>M;RpkL(ZtKvY_1Du-V zquL}Kaw;Q3A&JiWy>a%1bAx`rJmacf)ZGY*pHH)s1P=D+d;W&l=2OuAy^BAM5Z=e& z1?#{+Y9~FN98{y=`jAjM8^)=wgZ_!vyDTr24qDu74J*Is`hIDk4bi zNmF+-bwOxG*qgH65jRr5s+0mj_o&Ly+lL*K$z&WWgG%H_RjK4cITanY3HWBOve+H# z{guW$rh}^*8ymUiPJBR&%-^g&x3sD^(Ms*lD>e@@B*%{{+q%m_14|wLk8jBBRS~Z$%K~td{`PD7mm9D zr=R@igcDv084YlJd{F;SE`&6>(shFq>hSbR|FpONg9J}m>eEZliX?^;IlTpFJrFH6wReJ{qZao)y*hQk8( z;msooA#a1sCZ^`l)DI|6X=zEr0Q`kT5-UfCmt}F{Lyw$t%ITSx=o*lFUQOm3mbO{n zD6$pBU{0oa`+955;9FPlFZ~~on4bRG3Rb?)-L}TtQVz6hc4s^zb@2r?&b+5t92r} z|BBdvIaSpYWDK7(KV2I7Q=M8&WoCBt%)qFOLf{a?_{~>HV*-3YQJIjk($ni3l{x>PVAea+&0qC@(5z7nJb0M>u=J;Y5(*V-j{G56NS5?J z3^gmOE{F-E_yl^Yo0ZSFYMcRR{ORdLqu+m^4Uy8-2Hmo={{t)&z5-}E5k%gxzz;KO<(D5wJ-~|Go2MCFc=tdv_ zz?{bcIL~S|!`r{{S`w#tx7!OwB71an$R301Vf&~{jQ8Tb+2iOY0Oj*(4IhLwdq#H^_>?L3iDb0C;v>GfB-Fm3R=c6Z14hxkE`gWytXO0yrS)uNG*5i4HR zrbgkUmZW6z)LIw0+f92;0_y$dG~uhPG|JN%^~E|{^usYh*I>|nyqVPr0U#-6x~6r~ zWGDN&44D0^bGxDQtr%5^ocWQARLAqe{Q!eV7A;mIZ}vyUlxGXMpI@;pv+utI%wn!s zw=;Ce5{d03_#ki`Q&?v<00zP6#|NW61dFxyPGt&&mDO%+%p|6}`=gm&47J~>c+&g; z<^}D|=(X$Un#L7c5Z0v{NQ)=i_@1)lIH)4GNlK5wa;kFs$6|>pNPf2p5?IpiCn*9t zbjxoK!@7Sx7N4rHqtVn-jox46O4>ZyezUs?iI)pCC2yL~?6E81;eM$BIYKMv0=b6%9tRX=dpb z>A24*0E|0R&q&y$=R0x|(S>!aIb68R(kds7h3nntUizY?%nwF=dUT61@ZF|(qzj~_ z2dpms6SVD4D0#nWC+?O|m5I32*!LOV)HxrGk z(oE7po=vw=JFalyX;A!za7vqwAMCN(OeQV4(Q}Q4rai-2*m&v6 
ztc#;%c%7YJ`G9`Y-RY`=5{%dVqzQ_bla$=J!r#{)c$^Ry>&NSAU!U=6d3=|r)p{)( zcZX?@@v)F9p}Wp34z{ac>9zwQ9$gPE!IlH+Ic`)I!;H^U&Yb1uyE1YOAk({Rs3joez33WpdGB#m#b;!Op~c3UzyA-(@h6V=Ay&h${D58&JvKcH zIizM{5BK~HweJm%d)17ZBWm|pKvOP##_dzm zU$cJ}f-w?dVH-|O&3JC^(ZIZT4o$9EQLCb+MM6}PC^d6d&1cag(p&RdTB>}T4Z|>9 zA*zwi{X;q^ZIo!7zyHjuyQQ%~I=GOH@kv2 zps9jmMuc(|bk!`QV5gIXDQ!c&XX0UX#aI0c%A-$5rSkrpt9f(X{7v+yJ6ZCCju8x+ z?`b9mRd6zSFfe@G{F^R_USIcG=1jO(3xs9l?&cL>0#tjpiM}7WT{^O0{_^nK!Qhn} zg4ltc2PB1K$NQUcFy`>w_eplTGG0gT2;v+rVvbo1-!U@cv2zi|D(k>+D=>Rm@l(6H zP0+Ij+k5YWwmBV)y_x|!o}|`a_d%R-7#l!w%$5E5U^n4NN$+Cw`q-Mll_0*lN6(#z z(u3H9f`XiTg1e`;Z>+7+(|Se{^TDs1V1&Sb7;&rKy`2zMyPeC%F63P&dUOtY;1FH= z1&+6{&Fhp-(EbNHxEbxJ^zmC(5*(LAZkO@_`JsDA_1rzp17np)NPn4(M7$5o#{eR} zv1=CIf6g7NXBoUe9Y>y4n6LVFR;~O@A!pm~^aHwTkAE`Wx`7RtHb?c^hhOv7Q*K-~ zw#Ra^IfaGZ+S@2zed5rFIEHt@{%ISk>sEd`t(1H=%*2h@k9pDwQFre*S9$|V>F*!c zwxMLxc2@tR{j;NrcFBUR$m0er8Jkvv&NSO-Li2HO=3=U^cNvDmf}n3tzoYf^ko$t5 z8|7zTS7jvK6}EtMSMUGE*Zod&e8?}p`1wWVi&$V>(=v8Ur!>RS_7M(iY2RS7mE;8! z2*<1gry}8XHE*DYcmy*Kl$>OL{?FEJb5CxErDK}FKV}m0^6?$(ul!}h#g)#qeKp33 zlP4UG=~5a@Vn&S|qsM;meX5$VXTs~TXHQr9Mc6DVAFL$XQGM0DO=oZLEYz=x{zOK* zzNfu{Bc98+;ipfgoUTm@J1)0z^v8(xEQ@-1;2}E3fgHV~Yav&6HKqDbty`zd34kxS z_b+Ey1FR)V=7Yy(z!`F`5xvauhUBzi>5R<3nvSGl!snRH0QQQ5HM!XjKQ6H{U|TRt zJKH8AJmSmUiACrnDh~Dw?W7z}dN3+8HF6bL6hrhq=w#Yjyz)D>Plg4YGkMR*flV`x zuKw+rmJD{ZtHN|h9rJhE@F_6E`EH)NCpZtlU7a3temwALik<7o6NG@Bj<8Dq)t~edSa?o{@DC@e)$^ zgvL6cv8RBN;-`BK1s;_N*3=BD9&uHM+FP2oz@7QFq%RK5)0uH05_fzC-1+>A&93yA z@?D|RCWxSOTz-a&IsXVNl^VQnRjumyN4&N^PQQBI;qpYYT8F=Xu)BKg2mRfzb(J^G{3PKIL!z`4vpC|5l~HEX64o z0e7jh-jM3$mzwwz&2bo3!ou1^ci?2=)0=h} z@Y?N2UfJ!1%i@ihxP7(eEN$OUvp=Aa=~4 zJfU@U%Ed*9@eWH(UO|F^*rBeoBbvD4kf*J}j^)zzto}x2+ZB~A7e4oG@m%kt>-;7& zyZwCW1;HwfT^w+RNtX>FljMhK%kj`R;YO+@?S&hi4yCHRI5i3_H~H%+s|#?yQ~DlB zrwDZ)LJFYr_>@8V_~61oChpB&V?+a)fu(`|Y0n@3G*xl5l3!W?u$f#lB$<}QoTFrx2(tLv?KzFGUW*B4la3Xe&+*X=7w(c{ z7qb;G&*;3t`b{WxuR<%t{Civ}hIL+ptXsDkioi|_&k_3}IC;{i$GlzGL=eX6qfN*u 
z(<5s!ba2(mX45M?osg2p1V5OO=;_b+1}e*PT*yR14eN1xSLCxq$OR4gy5eB<0-68Wy89B-}f$E+u;V;drYWW{c~cCC~*{Qrbt|wGoVnf zKpJ&fi-$;6`xb`urIcZ|jCl?cId=1g%?&5cbfoT+I&WTnSi`Zfon0sLEH6LVRz~hS z&ZOnU;?&*zIi{u`gLl~l?*92RkjL6fVT*SZ>-upt+r5ruzlo^1b~kw)=h0?oZud>q zqo*UQ*&`L-Vec2Jt48gyeey^HdOkVldw68;H7Tc)UkV~_+Q(DUFO3+t-xak_NL?M* z-%&*Cj`huSzsZlh!%YTAKIz;QwME1q6?%U37O5D{-y4F~_KgU!pL z2EXd*tcCaR^z{uAJFZX)NBJ|Sply0vf0)<*x`9{el@+K|7@A>vIo;#KE3;LDS7Fh^ zdkoZPwF6vxw^^|&m2EmX4r*E4_EbD;H~M|@%fuC-m%u|tAGa~GJlp5j-&+(X9CkQ^ zzGB0m&{`!XLAu{kNu*zLe&O|OA+Gd`s4+!xOc)i>S_M}h6`Nf#y2=3Uv5|R5PcU?Z zi{hxUx-Fspr>mm`t;QjEt_%%-(6q@YZy`lZ`NKimb`ym)J!G*+ zg>of!8Iq{~k%6Z8Bb4L8vDfWE1bwhh=PF(rT@ZyA1BXN&J`8;3bEo5LV=p$GBO+4`r*Onk)=7`{usf$rx%nAZ#9K-i;PY1MpN*8Y=6G>D?YfrN$@=F z7Mo(9z*ywg=zidJDD#J}_O&-W+Q^KtpR3X0qfpCOrRd}@_ET^c7fT;EmYgSbJXclT zhUOcH7gvTs6cuLd&>MNpoMzRdW(f7%*W#$iXzU?sJ~QpUWfM{#j!%eTOmZ+E|7y=9 z)n0X_g@I~Spc;^k##o?iYEj&457X|KmeVlsrItc57 zQgS6XYHMYP~NlPlx$J+ym<6ed&SO5TC;w9 z*HBrQjBXXZWwwNjBWqS;4RHmLkp8vXsgFzH=Rh~ERIH!sZdFH#-M7d8^eI_9y^G## z8I99hdoi=#XZ={6MRa|(flePb6fbhR{@Co*CPy zHv0&#N>={qZC=O=lrWjc(Rs@LY2wr9#vtz661@^Mi{Es;!EExPPxl+%!ngsvehId# zO!$8~$E~k8U5CERoJ-XSGDrm3Y0y{IhuyZM?wPSbPZ^o}Lj0^dy-Ry#dtGAidFtxn zN5dJSgPSO%XCOW0H@;JGWvgB?ug{?8$a&b5i-j#!Ih7!KbI~n{s^w>F1tzyC)+H`8 z(fH+dRkP!baY8f+2M^>L#rJA*y5o!Ut% zs8sjx_cGz`uGqn#ic0pEH;xhH5W+Er4^i_Ek5S+HnoNpKLkuHxwE=`DXFTTs^Iz0j2`jv?Rze}Ufy%z43SMGviBOha`c~0i(a?aa#L*0 z{Sy6X7#^-q#qtNDJE&43d23xBTZR9f)Fy8u_EQD6VDjeTmHgxNGP`doKD-p>mkVU-P|OyEWH z0)U6P*ZVL~2!(H)pD5EQp6aPvdLF9g@4*}u{YVErsJsRPUIbyT ztix-9&wXYSa~r^mh%GiP2*AUyHO4FxK4>{vodEUA2Up8>-m<9`D~n3~(&OcVf@^)E zQqOG_oI^Z$ZH-|F&Gz^BXyXg(t1dAJ5~9E|@YazemLrOz@G7YAPQ{ynciXT0D3R>s z1f>9&a;(SZSSa1UODqnqf5f~`Umm@cS^8)LNbNdj*5Df^os)MYrSxDU>9xxtEMRD> zGgnrDs|3z$-UbHoztsj^ylzo6@n9EQicJ|A!qB%WIq*h8hr3gGQ&Qi+K{eizfIXVI z^~UiPOYox-)5{`&FiYRjVnqC)@T)0A+(rL$v#8MjFB#&go+C}DQMBKG)PPGs@4%80 z11?73!wYC$5Uu@UU&-oHX}KHXAB2wgXbWsE=aTorZvIG~9|hM7RLa;uwZOXz#Y~!- z*m?^|8BsY4piunP?vo60)~^~9*DZE~mS1pGj9aEn=MR**e_MFCh_q82N2%|19S?6r 
z(op=O#B@pVVA4B>?mt;-+S&-dk=*m--L-YEBEio&xoB?B$V0yWw=L~m#{L9$k=vt? z%ic5{iad-<^2}@NbU&Xh>7($_#)0G-OaFW<0Tmr|w_jCQakY~;X_Jc z6d&@OP$2!)?fefQ9Y8ZVO}$gYG3(NNuMCn*S_q?VcaRL~lE|O8pnU{})nH@1vF_X~ ziVO9B&AwTkvf13R>fWZcnBpF(~u4o%2iDal4d8Hl{|2fnTL_Q7gB!fcXQNLPCP^&kg8C z0;<$C5s|Cf`}7GL+=!&+3Hjcssk3zalG5{5{EtJd69gjEbF?WnNzqrkvEXJ&?ch^t zY-xEB9!@On(QmS$soZjA-e)uCs}TR?W-VH3cC)f`xZQHz>tL0vf7x^^sD5Ffm=+a; ze&S$@%SP5Mxb|bgtE{VMoS}b2uWmZxJj%y}(~Ur;O(V|PIIOyOSzV{>i|h#&fYMI3 z4DR=#SGoKm(s!X35>6k9%~l03o^*}xxed3vsu`f;tIa-?bAbhvbIDVg=&+3r)E$l| zik`cn4_5iQpMkytAJj?%69d*woU?^&x&y~4F|4i(WZ)M))(-LoM|Ba^v|{tH!il2( zw~v8YBr~UYEk0hAW9S}7dCcW#%P)j)_*o6RqCWMXeP!ini*y(m)gH%(U)>rFw)Lk8 zcAVE8l46#wmT4nSH{h%p=j+o|ej=VgkUcJI!A_kRFFe`rB!M>oB?gwYfq+*e`&&z* z*RT%ASReVo(gh0ej_Zs5g;Wq|X_1i+b}d-D^?H`7F6M^U`0S%Pvjhk{jxxkQvT=B$ zNiYjc33z!Q7O7_Y!N62X?+$MxCN%4!w%nS>0JmB#RZHXsyb6Q%Wmt`$fAe{ZBj$nH z?o1b9*n|ZIYc$X?G?20y-CD#3Gb6l}7yPF6;7kj?IX^f!fOy?t=<9y`O88%;%7CDI zE|&dY7x3hxoAyQ*!oLE$H=1ShGBDq}H1-GIGt)sZDAuX=()>+ z+D_5U{_L+C_z~0X>7iKJ=I&@v9E1Br1xh4T%)Q_OJQ@e~-AhxOq94gsxhNV^SXVkpTvz z)Y9bmW6?C>Z+M5`Z4ABUg7MC?VR4MK5bu?~S-?vqZr z>)n|WZ(sqwXIi>yPKNW541|%f{ePzV5-^pgF#~ett37UQ7Nb=TYM=3Z%Dy9TqR-31 z8kXe7`uYtF#=d~ZHxUOrORRwNd!I9ZeaY_@ zDDTrF*qZ4for>}wc+<{waJ^jrIjZBPohJPRyWSLlt||4hQJjWzf27c5;Un)!3mI^j>6vigl1);XxFo>GoJKo{eD(Bf7AC_NXE&EaFhnC@B!7)1BiRS zNb;hXO(bo8%o}jw`Cpb~-S*J|$x#J?%|lM+qLJSYsg=rjQ7DIoBa2+Ey*);m0wo|d z-j}(qc3qJ2P*>7hNzG)HBC3AzquRGBf4EJ^;@k=lDpxHz6LNaE2R%{x$M@- z`dQw&>M=l~7!d7Ia=yzIqk2sKRkni5`?LL)K*s>Az53d6mO}TU5YIRHsf^b{!3qg; zA#iScPbQC=^fDG+Gk%~TQm){7XgPo!)~v#`@C3SAexLcC7++Wj;#bqvQO{SHY+|z$bgR)Y4VhB=w4DTohdY2GmD}r0m zb85l)y$Fq$U|~|&JJJx<<_c`Q+e={Q^Izn^?BI@(bgM9?$dXRD4Ioa5aev}-K8NSf zy(cMk<3I|;TlC^_Jn0$+0^ui(;N3_wA&M7ILsKI)W@Cyc;2X>mimw%I`r2Y82i7NLxOvbE zIAd22R&`y=d6wlq@tJ>YE4*Cla@zdTNC0pJL{VY>ML~YcHBrizdisHv{ed`uUKDuS z7DoAkiwRqw^S4k(J*g2K>x#`4b0(~aY3&|=FW-?(go8U0jYYV$g26XosZ$>;7k}bx zm|ZLAdhpqrU62F-JHp=aek-hdZUwznQ!b?8C8_t7d?CD@axLD*wARPSDV&>(7AD+C 
zz{n|59$Uq(xtR>zoZzX(RVa7>tYc_;4@5-m`iJ;YA+J$t;@6JXZTE|_0ttES-xEL~ ze?Ry3$_IxQ` zfj|PJ(W+hg6zVy_Vt@5;8{y+jQdE1%Lqeg!JPH+Nk^!wjD-By|9*I%dV)o$hpgmDo zj*>`Kg}vL2RKtRnC3Mt%WlN`r?pWIEUO#Xjp~=ls4eoT5XKLq*cRs`zsM_HkH|noE zy7WZ95VmDQIq{B=RGx<&F|b74-E!RraE6tht~M&;mp6@a9V zE(K~e($iY6QMnzc9HUP<1IctrBFg9CK|F6iTSj=!lFiviG6*M|mE2&Ud#I}^AB=_) zTI{`xcOcl$TzAPsNyhVC&d~WB`6ihfgwkjVO%w)b{A4ySEGE_>Z}1JU!)eoF3|Lub z0CVPy`mnFACJc{vNhBCJmz;&L$AHxdAbiN!&AOCo<(&Aqy#Kq2E*jb)#Usfv{`Yr< z`~X;c9Z$srQ}J`g`MPf>`!eqWJ~{N?iyN6b^`iSta#8 z5Z7`+g=`w-br)xT_x`G~j&_rt`55}|*xECV^9%db2ZoatFIg5K6k6?mW7U7YEB2?Z zr+b&G)MdstiE<=XK)zv?R&;FM_F2ID!6UxA&4Z0{PB;`zsjdh9yT@iXiz}^Q-Vdjb z9Np)kqdK!Xxd_upQIxyRLexH|3)xiR(cPAMwhYXm?`z^I2u%Zb;((z5A=ywR=8nny zweIU7oCiOuT8HYMl&tK1vTtINW|5GUr|j0juW_ll6AT<+&X863SbOxi)AURfQn(h& zLn3YA4Swhjy7n6uoS{QN6UbkQmxyiOX3;JU!-`Ed)cmy1meDMvVw=Hiv|PAudb!+} zyea#(E`9n?r+^H`_}cg;aP~v|dsi6?$n|eNXMJj-qX0z7v)NVsRwvsBw!iK#Da$qV zBBaY`KLKZ6(0+B}9~lqp+p@*%x=w7cXGp!S`&Da@PVlHi$!hA`>I!nk*DNYqqHpky zahR^5BOl))GZRyDczs|3DUJ$ST9-DT%gkt7*SE*4zlTgU`Hn++*KUCgr{FM;7IuGT z4c$EYmpJQ7(~?bofaZ(mf#P#GxB>&UZtIPrvK9s>XRd#_3k|}nXIM@*`<+)h!}-C3 z1LAHaw?t)LVyvkAaG5SoQW?ae0{bH@4$KQ@fI;l(oPD+<=sFsy#^CJBX>y%}{KXdz zp=0a2KkrNXvZc1i>iTNp*ED!~gy6HgnOywD-Np9yC;z_;+}Mup@N_NS-u&z;-`n#_ zy1mFW$-ZB%J|^`Ty-wvj34h|1SE2U_Yqo3)RL4dZD}w4{o{{0$HyPmZ(vz_Lz%d7= zUiU-d+s?xQI|z&5*6fMdRO;?7Jw?^rdH0ae{q;L9hI?5d?~~mJAwPv!URWeb2)r|l z9WwT`#_i9^<9KgyF?=EdiK8Dg&F+do4v~Z`SeeB4-i#0fk?!bqagfh1>G6J|5P)&N(A{y$C#ZdkGCWaiFpL|>vin!qOGT9IUnudA! 
z=X>E0)44qmK5ipqA3{W@mmz%LOn)Ko9F~x|9ftZWzQ(iPcw(_UUi#*Hh?Zd~RB~K- z8IO*KoMWJ}00Q<)-^GS?nqqp;JW!Q;U7{o73QSdy{Ly&|oCSvMQg;RWIlVZD-Q-lf z=0y2>U8d}AoRhbNh7)h>Rcz9hzuWaW_$;aNQzrlW^kKeC%TF3bQSvV8^ngjJmn{Y+ zJT?tEVdUzf>MUOLPwvV!8{#Y?0~Z3>4wVJD1~doKK($Slh}r>NNHOEV)yTVlJ{$Dh z4Kn+Uv{kUS#5&C~GM~;&&|EVLXL@@4P#_g_wIai`26jvA=dW0Ob?d)#|2Iu(CW<-} zi7(NvT7JkvG#AofctyWG_e4M$pwa9oHFXD065m{WxJFrZ{?+(Xtk>t|8h7OKHd?=t zbpfY74mp)|JSYOVZ*kwmQs=fP%3sHVVpAN_*Ib+xc`>Swj#q&>yFhbo3EPC{XaJWg zIBzcSS=}Vf-JkMx9}IqtSdR7Rcyi-05x2c+<|)huscqz5=T&RZeL0WsZ<3oyr$ybj z(d)V0n-@gv9j&-s-=EnRVT$Bgd*I@~I2Ek{UB&~FL7LLXXp9zO=B@0QfNEmzdw6)1 zm*##9ynoLp;ZIGB_0`0THb)2sD6!a7DvamR-0YYWXu3^(G)^hb64pW2;a%X8rBk%p z_!c3*p0eq8r3j;*Tg3H|w<7wZ&yBfhcMp;Cp1dSo>5-aK{HF*r;&sXPx0TOJSX8mc zeLxzXt$nscao8`d)jBgQZW25ES{jkm$+?|xd$sI?bWVM|E)>I^%c+iqZlc& z;ZMgn{o$GC8<#V46(m*EWIWL>x#xHw)&!Xo$#ruV#AH7Z$)Uupp?;UoA0rC(zz0&K zq!7t+Y>#E!_=?Ehz9!H=mRrQAv%zJHY$h zX2e^eaP4>#LKM8%4bjq3m48YEO3Q>$5@q|@WJfP~T^zWW!`4SfhMba~N(Uw6wk+U3 z`&Y?r6u|3#h)%1jg=q1OKYreZVM@=gc{{(}-U=$2Fo-umw!o8npD~T>Y!a{%?aLi*Is#vi0hgh+D8kF~sd8ug$+y5xns?;ej zHd(w8Bgo%F<;SU(^Oy2vHvH$*lYS6!6JAt^X6P=0gc?_1xFC*S1HOiM*| zG3`;CuKU?*^N3+XsaN@To9mEn zAX+W_aE4t`l4K+mSDwLMCQ$1tuMn+0$~`IP))e{UCF;3er)0EDk)43N*yV_a08k;w_v*Iq&j#4aYbu0D!@W*)d&5sT>tX)> z0jI(e(nB0?b!d{E7im)8cmgJO^EczJkcnM^}GXUrGYyvMM+8~mvL4pK{H3L)|BCh)p(?i7% znF&6rYT0}vHU+MR_7s<2=*_6-{-2ulIeJu9l;@ycr;hW_8pLeeGL6FWKQu(MorQF3 zaH@~>J0K~LyjJqsj$fHq3p9Y%4Di0znj_=SfhSL5tfhvv8{~ahw8!T#=CNyjRcV>= zN=F+!>Te?gcnX;$PX5o#3;XxZ)IY_1Ee5^>|5Nq3dbn2(k2dx(lN^tBe~6*ED496WoD>r|or3>=OrKwNosJY^u5wmP0TlKN0Nzn&YQH=)Zstx1677TQ*|Ay^<^?E!I z#bA$|DsTWa-gDiNF8TVh^_6m}_4*wDfrlBLe8TJV7zZhsj`J^o3h4N5fSt4jZj&6y z1rUG0e8*G8^nwH>Q6g0lj5+LaNXU(6DV`&kJu5JTy!QRYTx2dI!LfOq1o#SRz5l_S z^C)uk$dtbS(e;7cK$Fl#C<+A0Jh17^IWi@OhotA{r9rK<9X{h59G6Dg8DQPbb0Z)S z5!7ZyO;vaOY)Ts(tRewcjTB=FAULO$@ce)Pl7l0hTB^ck!xagh@82D4;hrlUv%Aqf z`Z%R;Y4uC8^U3}K=*Ec~F#f7)hR$$cX6<(`1&Ou!jye4cp5v%s~o-tAGkyczrmyAj0kV{wg}^~=zBrQjRB@1Z@}N=!plh=44~nRgL1sm+?Eb2L8vx0#>bM4t!e4$VSVN> 
zspFba)3(XY&xedp0}ql#?RW#i2cpup{z*ecu~Gt-jb0P;9pB zW|G5wadm6gU&s%6fu{j|uv)bG&vdu0`#$>!5HF{2*cgV3g(xuxCDVOD<%r^!QECAWl5WW+%Y+_)Eyf_7g?f%0b1LdR$+2Q`oku-mR z3OJ4FOfU;If98Dt&fRpDFFbOA=;jivyTt6YFMdzSKpSNvPVose@2UocO`xTS0_%Mb9WNyPrkMsJ|eeGee z&VZq2Ea}sHt4^uiogqM7S2}HYPdzG6sn?bdeYcE&_0?h)EPa*4Kdv=7h4) z#Pxb?yVyy5L)_#Y)Co?o;AO1(KY(BXK91qrREQK}A&42qV2AavA788;#eBA}UfJaw z!Rz`I-BcE0%x!K_Z%%M47J1&Z0rUx|3BRZ$?12R>#9dNMpj;=C_%%$-h4W6M9dDt0 zC-KcS9Rq54|4R6Zo$}TOAFNc|{y#9Y=FYwF`tEMJ#3u)&2hz3>(a%6_Gw7r~BbDvP z*&nkDq2oMGxG%-M`^tZ2RX3;nedjEl7>L=hKF(P{7)R4cothegIwynOypZDqS}N_Gyvn`C zM}2?9@%O+3XSDqkrY?jv4dhNZ)3Ts!+dzQ=htKHz0-~q@AAZ}6w_!k_%XalPaHA-s z(ezs(UuLEO=c@AdgPUQgzhJ`!ueqh4E9*AimccJVVoU^_hVMzVa%p*qh_dslI6C^4 zWuim-EG0krak37WL{sdr7?yb4364SIa~BZLpv&&Kf(!*DS%A-se=(j{C^Yj0bD6|& zd6^RLh)F)L-?#44T{*-WC0?^NzDuB07u!Ps7?d+N7xC@E{#RcuEeC_qUio8oW-KCO z63ku*6BfC-GCXhr2^h7c5{N|%*kUWj**EIb8jV)Gf3)Ef2CB^}fLtpGhgSAHek>1Z ziBeCVs$oUbr$DIbw?A%F<9+x-TK2K$0kFad$At(>Ka$Y$?0SvM$7$T$E$}Hzy0Gxj z=Kf+ysd>|^GCm~2Bw90#SYG=xW|q#P$Sj=&x#A2t4>2S*_jcN*LHNBFc?WX-H&4S8bauZ)HJt1UyK!GRU|gmxDn88nqB+lkImybJ zN>j<5?}764qWri#w6Z-;d{s+}k!zPf{%2#O(XoS-6d9OpHg+ybcK zJZ*3U2rMazZy~!+^u3V1D41hTQ_R##1MqvUB_h}XcCw&^pdG51G}KY0HgvV~n1acu zQ?w`s^Ke#u;+`PR;81g;0-O4hRJ2@l2?wlws)9Wx6WcodLud!RCu#y%{x8nn0xYVv z4I3Rq5J5yFB}J4@=~j^vkrwF^kOt{SNkK}wl?G{~8zdx^#*rb1?(RA70{0hxod5i1 zu6^ltVy$<*@$`K^mqi{WZ_Zh45fVElUn~QrI*5xQ8hnVm4U{9yL2le`py=75czWV5 z>@az;a|O|;OFu`+u(;vlU&5oh@!;JB{P)+?BExGJGvZWJQ%^Sww;Zi2ea{vvimA^i z^bFA_5O2o0sBh1+TPi2$g&Xa&xdi|M=gBL-L*m`5%mbw^)w0ciDi#gq0^FNfvm zPWe9Hkbv7Tz1iuj66U&d^d>XS!}rI_3YkMMz4|05_RiUtd%IHR;{Ovdzx!2}dD zP{Pnb)~$q{tJwE+EiVIdR9 z8VEpD2lr-@Ug!S%q2w`r_J{&Q0tmJv%f3H%CC8(0x>eUN-^U%*>_WQ9tBReSvFo16 z3_w`dVChY6U^%peSDHYzz-f&KpA|hJdvHDA9|4%!t1tj>{sJfw!)-Ku3gF_L`r9`c zqL#08lM1+F)eY-F#Bw}2k^G}zGvC@ZkZLL}1e`xXzybQ_1>tFkq@#~ha+D^O^JP(+ z5q?a2)CetJF#+YeU-`E>*>9T0)Ry1o{y=&FP;Tc7cwi1lALcZ=V_ncIoVw*s{Xyvj zNk+D#ZO>nEw9;lDsgakjq0!;ycmOsq1Y`r4QHWt@W$zuo>*`h6>3?Cse-Qv|gn;rZ 
zZ5Ox|ns*59Tax4_>uJiJCyckluPzW3oau?x6p%K>-k#@H#na83#aRRBEc;P(PpdZ9&q?nsj0;+6QC%hEu;#UCl zQIgLu8V8hvSXmX?eAU4NY*pzmS;d+-)WEdO;2PJ1NVp5j4M0(Jw@<{nSt&`(?gpSV zYf`{8LA=id@@er!riV-@oNv%-FHX?}F4~Nf;HSbeO5yyXk=H#R6G@}j% z)k_FN!SUKphj#@80@|YgC@HD!0Zuo^a{Bc+9>Y(hb%42oDJ*E`j0P|+2pw>JR{H&T z=?(i|?uoRAY|KI=zw?EMa+bC3>Rkk7mX7j44dFfc>;Ry?0dxm&1at*Vfy28cSsFBg z$2E@pZ>9m9W>l#+6b{H^2s$~Vp;v=g79e5=m71b|_#ol{u>+xvngB7@kWcgRC&hss zb#XxE?Zy}nthP2@myq-p>4Um|>QMBI?gM>9|K)yz*nexKaWtCw(pi^2HxrnEzMNCQvlw4c14PAE0zzT7nWxfW5&5fk<|01IOn8ecqti zfWcml_Y{O!yHkurmQGny^TP119mL>40m92D9jn<%-qw%J1_ePuJ%I386>%qnaeVV~ z`sPx@S@lFu{MKaftaaOOY(gX807=H;7?eK%q6berwJj%$yPdL2VUfx(Ge@!^n@Rs)GN~Q1Y?13&H_FM{x zLDIlm6yc-fYnpsqcakb3!E=YE2nG};YoPm2?Qr#yll7Xcr#1ZtD#upDONfdF^MKlq zyNOjB5xuf?D75_c%exI~KEE%mC>|*v!GSG&Mds<;*rQ!(yTX9;PYJvQh+6`&VG<*A7KEGg7+O2~* z)UdSk`kPLCs_*XZl_ysInnop{!vIvuug3ox0LSu?p~FJRhg|1CQv3cNnHKP@SNxH~ z`^vOSo@9X8`Yt$I08j>lL03RN$j*4Yw0Kts%Y~;(D~BX$=Qtjab6|BF-dTc zU2Qo#y&fc>Ectn~+cG2t{yCH#M6sn`$Z(lm($9d6o0bL$bAwoP?D`icMVHMaQN-{2 zA)Ht7mK`U^;a%i{B$@sj{KaA%lK2VWnS#}<0DmZTx}&}bk_2sTg&o(J9R#MKRN$my zZ16WKFxErU6YClC*zLmU4~^+;=>87%_B@zB4hDaL!TheY^5;}0)<6Ad^k$+|@txio zSUFG%%xC5XKwtdl3ogNM6+J6B)+gnGa2xvTy@^8@*yFtOK!7CHjX_a}i)U ztRv=%nUEApoYz~e#Z)-iVeKtT>9!(tUw*;>GThA&nPdwkFaRj;OVcqBs(SP9ac3}K z)11a#_b3Ehhyh&8oSO<(<6sp6asvJ#smw8-wsMx=gB-`qMou?$`vx+;tBtFI1`oxx zJvpB)Eqd#)SGFCqPSbVZV8eMqQH$H?CW}qPXNI|zzq~cTYmX?v8UIM4>#2&k0fPXZ z7er+iM3^hNopa6VF1)-je*Up^^;tm}Om|~>bALM=k8}G!vI%()`8kGUQyUGKp?p9Y z-+Q8G;}EyNH+{>d@5l6iWF8*#!8i%Dsq)o;oER8$f|7da9RS`Z)t$XOJHc0bCnQ#E z;p>+8XZvpfCaP)xAE(X|MyplK@XWs^-AAOf6lfdoXgD3fOphAg{(g3qxe`dsXjZ`H zGgd)MD5vqw=SeQax?f9DGy6SP3@P5*)z{aIqVx2_Yd@7ofS48NLeOv)0La$Eo&M@x z^GlkpGY*ICsw-hWCzXK7qCGRC0C2t&#Y?G@Kp7q&J;K4lb>gT2rUXhTX(2nZ?hVeS z!&!Zs;QnPUMDxwQBRKU?dZH>dfo+57F4)5WFWvr~(>k%2%o?O*3OnwD19c5-(kjOl zUg57bX?ck_En(-lr0;(x2>rn5`i}K@r)5Na3LGLJW7+Ia5#o60^k4^Jbj<>ShSv1F z;MF@A?EV0$I324k#qm}_{?M7E$E?yA-c+osR~2`!*cjgVV-&!nQj!31QBN`o9pP+F 
zdgaP8#CHf);kwpS0d#-J#FrDKJf{p`PVaA|1$1f;Hkc~P6j4(;x7m*`8LQgpSwEBk zBCQhH8Bn<|F#?Xy^0Av?Pn5*rWDn7W6e_K(1-lR&$_tQT2@Rn~jfnLSd-Q)J4`*;+ zu*%e=g2QXA4}NXHnDD^d37Q-*iBPKkmuqNzDONwXH5R~YZuytBY-8@@z7MEn1|pwi z;djjf1HhN6zuBB1Q?vT6KQxCy%InF-rc?dEcv7}|_SAYF`=`K6LUhurEe&NHUs!!X^8k8O7;xmZLG{*%@X8XSS*G0; zIJtHG5!wF65r$=|8*!O2`y(?mGT^X5m8x3|@wBHO^dI0~<6yR8GE}6}~1q$SW z^#lr7L;tsIcIgW^#{HTf1<~FC@Bl=pj|CX+DJ~i2^?`Tzsr-`BFOUCO>HX0EN_561 z)>V%I%qUX>$PC)7hB4HQioHUpT3P<_yrnH9!mOjPsi;Qavn;X2U(q`o7YV3289r>_ zLFMD~-UqgaO*e(f#|Cn&XZssxd^l09za|}@WRIrL%GMW~p1p<0sxcSMk52S^yD$Pn zYD3OJn+c+=q9FM26qKM} z@z*;8Ca}%+E7GXYP-m2nJU(@*x5HNw@ikR+rs%R?$A1r+8sHeifIUwZ?ps75S@z;v zdR+4qK5b;Do3^6Niq)BP$?*mV>nrorzJ0AO`e&K-X7wd{%8pVmtEhI4Hfy+9(=k*l6rUJ z31Fb7prn|}Q0c18&r(o%_v8!g#$yk^_qvpqK?Fdu7Q9E!I}3OlT}ElAbpb*N6i28; zw&MaO9h$jp-OsVFe`aT8=^y3oL1+YsaMGfG5Qzq;P2bvV=uGo@4X;oK?#v^RG+eX< zj*b-ujW4o&6cjnn9h-45FO?M0KlQyI&k;=5FVmaR%4oRD(~)74GXX~C9D1DrV|7N? zc+XFnb=J=@*|e&BO?BNdGY9a^ik5S9dI^GK3)?~0x01sEYTK;=B3mj=O-<0gV0xyl zyUOt_i}Z6?W3o5V*C)Vby17Hb20B)FZGr|CQ>&}Kg5_r=PLq!*L|w1AwcOO(VHdh@ zj#*V*J=GPx0`dK9Z&T^qzi4IxjBZTaMS5%3*Z+(WEaKtoRV)i^_0Y0~zF9yc#Iaca z%SJ_T=5TuA&DnG2CIvUoc{Kvt<9;k|(=B0pLCW9XZW1m4;=`t5nvJQ{;8^_4A=Gq} zwN%@J(d*Y~p0gu|R?SL)WF4K|VI4+JPX7GRg5~toEKRm@wB+@XS>4{e`HVXZ_;GxC zpm_|SmMnH&3WKtE(2K}V;kL=|kJT7dQeml2J8oJ6$=OY(d${)Y!EMONVdrzPD~bM# zq>kR(z=MDhf6oN)kpH21yJGXduRDABoL#KsW1oX4rxsN(u!YF0RBd_JO^VNdsdJ?Q;&l+Q4SB!or!5l^xy}eE z>hh|EnqM|QlLVtdM(|8h&waA{*7@86cFlx*jQNPt`{I&0bw7Gty^}t=^H;0B3-Ia6 zft&-qUibeq`P?ie#QlQwfv#*dDesdkf_}kJ2;Nh-8bO~!&Zirzs--)gXC7Pyv~z*) zT-oJSOVIaVpaUBYyF3wyH$vfjkUF7$0xm=})8XaF<2M|(0I z5cPTjC~f4# zxO2Vz5`XA~j)1g|hD+I-zPjgHp(8&M(S9UtGjqHxsyn_3SgkPtJ6GU15^QZ?VL?U~ zAfJO6p2aae0BLzi|8EQXCHEFPh|O!8T$lcVA4Ccr0Wcdyzt|*ZB>~H|m4ETEy?H<8 z2uu+c^%{*HCEZecp4(RDFqwVVm5RswUeou$%v9CnL6`HU**OEePk;hlElHl^?cpuJ z`UKLf>E;UbE|t>9WFQ@7?{*{sD?S9n$AIaq{y`gTOflJGzwwXs0yQJ8?on}6LjPa* z)iC}|l13UG9nWJ}8mG?wx(9+Ng5Wi^F?^CgK;ztVd#Ni5P|ramfgs*kdgJClTLoW) 
z7l5gZ{f46tcJtqF8Nc5ezR~Qi>=E8$T1eF(P;{{L4&wQ9SO!om4BgZO1OHRNJ`WRj zNu2Z&3rRqXM0-MP;X;*#9gVSRzcLhIF>JcZcdO0-91nt0!2&@vPm{Am@(gt9*na{x z5{O7kfU^oHzl$B*81xuC%A8P-A8j+kl#3J_v@-ySbD)}Qoz`N}e@+MpBGChA6dJ6v zk5k(Tp>^j!*vdg{?^lMcx3kYU2-5?^8s23mB6Ju)d=i3ITM&&Q00aW^$PfsF*oEA| zDHx=>q*(^GC@J91zu{cV{%@*szhAxT|Bd?UH6H0t;FO`D2Z-cyQBu*Xl?DbNf;xRc zc}|58Y66GRIu<&N6E?i8a?B%r|8n5(G06Z1)%mv#%!Lqrih9+Sxr{6z5Cy4B0H<|! z$(x&K`yWWLzz$Yv2OJ~PpyJMnkEn3?Icg^Kugnko<|cUf(upfC!9dV5@mbXWmOe^& z>rp*-u#R)oe6Jb-H`EQ`%{NgXk7E8(FhfBxrolLMl<$A?mk(*|42T$+e0{{@L_fyl zTn}+q3NtMq;Qs@OR;2)-G6~T40x*(K6}}~0=~)032cVV(p^d~U&klNbSz`xXJq`PP z{9a~Gj%LGOB63IYOjrIPJ<@fM(OYO8!?Gm zy0D*b)<3(X?@M}3t>Xy=QfZ&iiYJ=xqDGi30Jqq-!*6QGTU32}L8l!R#nu@srg;VY zz7DQKE-Bo~C*}N9$UPEAim2};tPHtlxIeVoCLCK<5Y&!87g5y1>6q!hYnHt=QFHs; z*~PQz{Jyj0#dq^IQ^wNGz1+pD?-*FM)0}n|DSS2W3?Npw)r~D*VG|KO2%!9)hosru z-v5MG7Gy6J_1%;AfGp?)!R8z_yQSD~(kWVI{Nm(hBz@0!69P%Yr7nT(FUCasWceau zvaYG>vahUU>U70!_A$|0AdYDhc)YL^u6Cb{|Eb`0I!#a>tJWDhq9U;?QY$PrjLH%I zr!f;8JZN(2hF^XUi%V>TEwwxb?CIoTSx?k`esUMIG85L8p{3MmHm5MLCHAN(^EWZ~sXz47eUw~mkb@~0kWgeiTRGddn8V1|pn9); z!g~d#!qyj9V^0At{*i(gVKLRKF;d4ieO8(Cm{Wf;U#AkbQIEPw%&O%lcCV$Et>e>~ z{xwdwk$bj6`5O55s10~%7{(hnPGH`>>=77-oqxBl3gtlk=M1az1-J9XQF=5L1wzjJRNQQTu;rg=rY<6kz-!7`9elH{cS^5&1$JD&$MlJ-kVasZ?B-bm zK_45Ft(HG8fpT`36qu|BWMeutZ*PJx5D9SzA9=&C4uJ7SWa&r%(YRFv6oR_7dX&ak3I5#}mt@`ell&_Wiv?gG5B8QjKz<4`7Y zqfF|^qHJVL-k%msXx0nV4H$>Ru}Rn|g5&Mj;S==pHqQOX={7{ahNZuA60zi=6@H>K zl_k8=e?UfkssUdje!~_&bYjMeAZ;61C(oe34ph<$f|^@v83*_-)7XW7IL_}mAtfoE zcnSGg)7OVZ+qF**Pwjitc#C(BanF0G8dS-+bzYreT-Mz?PY$erA(!XLnoj&v|aWc947M?7z?#-1vzfa5y7 zFx->p5J1yOt&m+_wF-noVm*W2692%P^{HVs%%Gh^6>Dr@Xo8e>0-u@nH?3)Rj!Kb? z?xT|;yY2NWE5i1huVy{DIz9-vwR&<@ul3evYY3+H)^AAbeg#5LtYDIGGP=58eQkwAn{#wudFgA)Oj?`tYxqu6 z-*`x7Qip(lcbVH5+@5`2L-iz;!z8TmtYl}DBYl)cg!uTtgF3zA29I0lqlcsNE0tuH zvPaeQ;e{I~8=eM3^emeEsY$9YW_T-i1?W9`x7W@aq|{ZAF6jXIkta)wfLQJ&OFrmsgW8pO`dCE`#2;Y`>44+J1`(_E}azD$;B7dUGn!G6sp? 
zGCr2KdkCv0&4LBMVU@W{+2}IM8|n4@`gkJ1Jj~Rj@|<#O7Qa;w{8f`}ULII&niRFU z{<&m^x_~wygzs3qmIH`>Ww^~wyX8v^;pY)11;$3k`g*7c;)4!g3xLZ87}?DjSd(ZN zKNUp$ScunL$!;RY&;-p20p)BjI^!Umjesk53jk9zHb4Q{H!<+yjAGk?SpjA)9AGH8lXi01 ztxwdu$!}Zi^stT&t+4hjz*zEYa}8a8M~%}>p5O?D_E=tny%{}6g#%@j9v~0Y%w<;ACTIs&_s@ zCQrldz)?~!mmNhlu45+>XUy{NGWEl5x zkhZ>W#m*3}MC$%5h_VQhV)G6ihU6&z-IKm2-E(ms+NE~`+{v+5?C+jp`8S9)<$Qae ziM*ciK-%{#BFcVpMs6X2*WBQn#WbFio$uBYQDI`64%OWv>kqSq+L~50iJ>5-~B%UU+5pRP_|6X;nHc`z6K`$c}HtYjY%)`2x_6 zlxv~9fzk^3!mFmPnQIsl28Un21-Hj7bARY|+oOENPO-a6di?b3PS7HBi?voL7 z3kkCG)9YC+0}i!4p@3c~t@MlH?6LO;u|TA+?LfYXIr`$0aH3E7>arQLen?`~bVY{vA=9zT`LkSoH5?`B)13x;GY6Vu`ps=;*Vs9w>@lyE;Fj4J>GjGWd(G0o&K6Lei6(`~FmqUVnG71#A{<9e+5D)`bZ> z!wBqrXMam%zV})~A@Er0smuxYqX7SSEo4@-f1`qL<@ z6=HclA20`72BA&A7rMi)E1#O5v`Fl2B80Olf_Z{U`(m(CvpKY2UJb2w+ca%-EM6V^ z9zgD={@T(DDrTMQ_)?B7gvfwTBv=|QpE9o6bq<`w{Y$Qdn+$Ywt)VqFH&gYJjiSL0 zUDX0AV*}g+BS5aQom@o70HoC~KvpXL*S&tGL`4Tf{#7Hj80)!aCtL4N;~8*rn|i3b zv!u~U+Un!;7@rhNr3~Z9z+^h_5n#nz0Y~aq85Ytvzq5sK&q1baCF72PL_|cy2MHXt z59975#tqlu;yN#;6|u(Q11#`xp2Px;#um1l*PENbF77#87D|8q2&{0D&BxbKO>(?Q zpibP6#{(WQo+j!1De@n{d`ADEuC6|)JS3mywIp_Y=o30qMd>n9zuWhfTB=_&bKz)di0sdCkV|uu0P}|%@;htHZ-jc+ZNDc zrIDRbqI<|fA>~&uCHOUo!?i~N+E9CWA+Wn6)O->(6fQB1O@CY%!l~&cfJbDxwHI~O zG|R4ZdX%ZZ+%4;;U3(NTdeb!z*aFMa{v;&XYvQC%3^ZBk_fQhdV?B0Q+G4pCYCc`o z`p2Y>XT@T@D&lr&Ck^uk?folQD;i1tn+rn10pJR=KwCpHcz1Q8poyI-!}rV`+2!z5 z^7KIG?$t`a4biqU;b@&6C`;T=Z7v$XQXht|diDP!@{f3BtmDBS2}eh^sL-sH?CYS6 zq;2d#r}b>e{(^Wc8oxz8gXYH@!iUih2g(g9V#A5pUL%NjcD{N|9)y%WkTZHl6dm5i zfcxjVy2gAdmmf?I)v!U*d5^F{B4!%%BNd&MH-pKxgFqtN(mf0DX_}(8y73_L9zh|o z8-my?W56?)B%$rWuLuZ#XUrHmq=9pvZ;`OVaEDlqr7%^Pe6sa>$n@0~$CHKA2a)lv zS^HlETDE<+VoKM8(%t&+?z*k$uN|df<^+i|AZb?B^o4L9$IV#Knq!f4`mQmGHL^q) zQ{pVd=GL^tJQNGI3&r-eyl266RD|pMInE{}L4)sI_3YU2MDHr(bk`l63wh6Zv<{tk zm(R{6B}EhWi@f{E-uNXw6G?nv_h?MiuR!Q|ol_#b5>zgY)Jb9A`H4zue7&zmmK8%Q zW{fR_*9$~EVxvb`^c)>^jM^o`E-3| zu`Y9SPeZBwd6#Zw?t%35S<@f4KT75KK59NSlCo5B 
zp1Qn54{GL*o5}4%qW750A?p_`wuH8q^+%AZM;|}yDhS+(~P1LB?1?yHcN$HcCMFy+%t;p6OiOxVmE8;`omrE8IA>J2# zEWMQ}#Xa`e$*gl<9C4Wjenfc#Y%{ga=BOZ=H15~*T*a|b(u(U^|p z@MSfC8(QKzaV5RN)4HC?n1;lJTwR>(XEZfxBu>aH!C4rx-|8@Q(Y&1_-_UT4E6tlI z_9-&D#9yBty>(^DPj;y$-e8^I3Q_l=O#pc8bxm#sj3c3uW%ldW?;vC1G_CYg(ccbO znijR(m|CN2_mANBV-;#4C5p0mCt{TXG#z1kbp( z+#czto`p{g?bRPlHg`9(2l31Y+@!)GiGJqZ@AqMYd*t?&tp;K?mF_ST<&K2iS1ijW z!I^7T%dsk8kC;9}3^i~bh)SwNjQx*1+E9KwB`(dA`UjxF2CBb2p`dQ11oAhYCq9JF zQ4d1_P`5LYr;DVwRin3Gh|nGQLtk~hi2fUl|g+pPCq8PBiq-@?{kwR?XEqh)D>46IO{kbf0~)j-|*HPDUZwDgCbr0P{e2nPh{HPW8{>(CgTma z)6Y7^lY6m`8H#*$wT0JN>>kSV&$FCuXYxP1!KcK!T*4A{XEQuXX^ZDi-~gECpB{;~ z9G3Xe_g+V2&UW6Ou8NNR>Y_4o+LC{NQ)IYqhKk_97nTi@>Ds<;jAdW!l{D(aS|jh) zbLQJnRj_6x5#tkGav0&qm)j#LFh`-LZ)au?gHx!~Z9~&5!g47{qjB2==$LATSBRrJ z+j2j>&IP}U@3`!wmiZZQ*gyOFPa-5*IHvjEl$Q6HeE#3@&i_|MqZd?5fAcN>m%Z2i z2blH$bX$q=RNZfv9Vjnu5Wxd7(agH91pevO771uP!y}Rdgp+j;MnwO+R%Nw`*rR3@ z|87+_u)Z{@y0vpE-7^~tSED`Yqc|gh}@Hg%c z!tKy!U~kQdKLw%trfgSb@Woc(2X0%@4!B6&Bxm37ytf}f=^J1t0(}D9_wHKR8~ut< z`XMNw*_GfQ8dL1hn{BA->U1C0I$>3VCeW%|>O#aWmXLw$0jV%I(GYX)=hRynf71kk zJH*6#tlb&!MhBcJC9a#PsYv0aK6+_P4ATQvAh9_xYr18~nD>ovls9q?HBtz6Cquc@NPA4-RfK z77O0mszp^btAZvjWfN|7bqii0f60)xKJDrm7BFkXcKnU7Yj_m+^n3}!8XvyL@1VgP z${k2~ZMa>p9<=)5h}|s~|6Fy*BXcK?nuyZJ=O}fin(_J~_4u`zAcK#j0$N&a%mckR z_4t!rO?CEmce!RoDEinRjm+Oj5{|@+WPGGmFbuji!`$6HU@*gW$>1_ozVrd5L4ex} zopTB1yj!}#qpRzv$*L1yb_?9O^LNra-}u4r5>QrX5wk_F9+Oe39uatqLvrqeLz1-9 zzakf4po#0DXlc%~Jk8*qREsN_{y52Df{P6Ikl%@1QiS3`AJ`CPiXXTZ_>KY%&0QW8 zYE{4YxF*(;J3x!|(CI3yCxwf08vo1N&&L{+0!fj{0dd!Dr8PLnv!8`c!Hc>#WBt5Z zNfxk+?c|AXNb1@W6%i7}Of?kiGn6Hfn4pDzUd!l6>8R9=omN@Gzt0*M0)7tjW^yx4 zwqGye%35XlFvV{!Y*_&@Vj%_qM!@=GqAc^%jZt+p ze(=YRYb3w+V}Ik%n`3TfDEez9b45R({Z@lH`N2+?qP5@QeT@Pbome{VtIP{ z&1B0inds|4>`bCH%Z%}BAtxmGKh~V{214Xy)Rck4ncQ{Q;ow)Ci%V7a1+kKk>iwGG zhBfg`YTO#irS&WY4r zd310Hu8_Kt{{}u1*xw&Bv!OZJp)CLW>3B843)(&}6l#9z3w|p!BBO;DQ$8W^IqxbR z^>5PMIyE*oVd06Dx@=P8P7AjAoA+O46h$03tOpxnZQKcJy;iz4L_nZ;E_Z!Bc#+YH 
znDY6%gatI%d4;fcVe6r3zd;`}j_L}ii`B+?-$RLi(xMFW4k52_jrOT8E`HVbm34k3 zW*}U;dbw@s++XhoIX@ZxfgjEMhpjw{Rr;W3bM8k<$9Fh?Cd4GL^&jwEHI$ootrXag zX0-hNjY|W$HZ1g9^`@6NN5g3vVY7BE?8%*|y(~U$Mp0h;R?N_tokHtB#lyK;v25cU zIg<>-=TZ_KpM4SupUiYWZ>zc8ouP%8y!FxVNA3%Otv!VP-503ERtp!Ql%W!4q9Ds) zI6;`A!9xt-Yl-L0-T?wGFpz`vW5bK1^R^1QHF0&5lo++=>i5^gSLQ>%uE~4QJuWY- zeYjHYRJD|Tv57pYz$U`oON6g{U2*L2D%U!U^<<*37Cx5##-zs*qO>t6hTZlcd&_7^ zzskgi8Qe=Q*rdhG}z5DtW2N^E-&-u;Mv&H1M<{0M}xmT@i@ayWltFR1-JFm^%7LBI;HK|ce zhFmDv7FMh-f8rnv*2>v^;FSwyed!xTw#3Q}7(%03gG!w}+VHUW@vF@RzPdho_LOUk zjc0XY^(?7H)2*0aYHG56yqyFER6LnK4k+0TbC%` zeH9i~gtt$5nj@P0aXQxiAvnsUf_mSWczUK!ue>&{qeoFvCo*L7M${#3=2;2ektssx zZ`(5Ia+{UEoKZMegf*obQay2g%J8;y=O=Ad>L9Nn-FNM1Zxao(8m2gQfh1k;+Kts& z#ZA&syL%(i50Q%CgCq^9?e>{U8~(;eJiW>yts8q)q1wO9u95RuIH(^eDQF5KT8}Um zolscWpQ{m~_fACfe@kjxQUd+*)2NYwiKThMX@4sR@!d{IO*0_da3;AcTjFbv0ZQS8 z^>b>_$>3g^xsT^ie9|A!lcLR7G4ib=60zBoTI=@ous?anPW>W$vpejb?fJ1Dw*3>o zkJiZz77DeJ1ke10XHLOd*+xh%4PqGKEFr;`Sy?Gt-L zNwQIBn!JV0KxVMGo)Hb7el`8IQq*a5HqN<>Y6wiJvY)1?uz=@2)0}-~mkJO~Y4eWL zEqGOLF{EV*yzujNrsQRThE*AtjrgWlA#8b4A(CfAcF(lg0(z@lKs=>_&q-{xLIdb6 zV8Bdx&*pk7XG$e?eeQUT-Lzv1G{onH53_ABZ-S0siYj)4)d8@7th5ISL2@LlN6 z26|8;y+OSj^WEZlDTmeTYHN>zcW1PbCDI$ufb#5U$Ay1ao%3LT_p4=2>H)>5;_%Wr zpVXw^GxOCB$xHJGDWM-?eP)yzb4p`phDVchDOQ)0oLdrIO$$Y#lEbHJcs_eqoLrFB zjc01o?&Bl}e`YB@qC#lnib!%|D{^D;bo+C;bU#ZS)S|&ULX#m$`m_B_6bjQDl8JLa zyz)krmw%mES&UZ@*jycOS{Mo|ji}f`9QWoSwU@vLbrBT<$j&WUUUIgVYUzdnkA}~8 z?Kv=@Q&>%b9Km)kwj~#|=l&4vnkm&e@hZMSl8>~J*X|D09iA%--Q^3+rpecStS1Rq zi^eoYfAI9!u#RT$eQ+;rME`r|bR4$}1q_-&EA)Uf+7rF0&z>;vX7OvCj~CjpA3A-{ z_+f|!L%+5c=@UQcsqGI2A$oKePC+OqlMGBL7>vVo`<2VnO6IQ8XOCr<8V5KT(Yuvg z$-Kmig~^Uv{HXOO@fbs)p>&l<2VbLjJZnx=_VZe}ip1RdN7;4hzo-6ob zDYNm`ay^Z3uAx)Z+M}bVzErgZ#BO#6=%EqIK(6{X(!AQU{~kZ6n>V7kF0O8DI@j2| z7jGl*WZOjaDjDlOK9!E zAKQR)=?#)>kznhF@+n|x2>!tja`fD*J_?aU=dC-bsPjO|zS@H{UsZb99nUo%TVMeJ zgGdl#T`2C#*3ZCcteUde!E;ypMorJ={DHL5g)Ss`i$q8E*k>@%!#c#3AA4&sMdD|2 zy$ZXRYQt$oR=fYnJ=Zj!Tv7Ub1defepHD_|Dm8TDM2}@_Ht1eKJm3KfMPzVp5*f>t+)W$&yy 
zF8o?Z+1yPEu-{;M0fY@Ofe6<+w{iPy;DGCXxT1R|A2YJzfh9Z|jKn~FIThI$&CYuU z42=-SY|`Mg<;A%lU^cmfoGUSN42lRi13cPfM-9kU%I!(8E4EI#B6-jUMhTFM&d8jj zte;7604S+>$<2!q^ps3r3n&43QPHdXIxURIMC+XJa9v3@W?lb8VfMD!M4SQ6k>QbP zq_3Kynr61^QtrdGt{I7LRdzd8phyyW_PiB{8GD1bCGrr(ue#<86|-0rsnSc&@2%sA zS6VqSR!r45-F&z08Rc@A`Df#qs+bSz(MKP8oNp05=4xfrO zR($)lapz~c1^Wz+R-#?wg@IwHSFF=v{rjj3?PP=fw|;Jpq*N2RGO%?N34|9@wY#panhxAz&+~BA0*SrC&Eq`2!f;(e-{@cbaK$pj%w@z7O zwYTGf`F1cxVyiXmsvOH%`b31={Y&a7mdhO%+&_!T)@`&lrbFT^=Mhwc~AaO+HvS4+I7hnBSD*FEguU@+U`rc~Z_ zs?ahNpJU`boG(E-ofjpuGBwe=L@RSq57lNfvq$s34cV)PIspBmoGKHpd?ZZD zlkxbSYjG%~DgqO(UbJ(vZ*_H8XQ>mc6xHj0bXK05)n3>|Thv34`c02l@*@RX;T8V( z$0FkQR0Y;e`r{F=8I20nx+qrWn>db;rIB=DJFeWqD{;^Tb4)L5LixUBs2S@!NP_W> zM%=ZNN@5b3QMHNBRsVK$cnsMdf3j|GU;X&}UG5R+3Ud;_oNZ@bGOR}Pai zhq;1sIDTT))shdgn@d}RGZwH%6G-Y3jx~?r)4X6FX{}9O1A^GQw$1RInpf!>m!yl( zSb(OuF{10!m%=PQhpzuB`XP>2GI@a49JL*GjhE{w_ibG-@=C<9?)JBuZZQL74W;wy zbTjm|v-MgMb@)N<$YrC%3%O_xY@gB3R!*2-Ob+y5-u;%9Y00UIT37LQ*F*Vq<(ssV zg*pe9*-%)pMMpJySX20U{oGP2l^AQ-&}2d~d(nR|JhB3)hxwl$%DQNZH6aXuAvNQ0koX)WM44WoA!HRYXKf0<i>vul55f1Cq+MKz4>TkeTF4TM9m*vN`B@jE@FGq zbi+MR|IE$K9&}++#M#+DX^m!nG=4Dr<|I+`yZYr2?UQqlM&+^0HUbi@j*ZBqz#@o> zc#^@AuIzys38@^hQj;50b~=)>_+3K%<=_va{EDN=t=lQoVbpo-xiOF%aQRA$X^YsC zYl27Eu7eSEtcQuI$|aKRaKNU@B2&M!s61#LZue_(0dChC>4epXq`2FVHJz?Hk+0xt!xNirh&HjB8i_r)ehA_wz@d!5+PE zq&mfIUK|o7l<<2H7$)c2NJDCxyEL<`!_36;ID7vKDG?=lZNW>>a=}!ytJJL(*!Q0s z(iK!*r5$@+Oifx=%Vn&aC-9%h;n&ej8sU2?S{cJ2gu`(#9Lr%b^=21qJz$G~+H|-B zv=9d`HR{X$Z3TajHQk3ZJ4Ktlqr|o^)9?4oK8{BULt+Y_HUJ&=vhdmC=z7BIT-hIj zD|0|}$o;_M`iqnw0Y3=u_U(6G#&+uI!XSJYIA~s%&s}cdX18xR_L*}Le)72I*sEy9 z3kMrm33K1K3|7Y)$bn>+)(B^ij~$qrd%e(X^|dW$k<{LdIw)yXc=yJKDCIe6Mh7r6 zfnjGx9!?qdydY+ANd8`x@of}bC2L->FgsdFswk=JZDwXxH=JlerlZbi@aD&$P+O+h zxA8C5#7j5?RCTgSXLPp(1;$;rw=bUAe)ey@b##rE(;f8CQUK;i6e!dVQAyoXy4}SB0xiKLtQ|f+9Y4G_#v0)1ryF3socWh+hlWRf z4BlKI(Hk)AZ?*dB_t&24|N6C{b&zvQ2brT5V>jzx@D_H~v&632Z3)68jO<<-W`f{H zu+XVHhM7v+T+%FnMsM7`PP->yvgqX?$nCI!%WS(hAi3gm@ 
z_ERLVdY>on6S5GsfjF?#&PLESwk;3+67$HH!it+RIL7H+IzhnQyaf#87Q;4vw|!=T zVY%pf6~sP>CpT%Vs~77qsez>hVjCTnB%n;45j${l>A9wRrrc`6!(Dj?LVBNwdtw$K zr2@Rs!%LA37~`AtHrx6{)PaN8mFs)4^o#A6xgkYnr1|FK%GwPjIh>_mxXD6JDsnp` zYn3Rz7#2DIu%a-t-D)aoU24#o42GG`-ZOs}_DEVZgFzgA5%{tbE1Ury6ExAq=uiixr4#xREI#Y#@_`T1N69m3*JWqB&cr3LKovTj^nSo`Af4-4qH+76& z1HtH45ElbBm;x}xq&kFsJ<-<##wmQk+JX?BM#?DR6``wL^7vryM}S~YRUjL35v7kV zB~)Myi0I5FCkU`FcJ@%sr8Kff-{d*u{t@WjqpviL#d!4G^Lm(Dj;;Nls_B-Rp zZ1G#wvydx{adYmt_{llGDPQ0u)yH6TGjc;sJ}PjDeE$SKf? z>vpq#Ecg*7?>-WNxQhTJ-T7>In&Y7 z0;IZGCDq`>Q;)5$g_Xf&wnrVQ z3YN7WQ720#r^hjg`ZW^_7mqrMCf^{&!1-5~D6TYf^Rv!q!W zt^{xHWD}pB=1L^t!m8FrG;cZA;-eh8d!bu%J@~gGQeW@33&jt){|GY-gF;|MNTx5Y)cJi+*9*FaapNYf0JB&2Mjk{vP+ zawwwgJ&rA9hU~Jlx9m;I2-z!JM#ix@#<6~nSGqp$_xJbx-oAhQer~sJ*M)PO*Ll94 z&&Rlr$6>981%hu@zLzi|6bXV!*eo{HIbeBomgA13e>s4DjZAn!t)kdDU+}E3QxAVn z(bcAoeIO8y5+IL+1q~9%3g5N%sHkYsYe?(T8jF5c{R_y278{X6bMK!wVxaxG>YoqC z_N)#FOYO{~IJNhslxa*>H)~{ycX&0~urQskar7dO@U!~RboJfodBW$$ZBwu)(A|4> zydryfgkZzPf=qfp;vEnR2+Fb}%y*msaD~&rJIL=jG0R5p9}PF+-gUz6*v)x+-w?1p znn3IS6}h0?+=N=0c)-Ybt;_bWe*+-SY_dxSAt~}KnU-tnje$l|AZeA}0YW2%+VNng zm6}@Uz2DrDv^*Nkm{PpGYs5TI#1(IACpXm5|4yet_Qu8^2=LFguyTU8MmRjVT|D|v^+wH>+2E*o zRmJm!pGz}F^?(Z{xt)N7G=_ zpe=#?H%|ipJd^$+#hCvY5c9~Y61kqebnL>Ga^K)o#X9a_ZaE_x#~xm(pv+YLO6BAv z&l`)7@)X$lA8?*m9aq{CkhOT3QtRbQKmZsORZ1(b=}z}Y_V$FG;qeztbjBD|tF_DQ zSHA6Jw=f_DRFZRQW*TDZTZFE^bDr0Cv=}2({I6MV)=<*x{btMAWhLz)E;umVCWjr% zxV4FAxKAEcT^ev1Hf%NZ7gSJDS}3WO{`avXTfd+B-bC#JEoVVV$vWN4(%#xe3J^ue zQx>!3v6OKJ0@sw753ZrJhDw72r7Ra&y86b_n64A}bMh*bMF0zFzB(yvKI&A7T(KRa zaZ(iHXf)iXXUHZFpOh+H`P1P2Iak>)p${UguA0&#_%cQ3a`xq~>n>VDtL^tnxb%5V zFf%56H~vJUuvdG5vz6tFfo9oT0*m*=2Bgl>3A@72z626Y`xS&DXORBjE+x=$ z@$SrI`C9w;7H5n+fJR9PA2jHPJ*eos3+%_Q?;UL#*@rjQ07|V(-L~hTU^c%X?+*3i zC&is70D}Js)ZayT(53M_zat}>uI&Vryn+B+Hl=rb#N^z|=Kabl3E8)n!PTTlh%hHt zt0Sb^UrLGXXtkpPWb6D(t-Cbpl z`GpSU!2j^mNG+T{%dcJB!?0NTQ6g&1;f?qkpL}o1TTB9^fa^9I)+&bqv3@tDy0%G~ zc|0?t9tvC_tBDDWhE_>V*L$R_USlYe@7~2S9WUHe)Rp=59iI{B_e%xI8!Iltd+z{W 
zYa!ntFk3sOH?-?|xmeJu*d%U$H9)x04X-t7vBpJ{ikMwHwF^~{UIHrR)oRFuKaC5a zVMq|Ta~G}zO6J;z>puBbvkT2n&-51u73@Qi4G$Tg*GsV8ZE}Ia7+Q{ygV9i^^cf8; zi8g*OR;M>yQJ@Vjzf-INeaIOTX{63&)#vCnwWoU9Ap2^g5c{_KLPS0d{AuozQLLo2 zjA^@dDhJ%0({ln%8EXr(T^BpO@LV6U9N46*P~_d$sAXKq^*&?LC-do(9MP+2xin*+ zu1EzSk%sn9`Crg(?0f+=lK!jeJC~et&6@>oN4lFe>QTJZvR0qY|J3l?FC&+f!C)Ag zT`A|TRusuk7ncr_KV?GCL2Gd(;S_oxzC!wG3Pv}y=d+U1#E!2G-;;w#o4);MgMkCEdkDaa` zImWQV01E>@x22M>620u@7a+5qibkJ0t0(&n>fbZ`q)1`Jq2* zfX=8+%>?=r0m9X>5)(m=6+*se6&A`q;t8R?C#BodoKhVbGnw66;Wo@%KHEQ5WK!HS zK6N=qyT7TrL~s;-tO;i#t2Vbyc3gEu=$xban}h}n^rDCybdB}z{OZ>)D+YdCiQz5p zmuy(6D?K_t5Qvqhk>KGEo@NzWt*A>Mo1c4!`#yX?4-uTdFc;>$sJ>ns4V@Uy47Iae z(0h?7Yut{z@7s9Jl`(ee8PHdn#ic*qZ9dBl`Q#@robaV5&L|xEiN@)!9xf5e+wDKE zIOxm;c%6VCa<6!3m*s;APki(WH(%RsD9gy6JEORhz=ZMbVUt=`p0#y2OY&>w&}V5m z?w_yP9$;u)^%++2Mg6|@2w*TMJl&GwFDY**03h^Q_5n?89T&iYPC_wwaN+S(Pzq2Qk-&m(!8bxdzLWIcFbWqT9WjcxIt508&XsJE9ekpUv+4@alG zvWh1h)H2U*M`eDgyQA@f|FG-1IC3G(T^R6a1Q;HP2Kb$x@Z#vvK0Xl@n1x+}@} zw$2{4m;-@ne|goh=g!B=OP|Eegtr7*)c|5&Kbb>%-=ODX2EUY6E{PLfjDSE)wBZ>v z$r3TesP5dnUFW+*hh{3gwy}liS^-6oTo2``*3uFW% z&W6zy3n0gsCDO0sSN#xmvoE7(rQ5$C0PWI7XeEZnMw8l3kM!&xfn_khP3%zv;pD-d z*ssoOdz@_JMYU^~jd@fFyU`_x^ziFnKMyXX{bmt%sGV?+N9QzZurG;c+UjEJ4Bb~t zeBLmLh7@F*KUp`me0C|`<6WPz*JkWyI)2m zflI}`d6$P4J9C*mWr{rK^4YXlL_hZ94exdZMEY@3_BQdh_YwfG)C7KuO`B8X_kb=m zK}Vb-j<*`nUqF|LEuVeZEpW4H37%8ERh}mcFqbxnNK!229%TX6#5$84S+Rp;bSo}& zcKN%^P2t*)ics5P97vz7#D;U(z1jPC7rfI%k2ZkBy3(rh5vLFoRiy{}Le@>oR_lfQar_wJF-@Lf6XvYW@4b?~$q? z3oSSX8br6RRoa7Zbcp`=O-d@zQRqJ+ma62sG*{m04`= z8J5>qCV3&Z?dETB0((XPUA4gGi|3t_e}rmGZIjK$dIt@}2yw%UxlRXV8=YgNEv-?K0zw>r+CkUqQLni1i5>%u_n|O`F-0U&T9P z@1|N-=rolH8L?@`-IzS0%p1ytpo^OZaS|A84ZlIs-U0^FP?jvII?_25vDOF9! 
z!^aNhJ|(Ha=S{QgvA)0K0r%;_b}m~zt}r2xFP(S&UL_GTjh}T(B=knMEf=<#BG%FI z#HNRJUu7$3wLXv^h-13<;Hlbmkr{P$Lv!#F9|VYN)E6CxAS=Qa5|^5DEIo5+;L`Kr zo)kZe9=&T&1knRivwJnjD&c|AQxGq97`|F2{n<3``bf#L#CX=s; z+-M%#>fM6Atp+jXqJ3ZAiy14w!lJjvHIB%J($)Y}v&s#^6{SJfRzBRDRmzul2(Pem z(Oi)JSg`;Q-Us~S*Lt7nzTy`K55%IfELOC+nwpv*b%+A-QZ|j2)}QU0`I&YMRv1Bo zfN(p*J1{cO*QG4?-Ol0klf>*CaR4fAw}hJ2XdFGwtNu-$f^TG&Q` zi`8`L$nMrewQ^U4I_%|SS%;I56CXzik~}#+8W|MwHBKs!;Wt2s&q1sc{ciIJ0bI+I zDGAHCYn+*$8m$OWMBSYu**rWnhzi=cMw4;rQXfdE#082QqVHal3rH|7=e}*-&XzFQ!9m)Xkd#Els5lW5G%e>~e)6RM zg{B8_V`FRa$x7r)ZjM##%+9s$CN&IDh6^LBK>74qovCC1eUIsC$;0dy@BR=YAqL3*obJ>vCz;&VE?+ zskPf?PlSP;y-K_%+N9g?&d;MxAzAI^S;$}ci|E`dUO1KWA2$(RJBPS*#OA;^gG8d3EWtJ&B3#SukD;ehkD#rncu*h zdDy0jYySB7NC{77x#JJmvELvE7=)jN&1rA|$W<77vr^$6{DmQ^UUa@+=|C8#7KSac zsvHp5ZK57HF->lRFH?j*ZF4>>w{c#zfZ7k@R~-bhrm1Klo%D(k5cQPx@!5tzq>Z}S z%meRks}L8wlOa)BC9p8Cot8~w54+IJ?Be$Ima$LA$S8Z!INO#w>5>=uTr-!3FhYxx z!`vy8`cES`F;R{~O?lFL)*x7MP1s9GYz^;YjHe<#1o9Ib`^MZ&Y%`AO)88xmWI64h zt{-#qpES}}G)=*7!se>c~n%QxN^p0Ph8;?L1e9Xtf z=fF4<37Hk_(K8Rh!#q_ViXLr6zn%=IxLG{W$mj0a@SHX05nS>3 zi{jdFYy|)DxUyt*+1$7nxA9+%_OE#@G`1W;0gU-ICo7h>-@8OhK7xS|xb`~N1QWw_ zc6vnFToPPQ-2iWY^8N}F9o@s&D)I<7Dbx^6Y*30dH&;&>k=EjwI}j2$fZszeP`7R~H9O7Z$pLxs<-7E<)~r^xicN~z z2R7Ehsks`_oAo6T4Odo+SU{dDTB)66=-1joDx&jK z=U-v_lI7i^peBjp+e@N0Q|WzfEmJ67lUYwK4i)I@AJ@-axR5aR>XTzvJ=MsN>as@& zNy_pE{NA}m1e`y(C6t9xe8RACd1Wa%uDGSXK+P5y@}St)9cA91u7=6s2M^oY253a& zrzEKEzL>d>;hR;rh)+EZ@wa(hJ$q(>j^FqI!k4w_7Mz~mQH4<)#}+Rx)??Z5X{?Q? 
z+L8=T0hR06(la`QEU|h+T-{*N)w4T>iq4x?;<2!|dEUzf#MXZ;*pvsJuxI^0MDO>5 zvllD;;QMq;XMu;k;erij$24o0tu?F%y-$9sYosBiLGBpk6h+@~MD!A8R&L0SQt!vr zONpzJQ+_fUuSctcJo2XtENx9^Ef6$;Vq zjL?J^uPC%}NN8mxCoj}YXb^hSGT1k^e6U#K^X{A=?|68@<&hwTU?`3HcClXgH&XYpg|@Xs)&2E%9+zYX0^ng4bm)KbY*xeIsRkY zq8$U}z3h=!whi=zu1O)JkdI{-bbl^OWNK6qJaa`5ok%FK8kBvV@q-fN@Ui|%W(XZzOEpnADMmB-`$hs zi~mCTRMc-Mta(2i=qC(h3P7j??x%R^xNCx=y)|_FA}w`V{@D5c9JAG0c1-gbF|9XN zOFqS;>Yku?vkUQmG-+_4Zd{%E`S_omsq>|$C~>>V^0{rI*yX$2>WjinA=1z!IB+qH zJX;rq5K^We`S4*`(`+6{Fp-9@VL>g!5egOku>IeQ6+B9obna;)1h+=R8kwz;OajH4 z8&ScvK*-uwyegtnLNgU+AHCF>h_I#v3b(iEwi^JN!yJF>QeGtSaq}ob$)|^?eE=F7 zKWxkHXi|P22=*^Jq@?xm;~Cr0%I^bJavH-r)ed^6kq74ycA4i6TqiCUWs-AxTSnLO zVAiYM#A4fd^T(z8eQ2+GL&mpK|%j z=a7^QUw(rK6yW0Q3^dMhFZ>LEmobnfc1^9>)EQh%#>~I>g14*XF6S=Z%_7OpaSp>j z_c#p}9NNKj3vrgxfpAVrYJV#p)5OYT_3Qc%7+(Lt%{mwS=II5^Z(u{gt=Np#r zQX*0DJ*B@j(!JZ>83{MtM{fTg6ABlrTEuDJq{o`YZO4AGSymDg+tzF^HBCi6Mk?)T zK9f`^Z7sH!s`3MF%1A(AX9rnxTdZ-QM=c^D2TkKy6BzqcssrIoQ#MJEqca|SQ2Y5; z;A#Qz&BM;82}th=QiJ{mL2=-&fEZiA^_@9&Er(7TG?!h zdTY0`+2$Q-hHThp+eMG>zLkQQYBS~RI-Pv=3;Z3>`v7{sT-G9$@UzA3c?7%9d2izf zOO%u8B#Dt%J1Z`)xYR-|rHqw6vg)DvRAYgT()eKZgPqYpmHJw}Yq_!a^jGRj9Kfr)Q!3vEwmJ8nS^hzd<@*lk6hY z6kq=Ge2Kkjpr*(xTKZ-gAbdP_5HQnY{$G~oNNpJPe%vPb(qt__COjp-Rcx4{elVGq z^ZsMuDT3wpcH-M6lk8eW!}5#Ys?kq-1oi0_cHhm|i!%wfmkXq`^*(EBIPh)MDRhbY zFHH$(F1OdW^pUNeGPVsnee1^bZRgF$dy&({i>(bMo(#&?X{ZMEhUMOYg_7#&dvptp4p~T{?|SN07u+^sfddVCEK z$RyO(uisHg;g-ZyP8l4)50E}8z+I5F5i))E%d_uGxmyzQ3|M0U&v^enJ7Stwh`U4A zbz2NfS0ucz-Lf7y8Zkhh&?`92fO1OuRM9UiFRgF97vmMxbBGLoli}IBl2Y@KqzmLl|B+)wD*c!UirHowBp$b-gk9@s)m?4p@(tj&GY(- zDGA3t?JdVOP z*Wjisy{HI)X3{4=TN?5fZ-`6xgk?;XHcgDP-os$r`N};W-3W5Hm+pA~PRCBy?-X#0 zzCU)!sgVmI>%jTe*asnX!Zu;u39K{dw5E%ma;$XF+nE}&fr@MliS~6y4?SH6J6hYk zLa{clh{|yi%&H2Sm?M!k^R`m%`FQtXuXzd&6Weq;VwGeQNjqo3k>Mz;xwEyd5N4|S z@>-bA!7ql){ju=f*HRZVj8%)C@rSW~@Xx7YZ9obOuN`IP{N;hnm$~{4&dPj6deaFN ztFISZd}WhIQ};I@^1q|)(Au;^bZgEJIARt%wm`xvnq~aMjap4wUc|;=i~h)3rjqLgGo>gJLLZ9X3TM*jd8=AC=hcrl 
zjs5QUwLCQ}P7wLBP=>oQ{DDky1o>++0vi(_y*p;QAJE)#ffDfCp5ordCfkGSKccv6 z(tc#m#m#B^-HZ+#v8ucSpJKB1j1u{X8jv?rUF;fr-vkxn1gHw9s(s~Rzkza(m+Md(zNveGmJ;u(7)R^Rc@OSx zgzmZrR0dnacXeR?S?`{#BhHu)$-r>f;@2bBO7^M;za$GJ zceWp8r@JOnrq;Qh+Bm)5Vh-RZjEPhz7H~I1O_O}!Fy+(x@zj6EVdU6>jqytRXQ2{R zV-chAv%Alq6EQq2|1=m4cJm6?ADL*2%Z00Rw&fWVYzJE|tD_3e``!U0=1s#({!G@_ zqj_>$*OQL{&3HgMyf}B&#OHr5Is0gG&8G`fH=U#n^PfMZjrZKYu)i>%4Z1`AkIob8 zlden=39S)2ksy7g>>4{XIuDl=bdJG?Jh0A*H3hR(QjEEIkyVsmLq%mp255G7TdVx3 z&o>z1#Ei~PRXuvt?f9=Cugv_fHpcH?ao-&(&^@5H2J zq%we7VVjOTkm=t)47*5=9NeV~SO%7wH)~|9a*T z(IuBJa#oirH^-oCs8!4A%~ywNW!AwX%MOm4T~7boX#&T9#u%hP63SwupwaZni`YHRdJ*)r4kj=%eBe?sn^Us zFAM+a!rfpJki%{Fh6wUqm*bo)xrY{~l&=Q#86<7fGk`FflNUo_4v_N7E0=6e8!H!p zrigGfNjrS$)@L3|%h=*?8A_=@Wx^iX1z<7*WNEb}(;5f53O@*PJ@zPHm1w(%IZmbJ zL?v?+XNHp>;br4CD|7e|tUqkqz@l$4$F>tET47Vnc_^n#6@|-|h~lFo_zZHWT^$Q8 zO(8JOognxlkesDD;@*KOTidHS@g8Pdi3ZP^3KvxG*P&sgvtI%d4|s z#3MExGS)hIat19O@_<_(lZEd+*_+yvLRhzvBTiT(Y?g?5}&wj{L1V zcSk5*V*YT`vM#fEOJB|3jS%I-?zs)!CU9OSb*Mz#u`XT2gNQTW#Oz36jhlk58l|FE zwpZW%wBgFg!n5pcc7>8{)#rE@$%jY*OTUf#cpS4Mt*9~vTq3U}Uivf@tvB42?SZa# zyrCDI6e!;3ij29g+4hZjM#O+-pm6P5%`tIGn)y{jtfy(Kn%VLfa`OvF z-*~4PV{Q=)oxx?Uo^Y$3U=~I(lJJAV7qJdH{f1}e43kNY*l}TscuN~4`r;D_^AWe; z+}bTLHGs#}Zr9tqmatuO3d(c7y=tyB}f9cMUWF>c547iGT6 zYUXUDAvGKyPAjf11~bO-<@AgtqD^~nN}vDy(vb>6*}rsIO_?;e08oKnv?L-K{R#k9 z2o`U?*_<53?q?TFo3jYzMT%`;FOJjC7uE%W?2#|buhxIoicdwAcBTrZ#o)x`7h3l{ zE8bZYU=V?jCiry|cra9`Vvf@{`64hgTkQGFQiMz!hcFht zQ6Z?Ev&(b=#;a$wlP|1EuTaH+4XaOcqm&AhF~oE(;d|j6THOYIkEzNqmM-X3G~tm6 zBzk~%NxSDY{jj?TO?68x;@#Va_oXCDolvx~f+T~*3-U6+E+QP2ww9d~S7jP(n3*n~ zhsDAV%oh}kDjDd{e?)T(j0P7a7%iO&R^*C3_ui}T!fX`ib&5vq5<83fQ z-W&upTNp&6csE9wc3sAn;`gMd5RxNkn$@Q4`fN{V&&p&}fpMe^nnx;o;Cp|**7MGi z>8b4`|ESGzoEU#GzbfcF>N>NT1dOc{z#)=KCac zZ1<@WU z^C(+3VbCGX+qGMj(X*IEDTnc15J@zu2Jk7P_u`<(s)p2p0$%nSIzLgz#`a8Cd6*P7 z^4K>rt``-G(FNxYbCE?a-_z@Bn5;2&NCkN1A87~(IrJq(3ogLZL8)6Y*D%X7 zeM@3%ICio+Nq(_YC?qB_XspO+gz_#oC1}OJf&UD>zZ?ufp-JxCOryD=^B-UY9ZXJ$0IebbB@N6cq23{Je$)PW5qwsUXbDm3crJdMIhfVKj7TUTTT}$E 
z2mzgkI1S1zgulGo5>6l3^x;8G;6yEv*%5y@OqB3k4`ey`r8a7j81W{@-D9srBzZqT31 zY4>W+xzL{j8dR8k(rnz-#bmBg7!B_cm{C$`Q4Zwtia4GDYg{Ko;Lpu5n>)P%{1^yY z5T=)((0VyFOJW`WDJWz0;VTNz@#}2oJ#N8U^wACDaIo=|hY_Gh8;j#X0Y)5OmbTW| z;5k`f2I{QuJFPIc1<+nD_s90rJ?QPL3O-(tCh0+W-Q%l(o0H;y$(*)^Hld;|Ag`RD zh8qg~U-R^DaAg3)H9XsR%5)&$(GulpKWrx?JI);Gaq|a1rx(!}P!W9^B1-UhhuX`s z-3u+Er;)Bn5nY|m9acFtc&)rbE^QvrC4a{-cO(ZD@_kF-zfJxN&YTSPvcN4RhA|7A_ZdmVxEa7fMTP7&Nx;M6D0JE62VG z`e};bf=SXN7cMI5_qCG3eoEZldsucsN)A1PJ{5u_FYw|cE_i9tT){CBBU1Z?5gOYC zl#xJ_^%cjIh(mUl)l_cj?odzRIaj^d(hJZ~Km+|KVt*}`h1YpL(LjA<nuOP7>h=Q^M@#;^Rl+9kJW*PAW10#X!oG%x$ulEOm#mDE3JW*I~ zXy|L0Z8$~n#shahl<~1H5MQbvckJd;Wo0=EGf93`0M!+!7~=(o4-sS`%*#3vFh=ZwqV z>a)wb!8IbV-?fX3cR^kDS!c&Yd2k!b0_CxKZRX(7z$?R7gF5|zSeD%*Qtdux=w-BT6+MGEm7U(LUrAxN&@ zyW`_H;H&t=tl)_zH*bmWZnq4O8^8PoT4aqM`&!&PG%P3+;zB~0OMpvojqnXOYy8bm zx6@sc0+O^@qApNhhm3m(Cv?P7PJvi1e(V)}awV^5?pymFt{xbcRG<+HfgO_GwAHsC z6i{{nXVEKIzc9FwI*`07Ne!FHzk4L5?ZeIDnly8{=il4i#;+4JoG_>^a<+&Bv(z#a z6ZZ^QM%r0&YP-09r3;r~v7Os08*RYx{yKMGc|;?yH_+s(kO1x<%qRqcPk<5jl%365 z8>oL}r6wc?snDvGh-8kK7oSsV`QCu@&NUC+?hvMi zv^um?EVK$o0-dI8C}_!j+yY6zH{a}0(h71fZdyVw;y<&Dm~^4F!e$CDqx${{{Amh1 zCo!?t<)wK;u!RBoRN{ec%BO_~jp!ZgJ{ve2lrS0U;kUAk{oZm-ELu7;uZS|eO|Kr-*WrwZ_2c`z=%b`J>zQ`RETDw_^f2|dTj&FQT{49w zYOeFMo(quk!x`zXHQn(A4@umiLH4__$2BP9-@*&aVkW&khCZx7Q4SvObbVlU=ZGl( z(#^ZY<&np-KO8c)<1b|V`bp1i+hm+uawhxO@*sUS`)K3H`jJ3YPkTYoD0N)SVUNJx zp&>cPmsDs%edG&}8qae>kj@H%VLPY_y6%`2T%IKgSzm&zGCXD^0&3dSw5rK&ZmNg< zHU$IaF<`-ek+{?1KF-w!uc#a^S?*yVsFvxmSr*NHkS=3KoI6Iqe zFB+W6VDDw_yhrG0wm}(xh89U^smyKMO9W&CGw?!|zooH&>Htyk35KW!+bmelRD)-SXE+ktiH(0%>AU}(1GviVF@>?AFKLJ-rp6atp@Azz7oszj@`}7O4ED0uL144@Bw&HjOv>jy{SJ zJV^@?2%Kw){CXTXiKo@vc1|4{V8i>)a|anWA^eNJC%BrJy&|{--@zJWl6b>Z_B=CC z)~%rSP{L>@bqm|GX=bVj3; zvGy+K9$(}IS<@*lE}L+ka`pH<3AYDXyZwIjg;Y}xn7=H&E92)3>W{aoYRl39F1cws zI47ZX!yGmGFJE3H4=5eX`fF4M=W^7Y`8WqP3TZWzwS62Jg#dYrBD_JFi)mt zcya^}Y}ypZRhU;WHq~Dpe4ts$b_TGB)fNIfvs+D;x0o#wJ60w=^Im{lNw$G;HE}^g z-+GRkosHZ2#Fy+LQpyk^$&bg*`|N(c_j+fxs1&H)Wzbx&%DjhhP`Ua$2E*i^q!3&O 
zpPp2^GA6U(0E%J%l|Gikp)Sl#3u8xT;ptX z^jmH*Mx=3x-STp5H4dLI!%T|;lviU!64dpnL>*;B0hj#AuVajJ82xl5k&c_tc27Q2 zE;lDfBNRR`83lq@YO40^VaM1Qb3TzAet^I|i7G|Ie8Rj30DpvVLwTAVE? z+m`gQYAqGdPFg10b`2|2ncGGIg7dJ<)*6%FJh;)NR6DV{Iez^!i$a{*fL<)|*D3x$ zlPgkdA&G6{kO4rR?cU_$Qd;3(b_XUCL9=rbMu#A5t_bOYg_AXE6c+K1!kwGe-=115 z;X$r77bLrnhFxYbD+d6VxYk9r2!2$>aB{JeXydT;L&eGIed3Wd1>UKB_P zk&016tfJkm?&(ua=i}yg3<~yISjTU5BG5T9m=Supbn=Nlf&b91;F9szbDdU4Of)c^ z5=LnysJ%`_DIW5rCb<*sgVjBOLWohKAiapRA@^FFbWKoFqwF7b2%&$y|{l}lP zbHR`TdW>YDU{}z=3koMT4stULzt>N99r63xV;vF^J~F-GmkL4$X5W!V^>f`+Wi-A2 zD+T-Ko%G}itbQ%G|22dAOK2ilCA*o92g-YIP*euc(pUG&fMyc<_wKk3Z@*A54T)@v zKNY=aKLI_~8><|bl{**{m^xX;o?EFNG~E+$ee>#xL3OV*ukT!@1Bitm3Udy)jpi3h zYp}1HODJOp`F|?gQElVoMOvf3U##!`JmwE|2DHcjg)RB7$q0R+ptT2J!Z#RMJ>Y{44|A5h zJHYUWqYLRC@cX)#) zz-t`aX$_P2oh(l-+FV3zJ>dPg{Ks9z5U&C(G4lA%7vQrR-D8Atckub}hsC_tM zf)j)nysK&?h%p>G*ZHAfLO7Jr#Xma)H~#ek~Mv;*wmIdaixg9Rhs?VKhnSe3ptqtrQb8;py!Fj)5MAtRS!o0H&E!D>l*flfjKfB>( zZNR_)Hjhg$W$zyL-jA|io(giYlN=BLC>jJkZSz$S2|J7~5TsNijeGE51LUS=hbs70 z;i?zNa2U`jG5&pLI=!p-p>=5XTp2699d&^kg`Y2N+~eX!Z>14VO=^AKb8EXjhV z`G}{%*aei+^Zi5e&w%SMnTLjmi}-*3##2P-yyYDbhqzX#Ugtu+CeDcc1~z(@&0~T* z*N(vbM$X7loUhRxF=RE>6v{IG2w5lt%|FtUz1qgI zY>gvLRXJjVqeBeHfL!>B$3AYUc7FK8L3qfji@l3C?{EakpSSt{Y0Susa=Q9x_JZ~&O2qPiarIvW^w1il@_Y}ZV;(uC`Kku46w zM+9nteD3XAc6AYCtfOR^_&}E7(})evh%qveu5Vl@@yi!sU%BQkT$u4`TgGCz9LQHE zU?VlF>=ObH`9RhtX3%mkV;rDhdX zBav$htsWv)Dck<;fWoj)Ur6G< z18awXrwly+z$TW2;-_+7=;xJq0g7P;nGo?zL1RTQhI{#*3D~@db#)i68#|vKrGJA} zhA}QRLntj6>zQg$ncV)~W~!0Db9xbGrNMA0+HW4>KE8H|9Bx*&v0XtK7?B$E$924a z?J(Xx+ol>8cLiu*3&p&Bc(QqiF!2|D|A%5`;-E>g+{P2={<2y8?uEiLC!3 zJik^nA`~E6a0u2`J|CvyOqarc%2Z|oDPfs#oaiC3iq|0pX*5XpRI6@L5c|(R!uwdu zoLx3S?hM#*bO2x+zg3Kcad2nezT{M==BIZXk61RM4zhPW4%~)6SWad^2@6AxpO74_ ziV&o7QQP8q9LsSYkOTXnk;twu5B5Ggs$EB%R+pTpx^pU%=Bv+~@kg^G@gA5-7(RA( zDSP_69^BXebMJ^u`r9eFzYu8CCs?tR3KVLKX* z%vkFQkW0&Ky8nJ8&;BoiT5(dgN7o@vo-?3~mS zAi4(u2$3d44Lw8VnM*k$!MBlIN4R~UQ&nBpt*QiJ*PO@ zP6RU&8Ih;OY3)!MhD=c)oc}_YhaX`hFoVc|6`F(;soGb(=(KJ9IMU(TOVCbT;sgpnk4 
zY7R8PRptz#`p+%WHTU?1{uHi)&L6x$<1dX%}S6)tW8TnH7|pR>kZm=0!X{0*5}j zX)c{qa&Yl4ag4Yiqd*Mu|8_wZv4rXagFW-JM_W4^*6r3*jVVn zY|}}zxEW&P0`n@F2en}l@j#}U@coL{9E)c?`lXeIZ_7C6*D}g+x;5r4J8 z4+|e{zdXy{8!NG7NogV^URT828*G8LjlHLys1rEH1$(Ne%PV^zC_^VmVlU3G4r<$B6QeN@_^1tq!CcNp}inVJQMH9Oh>A z+|54YhY0Vn5qZaGG{P?twcj~Lz5zAdKLik&`=$cW1RoGD=FOL|V^>K(-A%z3)AyG= z&u9=DW+3O^&uwVdx9^{4yfhs<689;N|4 zH~@JV)W?X_cH8b$Kr{j*JOMW!RM1yjqn6~+Hv$?Jj;k4v~H}S*kAVFgqAYos4-~|+M z#Vt<=l#&7W0>FlY{A+-BW4|5x6^}v!09^SqI|ls)g^mWuX@3upUsfP)|q1W?-wc&Y}Sc3DO*VdOi$H zqSxa0!96;Qwj~v2>=6$N@iG~KnK3_HIO;q2e*M-Mz&AOl4SN= z^YHWy4UM(B2`gNG!b|9_f&Jy|YA!X99=CqDRPK2}*WvAiz)#4u$mZFInzaL@aeDwe zJAFGuXy_8|@6%hSfcOT$geF*Z~&HIOz?Fqlo!dNR!X_UpGyZf>{> zvanlWca&6io6}5E>hHa!7a=}r*k_n+0lN}pA5@EHuxxFo;6;EGe_M#fDEfoL^tIMg z;m3XsC)@4P#c~AITU}882#g06tD=^tSSfPNzgr(z&=~-E3w+^G-T|dos~cHvefMLz&2U5GXoiU zjFImBv@|lDl)Wo!5Ixh64cCcDBl&KA{h zj1(U-vaJd34xST9sLd90<4%TXowTg)*6}6C)(6lMI4p^(us=g6pnF8ZC~!SjH&3C4 zaP+n?&s*W{iA-In^3t)ZIuvv`JG6JX^UN&Qz@aHydDPx!jsi3hrSq0Cod&+@eJ?7x zuN9plKn5SWsFcpJ0L*(|26v4wRl}@+48D0k%`)2)uB3vBO;{f1MLTW0Qj5qt3Qjr` zdpeXc;BsH7U^4~hI3Q0vw21N*ilAZ@)%(uo3Q^*tA610;xJVl}1Kkm;>GaPN4IQtF znm^ULJPR&EYxvE`CQeC)YJcp?5Ddmt?h-tIw(~pzfPksZuTya3V!ic3s|53nIU21v4@*b<> z#-uDmkI@#r^iG*dy^(Vt2nAYJmN4;xxrI%o8e>)k1Clhlg^|MIng>@m@-2G>n`M+{sOI0BAx>lv3DECsa-&<2(ZO)WZN$+4Mf zIWp=%J^(P0FtrDKN9N@(Z;2c30gJQs?3>Ef*6m91y|0l%-IpNl!FIw}-|L)^5$-l5 z2D~3=hYWmJEjo5BOqED(FhrFkP*ZF6w_SKW(0h$MmlE|COLlgjIQ#nyWnt66TDG!Z z{#1$QaY%)}^djDu2PQPP!^Ll#$_b~8J#gaeu|10l(YFIh)rFT7rT>mnw2o{u_QF^Q zKD4(wM(u|0cZqNlDEm++dao0J9GF`mug8}o75WUi4TNLUEtKF}O6r&e(TK0F-J_G~ zQTKMvpA6s&usJ8W6TH8GKuk6HbdC)AqfiBtOwoaxhhTmKFOPX4&Z%hC1=l%K1L|HKTv=;%~(W8+XwxOjRc zOqKMIQX@UeiQx%ie7UkuxHwNZ(ECb^tD$b`(DNc= z7E@H-#}EaTr0d*Jh=X=a*?CTkZ=efMzH&nz#?0wTzqyGR3tK_dQ%jA=0>mMnvi5#| z_Fn%a+mnyr`T*9QDFwUyNDz-uoLhCWi!Y}9Rm*WQsmYRf=Vyl@i)pHstB;^XW$!vOMik^^g|#v@X4o(+1^}J zA_c#(KoNY9*wep^*V?ZWmwGgIubn^!pYUSF!?7CRK?O%M5vX3zfB|&D>Wq;=w*Rz& 
zrGxRw*_$aSa0ekA5lB8RT(pD`=-N*`xvKXaJ`JO}FAwUT$u;&S@UTFkB}_<UAc2rPP28zyaTXsC{Zi)-0vk7+i$k5(6LmmSU`_<%s4dq!w#zMuv79AE1mSLEu zt_kLvoR~8*APELE93g;{zalnZn7$34QXv8~Z@T^;q6{Wx?C$^snbT9`l2rv&X$~&~|E%pyI z(29SJl+e5q1EB>cJ{dcV&$H_jFISZf(OG5U>D2r9(kQln_C> zR7xok5NQPo>FyL2kP;D4T0lA^g-wfybc1x)rn}*rOE~X2&-;Jx82=dmKVHW;XFP6U zt+nrU-!rax&1(W^I$@&))5h2i4tOotD?BZ)10arwRUsY6LX0MUcoyt|4|X+642s$t zZ)B@OHjC){Qdt|NLl1@GlsVAi{>rI|Yi)~vR;0Z;SV0Hy}*v8gOaC+9_vlOk@L(U2gT77q>c%oDN`-7(6=-;@!aV(cUW)sazni zv%tUmY)>bivpu82lTQ^YkT(6JjU!&Pv;pu&q3iM&>?;Q7Wo?&yIWzGal7usm_#?T3 z3fonHGk@x;2+rTflMq@3Kp8!+St1UrL|s{_F}^)&<1mk5&SU2*Uwf*exm;2->ouqj zhDr&*9ze^JX6l3(R}wTos^>EW{bAh^IL8XX!t708*Fj_4UGRsuitO3k({@Op@6)P- zW|=1>h79~u^rx*?eGzVeg*=BA^L{d}hL-$E zC+%EmQf$fi@@`yz4bnD?QJq-W5j}wdEj`PLcRWD|f*c<9HI8ypWM0HG^ro_V&SF~Y z2KRUG++Q2@NqY`?#1<-Ik^y9t)$xS!H+kmsC+RQJ;feIgWsVnSVymZoO? z-f3%YU9HC2rYbNRTr*rg{r^o`9O0wOr|H?0NHzq^+dC$hNXBCl!s-R-HeaNjeawlU zLbxt|r$D@R6(iwSU`+-6Y*mo^n6@)b!^2opZo4wnLmxYc93{ywcBdr@L?f~(eFbkx z%~rWT01WzifTQIlC+D5Wk7d8x_g3M7k!Vo(YfIb57(pQEFd)U|fH6m;X5X;3iKr2S zoHk(AW-BHncaG%sMMVkTgOa|4mO~}%&{~e|?w`q3lk}x-B0KuoACX=Ii|oJcKhT;p zVTlwPpfqf3j)tE0WU~&;IrfJ}qDkI@1WX>QUNTw@Xhuz$h z+}m6}l&@ZzBYQv<_3s9(Pk8^Ks6S2lA6-p|0Q~=VadhN<@aKQ1TEvkm9@iOQLRk~~ z`M*nuh}eANJ@f{^Lq54MT?3*a&_o<3*x&a-diVcUZflR~4-IsWXpxC@U>y-8*P@Gn z!9nx%WZ4c0K&aKn*I7Wy48*NA6)#SUAd0}>JKyF0SM4%LB7s;MOi^b!)HW^T5u_5x zOW#1E2qU5BY^?2jV)e8ZBCNAKn_X|!#Y5C87Ck@pqtWez4s9uA4@8=;G)6!`J}Ijv(fkEBzM8lfx3Bq zEjD##uN;8rKNcvaTbF^#rD2e-lF~erVU^L&m-y?UluQj2NC#|NsCU;C@9#t<%}O$0 zz8Wn8cs@$cdJ%?5JsOe+Hg2`bjp)!C8oQ|nwMtrRZ;N$0b_C!^tQAH&!ihz63zPVy z0HB7WF|eZU8$KI8Hp6Rc<8NKvK5r!NS|Ax?(s&nB1&|;+bOk|?aFkxO` z7Hs~{T2rKEdR*VtzY`821ymWI6P~SF(fv%$lN?hhnf3yBp`a4L^Q43b=<;J#Ec~sI z6^?4lEPRnC8ioFP=yfnZG;;zd1PJ_PfS4L> zouNA_IpFT;K}V<6ssRVXQ=?N4{wdsOh-TcELbJbs=U>opS_Zu?XH5v#S_(3V5fBCA zH+kxS3%}H;sqoE>~b|6!jdfQ%#XZ3H+mcVX69F4V&y2qvWN@liP3!-nrM z+q%gZH+$+RGUawxb)sSI)*xl%mIQk7G1DI67DV`c|0|#fzZni5ssC-DYBSpK3W2&D 
znJf2K6&ysr`vd6}Z7@K{UWexreM44x%?Ew}N$(vsdhe*kL`#0+B>&3$JwT?rFZPL!3N*7>n#NR+Y0i4P^=;26hrm4FddM6nZj1R`OQtI%+c=2E%wZn>q3XcVFi;GJTo zfK~LfrUp2)6;**k)18fyq6BojbICK|hQKfgp?bV_Z>*lJceCOib4f$xD96myS2;ZjM_E)giF+G<40~$M<{E7#wi7 z$hd_5jn9V_@&e=DFDipaFmc$F7y(6~NDYxHQ+IiBkTjPn@to(q`7|4uw8kR~;Ctbfq8U2G-*HgIb_<;Xh!g_? zZtkgkfC@BS_fd~fdq3^#f@>xU4$%TdT$pvSAVw3(Me?yXvwaXg)YSte1GlB(VEsGy zAgMm$0~$P@BLk08YR0O2Za8+c>lR;})y$G9j%VIP-s}z%4g5&3Xs%zCYIemn=!kei z7d*XAT*zAkCfc6U@itswC4169VB}J$SZ(J z3q~p87!~PH`A$0?4epS?xp9D&`Q7J)8gf;5JFk`-v{t7@DfpudRHF{QBzIh>w{(>qJM~htAH0PQDQ6q6ViYG z`5=0)N^|FnCgAkY-~$#CX+lCtT>4>K3qrm~N6b+L*wulgs?%2N>sPIKog~6zv=-S3 z!+#c!8*nTi#V-9hH3xEm(7Fkr^ZRE1fhRY+jo`X4HaJ${(Z16O%EwutWtKMBV-6e_ zNO6E600mYr$o%M>j(SAKS|x`7o<&ATK;I4YV{$<{^BP)NipTJpfQVnW4pYp*H9qIE z6NM{-tYHQ5nma=Uw7Qn<{cK3fdWkRDexyvOzOWqV|6z)$uNj%53DxLS8>)Nho51=~ z+Sklt6|J*1NV)zCGQ8gqT|!1>IiPmqUDQr+US=12v7tm}lPCY++Wmk*F;xlnRMmpGgmo&?Na7|3vM6y3{g*@b^WtDtZ z^ctImh7J|ZhMV)F=*7%NrWuVIb&cJUp0W?cmPYxY^Y?hwYGm-+f~?##*zi7syj}cV z|I&2iGo^$$48UpY^5UZ2vNh3|Jfa!L(>2S`xfa&vx8))-t9pZE_uC2|oK2*J6V|*8 zu@{mmpt~jGW-lHCCyE)Rg!<*lhtIF^T49*W_BX;rEOgf4@u9k45Fl%uoSNzj3`VT7 z-LeDxL6DMCO0oMu+9BLq3X(q0WiGi?^C7T2$Z{rIo^bkz6$p5VG|Wax{mA6p)PX;T6A5_R1<8~3HM5%5dh?VPL?|B zFnjSK4-BASEyi%=b*?quTQYgJ-4i4|qJ$ z3GVTw0Tfg$-M)E(JeWSv-=mha(1lg9Y>{#>ek}61hZXu~Z~?|Zx}T3xWAl!EU^R|U zK*h=v%08CDuEwMBvM&EZwToh+c3$rt)@>Y@3BF^o+4Xl8YieBgnSaD*Yd1w?8laLz zQ^ZfSVBby@k<%0suUj3I_oyR2&35+hrdHLNC1kC8_PtXZGI#{(vuPwYYkT;y&m*21 zfE^ZmTn&wwwcfY<9yHi@`o8_{EE~}+HqTEl-mhqtC`nPinqhNXmB6Mg;JsQp`)M%0 zxo=*oKZJ_4*L`s6m1I_+q{g*zr&o}T8j8Rbu8RzolF z$N`fG`r#Gh2_jx~d9MRg4F;W_puWosif>_9Co|~EQODhusDKVZlH#R82XK+qs^!LJ zP&xQhW)f(~4<%h_EH%V4OmMmBCT)b9p9j;-=Q-;X;E4FGEu5hV=M!^dO`my2%YR+GB0qILMN}Z^9e;fDiUq#0GZjeF*aMD4LEBDzCG4y=eo1IdZOVLRWpP=b}%X*PzMOtISWa`pwD`hhsL+i zEQovl2MDfgw6{2}8_iCw^p3rZu(G3RM{iX6&inRggT;HkR%xBjtnvUULcp*ai z#52i@u#c1iI~cGmpg_oY2$lTjMd-p-f^LFz8RVF&eGL@{Zl zouQ6PQ$t~%iwD==Qj6`ciep2oqLqeqp1e9p@Yn;+0YWBYub+ 
zmAB2nIqlLad0MkFVPXxUBVwjuH7zy!iCseDn$|I=>~XWKk?$__KgB2;z*->_l-(n5 zs#0Qw2$-?&(eTbwF{lT<0)dzq_&d;*6lavKAhkv~i$DTVqE(rEAi@uFqlSVL2Am+S zMLVnT;3xk9x^d0v^-p>t==aPM%*cCy01(q8EYl?eJ3v5O9PS%Nv#r zSrhb{T<_Y@gwMcjBF&JlB==LvbZPfNd7cwpR=rXsKh!4k3b(UYVAI0mk^XrP&<`r*DzJKQ{Y;LKY#LvgLzWQ z?sJ9KRNuD+YS{q^W!GfA!fdAA!S@kzVNjf~n*esgcCRkEhz2N z-d2*Ke08Qb-nsiH$VWbcR_$Tk2rC+3F1EVlRtK@UuCHZK( z%_tYemjno{@o#jRV`~#Sc~U#OA7W{TPVut=d%qsn`61MZ#Q+0YZGOec7h&QN;W$;C z&GB9Ao3n1^HS>}Jh`cB3u94N+{p+T&4-fMKI%%?5KhAeiY12G#k%?sOiju5T9sqwL z!Oe1BPgWshNJ$V0aAIQ#m+bl#??iZ7*MAwuP{tb{s&c;wd0C0196*QtdKarRtx>5I zJ*zc}h77gSz}T!ea?qxsGrzfW11UfOn>4cNeE-KjB`SA%16Eho>pm`(?ad6zHK%Jf zKf%r~lpdRUw?;Dya$}Z5h-eJ}orDn1SZTRDkmcoqc(HJ-Nvq86oz49G6h3FuG6;nK z$#Yq^X_fOXvG~7rn-HY^quT`i5NsigP-6#sK4o#5;4v~^u`Z4~D4VZymTsn-9*DvK zOWkXG_4C>pQV7i?qr+Z7jY3U~CP)>iy@-$~+jGTt02Nb2CZ^qgsFPznp~Y!-y4@cU z2n4d!jJzA7vH=j-i%gl->0A+X(Rgh^#ABn~v)2p>3=h`Nzv|tE;jONpe?j{O@`qJ2 zo<`&A(93xs7i$FIZM^1Axe^}Hxx6Br5fnX$Xm#Y7s0q^-rXa#V082$=1i+WLBeBj! z#&pMnks&5RO(AdW7tr(&BTwLZ+iP02E3!K@uVXE3&~9^L!bt#I&_Q!Q*+Nd}3=o=t z)^jA77p?($iv7;#Q;i$juZ-rx<*Z9#fF{C3M_Nm|1EC@dG8;IENKOkiu~aWSpAv8F zwjy?rP(XS)m-!39A_l>ijLV!+QEQ*=^UI|qhjg|mq^;*$DOH+}_y%YGskL>ix(2sE zZt_2CZT(00$loRUe>qq42s!?rpyR)H{olB4BN$|4><%)m4?(FDjUEBx4~lBgqIERI z6GZ%wr!hSFFYX=mxPV!S|JOhxrMuOwM1o9*Zon=toVJMvs|ebww;wsuAX6rQp9xq0 zOP!~ZG1@(<;p3t%(B4Ey2d9iQT9(I zvLs>OnipDg{E#eywEK3Km@Ea(xQT} zv~M6UxTEfvYW0rb2%v~cN30jgM2Pyxq(q+2l(k1wr4e>opcyR`0FLE`&g<90F+#>iATs+< zFiV-t_{;ZY0$TH{%IUQ5?nvO%pX0cUOwmM=ZiJ_Ikh5e4LkB9A*v>$qi8!D7UvCh; z=e?gU&r$A3%3o;*THMIgbreE|hr+)I`|bVT`n)SS-8TWt1b*~s1nmH(9#YAHZk~l* z6Z?3}5AB^0=OZk2K*dpsgzNLU*`Nl6i~?{F-OoqbVE`%xP!yq5CzKXVqsvspk@Xuq zD!U|K?m_j@-2km5J0j8xZNDJQ#Xy(7np zhLKAOHNxDtt}y5a)1f+B#Xf}Sd?BN@p~sOQSE-(PiXFZ`NNm?5lUy$$+*(Tl zWD})E!m*JF)%y+Ai}GCw(AMz~$bjDi{p(a!LQ_nGQdEt!w?fn+oB#nHdus59>LHeT z(AO#JVNeS-bs+tnL?J}h!DfIPETe>h2I6`!BfN5+?jgW;CQJ2tr6y++jwemCMN!WwnlgeOOogY&HUNd3-9zxt zFxX?_!i_phN!<%Ex8Eb#r!{k3Mb3whtOkGe^%YGHpQA2Yq&VavLTv7Bfo6W*Y9#kMgG!LcH~h}=9AL< 
z{xon^WIkA?@`MA-jFFzW64aExQu1Kx_VdX+ynI|iux~&*2s8VO^L^jO7&UR9SC4Xn za|CE54&M|KjNJ(g`@2BLg+5heftVyysT-NURxE#n^wzX|zZ|Q^K%XhY>*ION2M-0~ zoM)y!w`=kF&&jDSNxfGvJ5lgK$(}dU5}ES=lOjY8)IMj5e!eDlnnVgp+(<7bssg~A z@d^wG(*XFAsM@6l)Q(fZOd>X{;jc6WkI@8)`X8QrF%Y{zg{IS=PS=GgX3$*B&9?x`I8zV*@;uG#mwMX^hTr8ZBGN3KlXMYV$?g7Z%2S#}8F1X{ zn9LJ`w)_P(5tEKCJ2IBmIPYJjwEfm+q6zrUWIjxOIqHL|FeN!JzSa}I}US%MO z-N_lDO9_)qfaXD}GVV}UfdS|W#&FfGUa5;+gh~LyH;4{{69plS!|U(2HR@r!gker= zZ`|8}SEhdjNOr8^8zvSWsDw-@sRY)kPuJ;EDNM+NJ;@MPv!e2Jh76243qd_z?^+ZxWd@1Huo{B7#tSz_ouUDfC^5C zB;6kRp2x-{_5h|50Yai+Qq07p@C5?2;C3vBhZLg9U7wS5qc%n_IJ_=Y41~t?6w?4n zO0_AOyQd#@cXo}yArxul3gKo2u^<^tIDk2p8j`b6PUe28cCK9n^dEv}mvPwIg^)}L zgwsWMHV*H%uNtMNvp-El7cq z8X8zZ>*-}8&jWzP;J=EfM@){K7P7nV48X`^0QG`yS%n*FLMZSjTmOD=k`VD``?*oB zHH@?r;6R|D9?O@Bh)8AYr5N&Z-i_kMolqb?XG#f7x8Ld!iF^wnp?If0?J8^tm~`$F z9EJIwZGtF4hNncW4aN*V1e86T$>F0M*3n)bIi7J(f;f)V97V+{>S;#VE}Q?*04EJf zgjO1dHG3!zObQ{B73tA$kH<6qON9Gx_im;R7$pcX057d;hads^hIQI#kVPVH?W{U# zjBB-7fC39-b&)F@yIVU58%id8;V486KOWX1n$dDZ6u$~N<3;_W3*7PM{mWKRMOpn- zS0pALDEyTBAI+fu>!b^0A^v{~*fn&3OaKI~yFep=^lEhOn$i)U1KKLDH_@*k`n7$3 zUlGo6Z?^IPu=0S+f$&X1!16{@*$t%4@eVmHG-5)241f9gm;d;uaDoO1p|3%9rMd?V zAA>BeEd_Ik=?SEF7Zhg^V}Ju5guZ#b6+4ynL%XW zaSbytCnS(CDGvf7q-p~sAiVsqi7gjg&=8L!a^n5s@b7)A%^NCBHbb|{qi(a zB1f4&cd9bQ+=OLAZyd{Q&<4GFq>6Pv0~LK!qj=!W?vJ-4Fo%+S`?u%lT$(m)Tu|7) z8k(4XPXCS%JR+#4;XtfxP9e}9{CSxcw7?huv*JmU2O(FW>;B2bqBH*U?yvGz8WbO4 z%tD8W!`EldfGeXaARwL?ehxWG3FB98T{ z2+;?}OSZ~c<=plA4wWgXef7EKw#FnR6M#U7dXD8tc6J#;aX@`IpbL+HbHu`_4&Dt& zx+2vkqz`v4P4C|_vg_ezz1d_`Jl5<&5u`uKN*XL8OW+ZvLnQ(wpRie0-{8{8?k=LC z$XrgoXXHQ$SS(|!gx_M-i%!&oIL-OSd3Y z&`XK&qVxFyJtySk+o%BA`jC3PBQaO9ZceD49R)*2VEvIGd~InvC}coK|9^2EO~rT-g0!5hK8enG>;!iDP!AKKm^ zbbaWH15VCFMchYZoZwlv``>O1lOI*$EyGEbnOG7O&$W4LS%?;<-9{o%&`1FcN-S$> z=b1emQs5g?((s3?hsr1b~}LxcY%B*_Y!W}CB^2h%Z1GGp?)Co8zsblY(21crISGI%du11vuFZ1N7 znRoOOy`e~>T%cOd6Y$#zW2$k(;HWm`2c+B@CaA}Q=dGPUm>%W{!5=^%vHTCn#r4J&2IL5Z)Yt-z`4i`YgTr8E z`BdF%zed)_sY1w+;GTAdo~j9}@#*ov^H7|UzTpJ0CJgK=OwmI)%&#E|tepi1Az%Nm 
zT+hjE;M(}Sp!;F-;9-rD(uC9SVZB5D?x*2c9&68di}01{vy|66etJJV*hzdLyUoBc zH+A72r?SKE*Br7G9~`2xC^ap`@#*VbCG`$|r(NHwl-|2&clK;zp$=z&*P6(A21~+z z;f1lWDWYc8h0>gl;&P2V%smP=a`Fs zD>_mg7B>7n#@8z^9peF8~tHt zb2rW3?T??ba=ACD9nLStz>vzT41EO)Kht}d3@o^x&E$yX$R1#xy;vGHr{>#X2MdPz z9*`O@A^WU(VISqbShkCwC=>20UgJ;v~iuUmw#Np=4RTh z!x4Sm@??|voY!wg%U%Y5bLz(0?n#QFzCZfs#M2-5hUx>cvB(*%5822WyKNoPYto(wm}Z6crM6;gf6 z+QVUP+RJD@cs(`a4VNPii##a-!xH>mw}RvCZPtCa;>y}Mb?f(Vt-fuFoI+iipJ5h= zIN(w3CqP!{;1w^rXP$sSUw&Q1f5iUcUG;3!PW?MPc3oSi-f`%cRetp~A-7VdZh2$X zGgs(xYO;hbEb@B$z=d(LA0q~@<5Qjl_*m5Xtju`5+Pg>1EX*9%>|&pY=2c*0u6@%; z>R*cg@Wrr}Q)8`0(FH0^woskEp)HPL%_5C?Nhdz30^RKqny2Lpa^d0R?F$M5r!1DW zZ`wNNk-6=wT`V)^@vzdTvRM0l%0>xQ0k1Uq@S&T`C5R~otB-&};m(DHiqGY)@w@2< z^&C8QO#+W7lR>fNhn_E&v{$alENedn_v1t}A35*pHeKKG6=WdJ@PIdQ)h$Oqp~iaT zGjy(AQpJb?X9*KZPDH`jXmotJ&^nvIgf(o4Z)+?oAQcM-Z&-2uva`IF&wc!xt?Q58csw4d=5GV;Z}~ zian}=t(`WwhHo;Hr{C6zbC|+zEV!o5HTU%!?bcCFjTWtSDz4ut(di@lZtxex;XJt5j7f06Y3UTTi~8FRjf?6IoDd%srYYHh5PCx3HsXcx0k;#B=`H{}WQByMnE^B!}8 z4QY8+rjYy8i02QB$p&uAIfJmn+w<&M%X%;Nd;QIMC`%vHZlfo+sND2s-w8#VFNQE; zr*}!FH)>t(=CDl3ml^!oI(U{sfJdD+%a)qThmz!KGY14r7^bm zP8_sjbKR