From ad455ec25ede149e2f155e9b75718e727bbed36e Mon Sep 17 00:00:00 2001 From: Vincent Moens Date: Thu, 3 Jul 2025 22:51:05 +0200 Subject: [PATCH 01/28] [Test] Fix failing test (#3033) [Feature] Add thinking prompts to GRPO amend aned aned amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend amend --- README.md | 97 + docs/source/_static/img/llm-data.svg | 5 + docs/source/_static/img/llm-env.png | Bin 0 -> 591070 bytes docs/source/reference/llms.rst | 761 +++++- .../expert-iteration/ei_utils.py | 2 +- .../expert-iteration-async.py | 2 +- .../expert-iteration/expert-iteration-sync.py | 2 +- .../grpo/config/grpo_gsm8k.yaml | 4 + .../grpo/config/grpo_ifeval.yaml | 12 +- sota-implementations/grpo/grpo-async.py | 72 +- sota-implementations/grpo/grpo-sync.py | 66 +- sota-implementations/grpo/grpo_utils.py | 122 +- test/llm/test_data.py | 556 ++++- test/llm/test_envs.py | 769 ++++-- test/llm/test_modules.py | 616 ----- test/llm/test_objectives.py | 293 ++- test/llm/test_wrapper.py | 1710 ++++++++++++++ test/test_specs.py | 10 + torchrl/collectors/collectors.py | 14 +- torchrl/collectors/llm/base.py | 3 + torchrl/data/llm/__init__.py | 3 +- torchrl/data/llm/{chat.py => history.py} | 
607 ++++- torchrl/data/tensor_specs.py | 212 +- torchrl/envs/common.py | 76 +- torchrl/envs/llm/__init__.py | 4 + torchrl/envs/llm/chat.py | 527 +++-- torchrl/envs/llm/datasets/gsm8k.py | 15 +- torchrl/envs/llm/datasets/ifeval.py | 73 +- torchrl/envs/llm/reward/gsm8k.py | 158 +- .../llm/reward/ifeval/_instructions_main.py | 4 + torchrl/envs/llm/reward/ifeval/_scorer.py | 96 +- torchrl/envs/llm/transforms/__init__.py | 4 +- torchrl/envs/llm/transforms/dataloading.py | 33 +- torchrl/envs/llm/transforms/kl.py | 1132 +++++++-- torchrl/envs/llm/transforms/policy_version.py | 6 +- torchrl/envs/llm/transforms/reason.py | 38 +- torchrl/envs/llm/transforms/tools.py | 28 +- torchrl/envs/transforms/transforms.py | 38 +- torchrl/envs/transforms/utils.py | 26 +- torchrl/envs/utils.py | 17 +- torchrl/modules/__init__.py | 1 + torchrl/modules/distributions/__init__.py | 1 + torchrl/modules/llm/__init__.py | 18 +- torchrl/modules/llm/policies/__init__.py | 13 +- torchrl/modules/llm/policies/common.py | 778 ++++++- .../llm/policies/transformers_wrapper.py | 1941 +++++++++++++--- torchrl/modules/llm/policies/vllm_wrapper.py | 2059 +++++++++++++---- torchrl/objectives/llm/grpo.py | 217 +- torchrl/objectives/llm/sft.py | 60 +- torchrl/objectives/ppo.py | 1 - torchrl/objectives/utils.py | 40 +- tutorials/sphinx-tutorials/llm_wrappers.py | 363 +++ 52 files changed, 11037 insertions(+), 2668 deletions(-) create mode 100644 docs/source/_static/img/llm-data.svg create mode 100644 docs/source/_static/img/llm-env.png delete mode 100644 test/llm/test_modules.py create mode 100644 test/llm/test_wrapper.py rename torchrl/data/llm/{chat.py => history.py} (54%) create mode 100644 tutorials/sphinx-tutorials/llm_wrappers.py diff --git a/README.md b/README.md index ab820c19697..db5d4b3316a 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,57 @@ **TorchRL** is an open-source Reinforcement Learning (RL) library for PyTorch. 
+## 🚀 What's New + +### LLM API - Complete Framework for Language Model Fine-tuning + +TorchRL now includes a comprehensive **LLM API** for post-training and fine-tuning of language models! This new framework provides everything you need for RLHF, supervised fine-tuning, and tool-augmented training: + +- 🤖 **Unified LLM Wrappers**: Seamless integration with Hugging Face models and vLLM inference engines +- 💬 **Conversation Management**: Advanced `History` class for multi-turn dialogue with automatic chat template detection +- 🛠️ **Tool Integration**: Built-in support for Python code execution, function calling, and custom tool transforms +- 🎯 **Specialized Objectives**: GRPO (Group Relative Policy Optimization) and SFT loss functions optimized for language models +- ⚡ **High-Performance Collectors**: Async data collection with distributed training support +- 🔄 **Flexible Environments**: Transform-based architecture for reward computation, data loading, and conversation augmentation + +The LLM API follows TorchRL's modular design principles, allowing you to mix and match components for your specific use case. Check out the [complete documentation](https://pytorch.org/rl/main/reference/llms.html) and [GRPO implementation example](https://github.com/pytorch/rl/tree/main/sota-implementations/grpo) to get started! + +
+ Quick LLM API Example + +```python +from torchrl.envs.llm import ChatEnv +from torchrl.modules.llm import TransformersWrapper +from torchrl.objectives.llm import GRPOLoss +from torchrl.collectors.llm import LLMCollector + +# Create environment with Python tool execution +env = ChatEnv( + tokenizer=tokenizer, + system_prompt="You are an assistant that can execute Python code.", + batch_size=[1] +).append_transform(PythonInterpreter()) + +# Wrap your language model +llm = TransformersWrapper( + model=model, + tokenizer=tokenizer, + input_mode="history" +) + +# Set up GRPO training +loss_fn = GRPOLoss(llm, critic, gamma=0.99) +collector = LLMCollector(env, llm, frames_per_batch=100) + +# Training loop +for data in collector: + loss = loss_fn(data) + loss.backward() + optimizer.step() +``` + +
+ ## Key features - 🐍 **Python-first**: Designed with Python as the primary language for ease of use and flexibility @@ -516,6 +567,39 @@ And it is `functorch` and `torch.compile` compatible! - various [recipes](https://github.com/pytorch/rl/blob/main/torchrl/trainers/helpers/models.py) to build models that correspond to the environment being deployed. +- **LLM API**: Complete framework for language model fine-tuning with unified wrappers for Hugging Face and vLLM backends, + conversation management with automatic chat template detection, tool integration (Python execution, function calling), + specialized objectives (GRPO, SFT), and high-performance async collectors. Perfect for RLHF, supervised fine-tuning, + and tool-augmented training scenarios. +
+ Code + + ```python + from torchrl.envs.llm import ChatEnv + from torchrl.modules.llm import TransformersWrapper + from torchrl.envs.llm.transforms import PythonInterpreter + + # Create environment with tool execution + env = ChatEnv( + tokenizer=tokenizer, + system_prompt="You can execute Python code.", + batch_size=[1] + ).append_transform(PythonInterpreter()) + + # Wrap language model for training + llm = TransformersWrapper( + model=model, + tokenizer=tokenizer, + input_mode="history" + ) + + # Multi-turn conversation with tool use + obs = env.reset(TensorDict({"query": "Calculate 2+2"}, batch_size=[1])) + llm_output = llm(obs) # Generates response + obs = env.step(llm_output) # Environment processes response + ``` +
+ If you feel a feature is missing from the library, please submit an issue! If you would like to contribute to new features, check our [call for contributions](https://github.com/pytorch/rl/issues/509) and our [contribution](https://github.com/pytorch/rl/blob/main/CONTRIBUTING.md) page. @@ -792,6 +876,18 @@ A series of [State-of-the-Art implementations](https://github.com/pytorch/rl/blo NA + + LLM API (GRPO) + + NA + + + + + + + + NA + + ** The number indicates expected speed-up compared to eager mode when executed on CPU. Numbers may vary depending on @@ -800,6 +896,7 @@ A series of [State-of-the-Art implementations](https://github.com/pytorch/rl/blo and many more to come! [Code examples](examples/) displaying toy code snippets and training scripts are also available +- [LLM API & GRPO](sota-implementations/grpo) - Complete language model fine-tuning pipeline - [RLHF](examples/rlhf) - [Memory-mapped replay buffers](examples/torchrl_features) diff --git a/docs/source/_static/img/llm-data.svg b/docs/source/_static/img/llm-data.svg new file mode 100644 index 00000000000..ee76e85e5ba --- /dev/null +++ b/docs/source/_static/img/llm-data.svg @@ -0,0 +1,5 @@ +

tokens.prompt

Padded

Sparse

tokens.prompt

tokens.full

tokens.full

mask.attention_mask_all

mask.attention_mask_all

mask.assistant_mask_all

mask.assistant_mask_all

tokens.response

tokens.response

assistant

user

system

batch[0]

batch[0]

batch[0]

batch[0]

batch[0]

batch[0]

batch[1]

batch[1]

batch[1]

batch[1]

batch[1]

batch[1]

Tokens

Tokens

Masks

Masks

pad

valid

diff --git a/docs/source/_static/img/llm-env.png b/docs/source/_static/img/llm-env.png new file mode 100644 index 0000000000000000000000000000000000000000..df6c7401ceff4238021deaf5485eb8cac5e671af GIT binary patch literal 591070 zcmeFZXH*nhw=S%Rim*jcl1fIDAkjvV*n$Y6NNPYbs7O)}0g)^Uk`yoyMWR5Hk(@z- z36Ly^1SRL3<2Oq|yFH$B&pF?(d&k~my!-84RMlE*t~sCi%&^K+;ga-T;zPvSwr$&c zUgoUQwr&5|Z`($6g=iOCIX`mi5&YkFOC@QEZ7H8%kZ+_sI175U%x^GZi1 zw{2tGcK+-c%w3()j@{2`EAzxhHMTSF(NEvQZ0X7WR?jc*-q}y5rJo&%`tU~YIN6up zM;`oo&*aE1mmS@6;4H6V;I-0wM{e_eIV!VUY~!Q0(eWiL>`OXGy zl|~wDSE1Z_7WBPs+X)DX$Qb`8Kd=W~VnYsky8O?c+8W}B{=OIgn^74RBniss1u|(K zO8jpgC!0^({Xbmi&lw9GVH{PI^wwwlpNi?nWxA$;D7X3ygn36WtG2W5nh%)$?w}-as-L`vI54L^$nq z6&g-u83KKRsgIEdsZ1?^<79l{d-_4kp8MlTEMg zrAf0eKK^nUS3u~9c~B4&=f{uy@Sgc0 zV06xYBlFg)m6U21ckCt&y+N^2a_-(Xm+_;MvyJjLIqWZZHWw0+Ku{|(b1`wf+gUfO z$NB2xL#NEzLsaPeKH+Ut#8(i2)e6@(94*@t1^VWm*`Y>5r95(h-6Dj-AI~r0Ypf6l znZ7TPi3-s2^oMfLd(-zFT`Dr@}-EU3H0QXG(uK`&kwtX;Yr{g zn#6@OB{%A6srxYfoxOO^V^q+s(FvhKxZAk(;)oLj{+PMD!|`!KGeisf@s!r(4hd%e zBF!nYP)otF0P7uiFMYm$a$oTLF1%}-x+K7qR!Ttf4`{O;uQ~67U3+Cg@JXIihY0cR zw&HPdT-(~^ChFoU)t1_}vHmito*6Tx z=4kC#+(&T@+J%GoY#j>$Lrh4TH05h+DZE`f-&%eYZ^@ejgLZy&6IIo+s6<%})`id{ zp4l4jr%EkQyiXjjZ)!qa8*P1k%u4=X2L6~PpduJejPu`|zLiXbR_oQeh%?6DN?quN zzslj?6mf{aR^Rt|1JsS+ruB?b2&#f*cey)s&FZOwElgfxf$-8vHM)&!kvVq ztbLVkw&z-W>TY_cbchxAB_=wh;(DJ~_a9`$z53IBT*jFcxJUV5vogEAIPdKHC*ug7 z5`<8Y&0{|p@Uh`86M!l0KLFFFeqhBxF3XxHSa3HMZF2xbXvS$)j63rnJ1|=1+RkVA z2KjT_jF=cl^@Z`ZCmBJ|W#WSjxa&A14MuA5x#1!nAZ+e!7EDx|C;ZI;oDGf8Bmx}r zHaK}6cTG*Mi2phM0AFD@RS2fC@87B5+Y=chqBur%5vW_=&3K+KYF8oeF4;HtX1FysMi^+e-}hGvfUP z46lqnt61W#^rnU)s^IdLoNV4GGz)bAR|ht?*qRISx-C_Pleq60q=MIUTnUzYiYo>I z&{D+%ReEH&w{Gd$e?Xl7%coKoCm+6BS$2o4+?7ofwicCsa10MGmH{iV6LTl*aOK+c z3~Wm@Ip8(EoBUzLMtXo3yL7aa@xbr_m?-MKLx==Mz%uW)xb%+we!PJSO6NiDf#nv!>2XfWb|Qf-)M zffH{Sp3Mo70RlXb;y;Uv04NVYfCtB48RL1;pGb>FFNS?>3T zR-aA}WG)HbrMiQpO#l_5(*R z%9xj|-Lac%R#S$5n2a(j_Fu5V@|^k|#~G!9JnSpAfYf51+f(t`~(Y}|a1_F|uH`a$< 
zJ-Z}(;*;Rjf@Mp-U-Zmn9{h2pTJ;>Bd#qE04fpw&=Zot}H}7ysAa2rZy$VEjD8w}7 zJWSp^amQ$k!t(ssi#uNo0y95rn5r}akgGs8I)642(_@L_RWLEM`oYsY~3|q>(A=P_8#Pa)tF7r>^aOu9^_%IqwhvZ z+{_n?Iehx|wL>CzZ(Uyq)Vh5>@}o5C&FUaN<66;$|_gwyMbJ)A=c$}MLVIEpu3 zzoXW+s&6W=nYc99()of#Z+^OphdQiew&6m~!kw01eA7w^RPxz9Gkh=U1!~6;dz}Ur^JrH5lieE@N&!vPnc3VOXYTxNzzs#9W z!Zs$$ug71nbA26(3lICOV?RbZx6-Vp8&k42Wp%kvs``F9Gm5k?una=@PLI|sSl(QelEIB0O4~zqwIJ(;`cPoT^pBKY)J?$N7Qto z^=fOHK6?VWRV-J@>a^Q1O{3f?ZNvPbrlNPWmKXB@u-az%z>c#Ljw(955nX71=Cty) zE5KoXb*46aJh#W^(&L&Mb}6Ipk4}6tl6(#3`3i8YSo!Q>^FoW|$?|=gb9^hK8FAz+ z=Uy4d2FGeU$_Mc>N!^P#AYw^qSsk}2xuEEBrC==k!bIWBC9`mULKLa+Xxc118A$ha zykH`XRPfuq^cL&h`$4AFV=v5$jV)Iuzn3=_&V2g%#Ei3e;EQ3n90d3#mX5BbpG0P= zy;dxKX#;uyQanNf17^4gN`}<}O}fpg_7@Jt$h&jrFw^K%ld^ow+CoC2f z{!y4QmZ8U$Ljly6_|a^7%kv#b?u$>|qe7ovp1MNX&Saak%DGR}!1+t3y%6`iU$F!cCTrwoEDZ8tJ% z8X7L}XwP>RmR1ecM>l3RUee6oNBzq^Sn0rOXCB&Zap@m_aQO&=;jp|-xup!a0HS<3 z;&b^AyZL$}L++69og2+Z5ctkq?DkemE|C>pULHwR#_V`+0x*3^Xe_hwc7fLl3(aKd zj(x9u#n*Fh-K14z5WJH?EC#BsxvP1qIOpACtkm!zW|QmR(}z- z&RzZCmQ>w>WjX;9%ylE<_?Q3=2etGBJqmF~^I8=Rx>p9_eDBy51&&D^0!5OKt)2gB z9_}O9?V(lBqGQiRclVo1azX^E>(*y=!q;ve1Ythp{ zt?TfFO8wdsirxL3H(#HucUtXXP{WWjaa^nr6}6wg2vBMs<2zJm&EPnz*k%;{n14ds zx>==hX&}5S(y1r5Yi- zkMKmHrI#1Ym@y1$@geP^{W5#;$<+LFUB!-#fXy$ztch0>&3y)1it? 
z1AI1g$_63EmXiu6X)TDQ?r`xua4fdfFx+AK{4(fXBVf$4{!q(LUY&~8yrPmnvpjoQ z7)R@QFBmi93KnY%?m0x4c?Vu5XcMeO2~9*t$(y;u9Ou7OSJIrkaaqGGua94RWuau_ zrPETCZY-AAubJL?bqZC3YP1?+mZrQ1=~d>LV7k+jT`>zo3$b`o-m4#;O zB!ySsBU#YO@0m&ShzYA-5024xbOvFv6R^F!308Jf1IU1;wemQbcD&)G_);a>3SHnk z{0y22-Lj+M9e2Ja_b!we7}me*u<7dP*V}O2b3m!YX-#&&^kUozQ!@Q{7*P6-!=ouh?0l&tysm7-3kl`({zmN^3buxo?yIfq#$4t6=r$k~DU zG!#q}zbIZ`9-)|M!rtQt_e8Nyl#|yVlGx76xh^6>Ra5JEs|DaNAok^*Hm?cu2{-a1 z_JFWTgd5;TimSf{!onoTjK;EB%qZDzOk|{|_zt{V7G3InHq;lWdkJwsOd>Y#ncS^R zbXBh2SpJEaSsV)E&T&46PkmAk)zB8{!8H=}%RP&`tA#Xq%US=U>C|Bn%oyw{arTmY zSINb}`_DoCBR1;mlkVcrlirfK+xL9C9Z%)%qYJPyTYaLIqU@-}py2&I7NvRe;bLy$ zQ9@Go1T1`=f|#&odZ)$jsPcbKT%5W|wR@oFmM!gVA6u#BNBhKA({AxPd|(Z{2>jlf zct(kf0L4=@tMtNm0-yi|5)Uk#$o^_}A@1cJjl|k$=J)TPu$* ziJ9Q2`dI5#UpY;)JOlU7$9Pa=yMW~>xG!rvug#Hccey==5I;qxV~GYkQQgZ*SG^+S zG3+0H`=-5dlHmh20;E-^1hUp^xd&d_=q&dzY+Rnoh_b7T^5$+EURKj}%(_4Y*Rx4X5bPOW+IYUfc= z8eh4&>mM3FDh;E^+krVtBJ<{i`ScExZzL|q4iRfv;Zw(EIZ`giGR!Yd1q`mHwR`J|S(VvBMLAsSC z)t~reA?UjCJ35={!Z{#UGDxWS!3=2pMD@VKnTLnXT8E3Vcgad#g}0DG2$=~EPu2hU z=P4ND{pbk^M$Y6+CHFAu-5;a*WmGkx+|%fAz=~U<49tnkGsg?SxM50neEF-*Y z5CA^Aax>)X%0x-{$quW=YHpiWefP{}Rn3^O?62H2GuU?iTi;4aYhv2vVkFL4=sM`y zFN}-kdAAP4yuCbpn*7!muw6~ZP)Y@pV!fBvpN%PXymqXT3waLnk}E8Jkav2js$<)C z(s!lznLM%o;ixO;;;AAZDt!+nGL|AscV+6$GH zrpZ~VLJ*>GrsSV+T3swFIKiQTX)C*vQFD@WG$X)NXK2krn=p9SK6dQxXIm(~djxHx@y4~6X!jex+$v5dg6zaUb#42uD%#`B!U|fByUYbD; zoY{@JdQf@)Paxc<&8MbMmc#nOnD6=7Y_s*|%6>1>O6w8;^f4!@h|1lo$FC6(QX9*k z{`QdRE1e>{zRneZ(iqd!qU=f2HUCVd@6QB9oTfhEDPU{QHE#dSWp~5zjOvEScfD@AnA0fU#Q0?9p5iR6Pfu-C zdtDNBNE-8u&Ft`vv@=2wbspX510CYlzV&8jaCu%|fF^M>M%L{NvmUwoTHM*H#FP7H zOw?7Fi`^l4VAjIhEqY+WJAjMh^dlO+sHioknVM5^Cu&79nzN=HH)_nCPAsKL33w7Y z_>&AiUtX8m`AD|wz*OC$Pf~8Tm)o}{0BY6GED1`k+N@`uD!r7VVVN&I`r?Mtv@H^_ zA>^>21#93w6}s42vCfq;tTg=Q<(_oZf;z2_$@<7thexTVg|}$a4RT6#Qt{Qvbp3)s z76LtT!p5KU&E+$2ygPqX`}|?2)y~_K;_<%CZO-OCWla!Li=@D-iME{f7NOJC_g{e;;FC@#6vo!N?Gf4EX@Rv=U= z4PlaaS2ry%1-2f-AQfaBgUnA}tolwr>ps<-qzAs71!#!AV?3tPEI 
zanUZWx3}4<7<3mL=yEY~r@M364zgz_3cNqE=sGPK9D-a5#4&5vhhhS`9p(XCVjiDt zW1Gn*lB3uyKMzspECj1rd14%G6~$gzch{E&s^l^zm+B%VNhnW;d_`Q6Dy0O+@T!7zF_Gs(e(=9 z8*dL9UL?Q6sq+0unJMG}hNX}TPH8!*!g4}1jk-2eAr3gHFm1;e#N zPP9pny=W@AoCMbWBuURRI4$ab$XY39SoaIiy8Yop_{NaDvn*6-?V3L?4=1Kxa$K3e z*{n-T%dPT;i9@`Cr)eOoaG1uzd{(i&+iwbjpoVch?GS@`O!8tFh?C)<3e-OBoiis?cwt4%aV=~SlCIdy_RwkZ?66RGGMoM$O#St+eM%lndgGB#X5NBF!)EE3iD5>iqI# ztKI_%%21=8w~sGq-po?zpJO@qNXD{8KrOL$y6ntrr<^V)d-cymixr2C=d|B8NZrg4 z52gUNc3Ah-yui&GX!C%<&h_J^u(yB~{Sck(E_$cKx}{n&UvWtv$f&I4r_+YZi#@(1 zB{E0|90QTpK#C>im1|*2T6Wi&tqM6;Q7v3^Q|=BC zWGe<&+3O?BVW?$QFBRN!O(pN{+FT2XKYF??Au&Y*%Sh&AK~|hs`DRHFt=nEET!vET zYKM{APBJ)7RZ<&69Ke105s`4knbmG8CQl-J3N^xD2*yY#Ij+zpewpTwINRqfNY?q} zBtkp%*OCHM^kW+gd&lAiS?-@AFEuu^?eWM$*gMI{b4m_2dc*i_s!27v>(ha-D|P}!XQoT$r*$$04Y7TaQgMal@i0E z2T=*O_iBs8LLTKGELw{-k{2V)FgMvqDJaA_N%g}aZ()pN#a9(E zxgNyK>h3)~x2&dudP=9w3dRCXKPD1bosAQ(d3j}(!AbPh)d-w6@N{9EG5fKW>d|G0jVnc^Js z<0dJ$8`FDkzX|O=<7(=vAfzFH6}7L)`r&XR6oCyNGGML2YXr&yB~V;3z2=IW^4QEW z=DQ&V-_5uzcd7&9pXH&u?rTED^%W>l_!8$vO-n^;!MoJ8TEU)2H{af>;>vji1rHX> z8%tp47(6>hJdKngUg7ZZtt-R`-cYp0n8F626965!TiG(^=*%^1p;P(1L$m?HL zAdpD8+tMr5^CK5|X+ma{l8#&SqP>K}M%nm-ePV2`4d)=uYf3{IAiJ}?TU6&f@A0y; zEn1dxNILfy;BF^42rf5!=axVH;diZ|b68r9%^t6_V=H-WuoJS2d=86~Pj{zQUE+?R zIODolxdQ$tzot)d4RcQ|$Dv-6=j$O^wn;`U4BSBBvU{1P3U$AlkBdyy-LB?I zAV%eguTS|7LB*wtw&i9{N5Jyzkn$ew7pC2H&he66h>IKkk}=S0->tR|JfSMxy_N-4 z2T>yF<2a2|MN@AN8w0I3^UomsBrajO6Ry^pSpM~#bVB2v+xF@fw_7L-<~ zIix9ayt(tS3W^a8RQhp5qLevG+XHgBIm~Z>2lp3?(r)S7Ni1tk(G{FD;jDA{sJQ~D z!6n$WoBgZ^lrJ}iuRj-#!-kRTKP?PUTZ+n+JvaMIZH#&3^ggjgJ*dg5z3^qm?sxG| zqO!l4lGp!{&IU8$@tQhEMNKrNtGupW6gNA#NZq7Q!}o**tU~7?{6i-e2-P z+g#Y$6h`!Y!I5+Cr4{5=jyQdU%>5-bQpeDL2uy>gQh$L`O^a_q&&-Nup)yN-3@PU$ zwX+ll%!EhzSM_AU3Z&IL4oS1mv;hkfu7OlZUhT)k1V!45Kf-QTIj^t4%fi(Sq89V9 zAIW$3dP~wcFP1ZC(6yP_>SB(!!o!V7wJ9oHCZ;6TxkcM11_<9f_*T$NzzW6)+8K{>~ zr-(xtpb@G#Pi@WPcl5DH`&@jmdms0{=`PNw{W7XM9}=O?rZFXs0R#*|XcoVCaI9z& z!cI9PLXgV2nf~D@y9A&57?eFP?xwwkmRdvUQcy}nDapiB`3o?&D<||lj_VV(Emm~A 
z8tF!5GUu_?VL(~SnsDQ6*5mG9IjoHE#qoEY+|7{jc7Ga)1^*i>_nxh@U}~Mq%zi9O z?Zd*gnJimM6AjgxMF<}nArpG}^@Y!Fd|^bbDCdc3p{m4AiiOG*RPh64;r7q$WT$>| z)NFBG@^as~~G||mzLtFQf z8K+2b4*yl)AD6Da^WmbESsycZe$jo;-m)t3T0^}6n~g_d^jh{pv66f3RW9@Vp;#2O zs2kon=>HE=(fgpH78P|7v%^@7CS0wab$)!pX`$exy>ER8D}-Cnzr!vtfax@{R`tTQZM@t|B9JMPh3!;`#>yL2#F(hf<0r#c%?hzbbnB^T#4$INo zV;KI`pTLY(M=-fpr6Ly+@s0a099O*KV@qAJy-yIUl^~VnwP?L)JMi+9fPg>@kgAx) zzO}_@j5h}@)SRRfeZ?W@Q+?wZz;Mpr6?%k(ZRlf2rQgZPi%gw`CXjc%D?R90FqhCT zx)=`+lihsb{Z#ISEVph$HYPLwSifVp=dnxcrbsT-LS`A5($I6;su$7BZ)7=>n_^2K zi65=tc7!xT+S~NX=y%ji^$E4Hr5d`MAS*Y&3##4<=Uk76aB`e&e%iWQ@d+#Ey#xM6 zyY?Q!404b@3rr+Cum;UI?QSM)iU(N}yU)y0?*5(%L{6f_b_KH7rrw&gCcY0A{4*~P zC{doD=0Km7VUu94Jab<*O#Op^ZS!4?PTdcJND-cb@-D~C%E*N7mFZ4a|3`6oB}yuq zSA5UWB<3OIK4T~mf1-5~F@G%ZSq7nK{R4=jelht4q3yuZl%BG1RNVjtce62@S?LE# zdqGhy8sio{c%x)(p40W2M)uux$lJ?`aTXuoTfZw`86pmgW##;Wvf00onUwTY?RA`% z%FBQq-V<_#hkGVY)pXS${cn)JPHA48xvy=ZkI_Op3}U|4SZ{9Zp6&YkH1T2?!h8vE z&YaIh@vm;?lG6$L%ILvG4%S5)tOP4j_tHfqpHuNDd@w@yvJu)u!tEBuJBH^AA2ccM zWlG?y*gkC2<#>tAZr1AEB&DPdl}H?hbafsPw_B+$%yr~q+-B5F6g z&{qqLv>=kYw>ePKzX26I>+G+Ya4v$HfVY0SjQusk72RuDgMouN;H z#e!EXLE&ON73E1;H=2;}gZe>4a=lrXpUHA^oppOr=(T?;lu7a-1p!(;`L2BL<3tP* zD}nS>kq5^a#W&1lJ7U_H+-a4tG|x_zd@py;A$Iitsz#?>n>4BD;NG*QT|;Hr33j1r zod;$UCojI1S#vc#;W35o?1b}%FcdW$-AXp3$Px{z>^>tNr(i%sl`g)&aPH^1nEV&O zYHc(vW^gO2rA^Re9wam#Ci}N+@Dn7~40D+NU{$((_qCL-5OY4Kf}XNVsm7Rm8on!M z<5oOLw`ye9=P4=0nG2OtYQRDfPJ4lbr*N|O!UZ}FAIZ5&%G2Gkf)wFBx@Tri#kFEn4GzJe8%lX~LIo^j#XYXi7CX ztZD>X@oq)qqe$nDn&51BvlnMkX5rB?X2oX4RH?p)*eiZ5SA0t#4Hz7lNoyz|;=OWr zb?RMXs?%Izd&*21N6o3XI;6F-#mD?H27>)TSBbyhOQQN7PUL`TfHZ!nM9fQ{?7PnG z&_?1`+6wX5(}Qg z>SlxZt`b`gUo3!D!A8Kv=U=2Ssvg87eHxg{9_1SrL_$tqigW3ph0*xM&FtXSz!{DR zlx}8|(gl@dV(F?WEr+Q}b_s%)H`Q18`IySC*VBBECejsLp-F* zfkN%{)N1KGPBSmMUSW?sYl-Tv0><~m_Iwrb5xTHjpB^Kvs#Drk39V!~Zf}5d>P*|W zrjN)b*t@&X#UmBQvgsZAR@v~4>4nFm!KpM>l?B}eC3nfP4)#O(U|0cjdTNMo8?uRuk= z@!OjRCkjM{vC$16%dhqCwpv2}cKAmPIkn@KZur+|Ulr;7A}{qQMXP%2Dou10*nByy 
zdi#wugX8n4GHF)-Q|Zgz!xW%0S`hOYet1{3S*1?8Cg}E&mMc;1xlmIkfNw0M1Wsnu z3O-_RpB8R730)jz4%2Nw`$Cv!r=Z0$o0z^La)f3=Xe8+ww4`bdGIO|!01LgDKO)o0 zBIN1C6C)u&7i``>1I<6w)fZHfu&=tXN76d&MzZo|yEfKsBAGuz8=cnqQ^_fwlMv9*w^|=Ynvdl&TP&7a3>n%!Q&zM;0 zqrEIkcMJn)a`$*D`b@i9eiA5=NH64iTq1E)0NJI&=4+JJT z>~}op_CQ%)G)sHNlkp9NmF+3KQW(ueD5@wyo$^P`xZf5jMO`=WP2$+EsKQKI7-*s& zMoAlg_vvYBCuj{Cf*d=?YJC{hiUs7GWp?cP=Y0`U(ByITlBFv)x3$EiwnAyLvDRrm zS#gW!bfX*RperWX4gJoCuj% z``Wc0fYgJlQ@9I8)63RP%p@QFa~_{x+ogndaUfWC2hwWX=Ue9k@ zgxhBb?$oy+{SVnt<)e5L_$eD{Y9d=_Ul%)9LVgSBM*1qk+^I$$k#I*aIm6OI{KxKK z)gXSb(>~pUcRb4xIe^$=cM$&&=?`;rc}R(!OOq~zM&lBBq%$p109wl&l?dr#&w7`4 zS)lED-#-DTuVZ4#D^5D`cKZ;~*@`#)&>vjftzQT=MmGg?r~ZcE5l19zdiRp0x8xV3 zIr+svh0CPBss{VQ65z)5d{AAIH@-@{!+NNv)h_G1mFf2(ty0}f7(4xVy;6V+5|@Vo zWBDT~7m%3$Lki~}6h1A+l=PysvwCE3zeU9&$7py(&w6kj)mQ2p2-+Ag;EjE>T9U>1suNpeNBVo-fg5hzUWh7 zGXW<7*b6yXw8*<2LpZ^*2Gs{Skha$KZ&6JTkUH`Gdhk3SeYi&Mw`!6{c}>d4Q<+jt zq<7ocV=j3zZZ%|MkmHm>K6XsFtzj+>z&(ZWjee75hLB<1)1=W-tnP3gB3F;f{Y4LT z2j^1OnhE2(Gn0a)hnoqmhg!V7<0hOQwgIOkPP_!l6TI8EXR^WK9bF*OKBJm*&g~gC zw?2KjROtej3fAoD9OsOB?c?(IihMoDVFVy>>mECrkj~*mz4HRlEbSpVqrSURUhuoC zgi)-)3e-p%fF=0b4vLUI1>gK7nG`GUIh8$nf;8-%{uHDYPRgCALAjN0knaj=<04QX zcQr9vTUn5|xLopdLTdB@R-sPo3@hn~w2J#}|MOl0w2{w{CKi<#f9xS8nHfreCQd~<=^{MGh%QE&6aMx+ zyq^v9(MQt~1U&>%YwIf$afpY3GbICVrEj6@Ed;4sKb7|+tAKRC<;AQOBwpkynr%>_ zt$#koLHa63#He#M6OdRS8KSS?eZC*b0sh352(_c6vb<&YY0H24(sXG8Y3;NFg;mB} zkIy8Ks;Y+$vE|FaUcOpiF1h>-UM@q<2IrdP5tlr+P0sDfS;i3{A>6KJaL6OM35wE} ze3=QP6)D-uC~I7z1N@yiY)e~oonMAQ>ykwCBP_vF%FAAi&CpdI^hFz^U@T7=UC7tx zCMu+~C+6uA^~4H9xRjl5$a}qPD}_&URLqWd2y%S znaVeZ&wnbP`{CvygyGnV)jO3_hIm<*A|Q+{FYKBB@Nhp2$#87iB8H(8`( z-3r><)cQ^fZOZ2ma>$ymr8x;Vm#<(7Gt0_me*2f6rHB`ZAvC&8lxFr&FDyXY4r%+B z`2gUy;)sunhY!=J49OpZBc_+W1zq(yztCZ$d;J)|pj8_r4TTo&ZFD*U>3*er=`?Fb zD&*qhK(L7%we@>8r4j^VKh8_Jj=NPZ8gNb=n?=b-|2L^%UK?5s(a)rG}I%n%@5;&xDP;1|yS{_t=EQ1{Ly_gBxkbHClUf`$%*d23MrSQ_`O zV@Pb!xTSB75262c!~yewuaIaE^}=yDii8jq0uIO65hAkp)UD9L)d;S(O$!CNqc|jQ 
zE+C*XAbt~yJ_bY3lT%Gw4Iw}%Y!LZ{t%b9(!}ykej*l7W^029t`FnonAsn?f8thA6 z?mY^sd~O^G$X*)Wh@zbQFen5ZC+iwPk{-4nfk8EbJJQICLyGuP z;gwvSB+yTAy*VtJ<)^eG_~2~XAxrHVFI1d-6!|EXi&p}A)W>} zl%%}6G>c5fEqV?o(S)%Od0)2ytA%bCj9p=5Gb4jF;U^d z;x?E6j+pQja4&@1O{L5<1bEx+f^7HYYs!B)9-^=h?0GIt_ATy-;O)qFS9Ig{N2m9Wd4C6L#acX_zh5i!g!rsYExC<4%C+;u)91H0ifjNg{pWAfkJS_zOXc zZZ>^C_FQy6p11t#iRVqFv7-i%S)fEY;fmR6pUa(xq8}rP9y~uVjrT2~O(nnEh&#a6 z&i@@j)d^Hc?J@U75_0)v+FuFDuv(?yZU}k}u(fHT?I7Mbo3nceX(%IXAL=0P>`+Oi z7$W=U88;5>A}3^*?zhDKV-pZQ>9N!&&H&7eFg5ssf z2;w)!jCbMS4$2Syu!Ac$9#2_+?giEmiEMR5ui~D!MK6#WeM^(>XFe$)u-AB9ue-uC z8mw46VI?U$7*||+!@v$MirUNnWs}i=`{daiV_ioXJ?rPQlzw)tNw`{%)<(WnGdF!u z1Xm23yZhIVkD$`g!D-T%BAUAQGdw49J}~*czWOe#?lIlPms{q5rr(C)MvE{>Y9sm9==R2ZW13eAYm$r6?c>)m6<`FdiWB)wbhgU|cQH$=?8O{e3Wej|^h>R3I zRcxlGe!gOm8{W`i5L5FES8zWCg?^0e%Ob%+dc1w1Ak60Tsm-Q9F6z5z5Zm|43EXFtqC7KvlCS|J z4{(Kr5BB+nlwC-&$1~#p*++;(k)zNoRp|!Xj);Y$j2our^uqYwD$2>p?c3wrG&Fqh zB+eo!LANm#z0A1dZCOxk=DoCcBDOb>1OQH*3nSUG)6f7dn@H1T)epf2Rgeb~1` zBY5Q|WlePJRg=_v19+`jTbl{4IgK1&o#+V<@r^2eEDDokgqccS(J5n?n@r|EhcmHs z=-eCq`Axmajn_5|lS&wyjd!&=?1z$kDRO9JMl+dxc+3AKDZhL0W&?(C-42Q#D3HKN%(5- zu^y{R2Ql$dnjV$y=Ov?ur4OmP@Zo4)T`Ro*kiBE@A9LGWTrfFkWpK=u2t3iDl}Uik zOiah|t)7{TE?yv1lm0#B2t|-KYWO2R&tEr^|0o&de4P~%ia)CH`p%4_0Wc;i1opxz z*$BRor=h8#QRv06a~E)t#e)5NCq&+wxy0&G-%U>DxIR z6+%XRe;<9h6|`{Q=-{AHRhdYL#UtZ{W!Qm(6X^a&znAy_!;<#*C7Bws$1m3hRQ1Ky zh6q$8q9r7MuNZ#)myqyZ>EqQ-WH55&Jq<640?+0dfJmO293$ox2Je)eBG!;$1taacw3b|MT_Q|{x;w8r0qpc-{ z=m0=9b`fmDA3hiXV^0HFE5ErE5HL%9->VZi&-1gOTUkzAs6^wa%J020x&hYyxTv_A z^!GI2mk|YWIsIv0O*9-`PUhm`N}fwtj{NpZu|K}pZiokns-Rxi3}R@G;Rx)voc3}9 zJ9tAKG=%%iHiEs(aQO@sf246!#<2T+hK%bqhXU&`gS&7iqjVG&U_erK>#w+CqzW^w z$)B!1jQb4P`vAlklGV7RHQ#-c*5nR+acMF-Ah9M3=Ox%;xM%pVamKJpZaeJqbgsuu&HIKSgp-n%%r{G-1QzJP)=*WVi|;|7bu9N+Qa z_oW}dJU~m{a4C7BMISI=R`^|L;m zF!QLQv;+6!-!_c=BGUx1+~6R6M5Zu_m_6^?%+JK$CJb3m`-Oh%0TBjKgxivKf6VUZ ze@65Z5Zm_N&U@`vv+ePuyB(0-lE=A>_!@;i2qOE4?s+cbvh-%(T1uR1GFM=gTwI%U 
za&8_e&b=|PCbo`8CvcbYYkAm2bQ8k{GrmcyItOB?c#q-eu^$s?;6aV!554du@)H@*n7VSF=UL)S2j;)6<2p-_ z{`>o|SI3i-u0B#M+1+r_gmTr$_2cWuucxbV+`G#L=)$*4e-#*`Bmtb6tATEYBv&zr zfAiC6eeoxuun#6Tr!WvQN91(v_3h&}#5fXqd;~V*iR!bwKUDn3f1(P}r}cdaWZNVe z!0E2f8{Eee5E{OEJ#XY;P(6FZ<*wg@`&l4D(M3P}ed!lcGS+Dk_`8Z8 z@$VO}J1}pQVjuhk-#XMO0nwWMO{*<97k_#@jYN_1AEd*CK}9vz9!K>Z5XwRK*FApk zhYm%b4IO&LH%o442JMU2LWeB=!CC*01*seW=O&;bl%k;LGNhR-JQPdClPOgS!UVG*%R0G8_%)Tk1VITfTXaR)SDF+rmBU2cZTHMf`tRj26DCYOe~VhR2us_YBlgD#R0sy^`_o)K z6LF|SI(;asO{`DT0UYNlVb6H=!joi`~j5eB5OauOT<|J`d6xs9yr?0nuKmFsE z3n2_~DHM%ieKA+jztKa z-4cz+uS>Wx-Z+MQs`PV&GZ z{6}$C@DXfOF@<*E4*~z2u3yAGE^K{6L&L#Ng!3E^9cf8a*_jJy3U(4Q2%Wy2|J0;_<);J-J7YNXmKP@1~V_C z^sM-g)uc0m-jXC%(3o;5)R-{1ikTFJXzlww0vDihL;C(lIDq!+j}gqOzDZw2GHWynaz717lbpKk`cf7sD#NE+MrE=fJ( zt-sSzL!1Zv=s;dy)H%=n#j&Fv-(zX2Z$oW67-S@{9&xFaWd}(u*&jyqlcvcYP{`UR z!4MN{f%>(OZ3O#)jyd8l{O8aL)Y}Q@to7x)qi%w-I6wkLbN|bwm7nJL!`hCp@)~}k zoq^J4h|}6~a7=&(N4(k4?oC1SbHElwle|kC;YJTek6$R=_InTc5h{I^6$t4ICosRBVX^bOaP|Pb6V#WZ zv}M{1|8o;<;iqi_FkQIY=HNh-wdcp0sgl4 zH4di#f+Ii5?QS~}A3d|QdT;4!iXh6*KQEB?6E_p-OEqss>88d1kFGP1hq7(|e~Gdr zDoY_tDsZSYvP43%?@h8K>&Pg^zWk2szSDg_ z&-45K*K5ekT-SM>=dpY~?_E%k5qcJ=M4!(>!_^Yn?~-6; zOuvFJ?0PN#>Os>W=;tH>zEAK3u2v&-if@2i&O0AE*w^hjc5((xGB&;tubTp!-YAHu zUYS^fg!mNV!f@N6NYOYn1Dcd55ZMi^+yV&h=_dH)CYFJTM*pqb8XpQMUjqAcbZuY= z^4+c=#hJ*+t1(yho}XDTsLFUVA#fbKTu(Ys(0Flx8eG${F^z>NaxksNyYRTpz)Wven`~R+lNF#|2W|-%H zS08^6%~LZqab5EH>ylyW4rx*XAyPf9d}ZXN+wY}jJ%A!Kd?^K0C)n!*x6w=Mt7u{N z>jso1Xxt<~UJ^M0hMbJF(=|VJ(ljO0Vw~06j5MW&X!Ai+c)ErsUyOked54)>R|1Pa zS^*tt4bi$d=GS>)T*Dw_?B0LY5YQQo2%gUomGEH&K%UB(Uj@@|g5hM}TJ#--5*o() zyr9W`RiT%T(#`kv6YP05i|i=2NIY01wsvTSFTnPW+~m?3vF?8-oy9b(T}N}+>i$0= z#Qr^kv}c@4`g)h%-*{>k%sE;=;@m-vsHlD{IoYv1syF-m>AUPJZ)|zjU!_S;{RB^T z6rf<LTt&IxChaAlYb{<&L|zJat9`Iv zmAk_}w5jIv6%yEr`=@{`V~i?5_ym$bv3<0ZyRn=*b?I=*p_gLjfhBA6UtT74j8!3g zt@bwYtmE{89tdCVIRankcJ%ZM`%t5Z>_{4L&U8jZ-xp9A5Ayz#bSlK#!0Qq}+Qpsz z=N?OR{nI&G4h_bLsAqEE_x>JoCkHpPCkb4)68((mEQC;0r7fAx^7{4qFL=i6yK+O)BsXN>~0IlG30sn#Fm>Xz1Ucfma*%w2WPVk%^ePPp) 
z`r`D!tq$brumrf*NdTTYe^B;<=ONoX2)<*>6qr=m7x=mZMt_0O`Z)*#`au3uDM*gR z=#LOMO~hauh|){=9-KzyD(5_2(5i$x;2qO!+~vSHhPveP1^*^ew)=&+6&FqhVc#_M zmAGhtqecUy0+)b2T)yEA1l&2)9k%thjp^>}=!N|1u@CT(EaqCkRB8(raoJe_<9Kd> z;d9L)zwy+k=g2`LJKuY#*;P7q7mdqk<4I7Dyj>c)(EchXXVMHD*36O5uG%=U!v3p$ z1`eUZ{84rKMLO(+^2V9C7EWHrC-mA;zVc7&Z;#Xd6O;V^tD?vqv);ZHKjUCyW77m@ z;b_YnjPOkf93jkk`fG*H%IGKQ7I#e)ve7;q&8q|PVk4|U69Lbk69_c3>0}+x6OT%p zshO<&=2M`{^J!~E_j_2lz$H#|s%Mr?1u)+S1nSsbg7{EWqwm>#WH18q$ml)cobmRA z)g55%YEvxY}m~%bjrcQjaJEgy?V&~d3fx+do1#X34 zBYF;2)SlyootLj0h3Ncs;t`RsHL7$jRH0x?Lb&yzuzvz}>z#0QLtlmcyGZf-f^uoc zUkE&aoe&)E@8T;E_6#&Ph|_%z7YIUxtBAYol&0)du|BmT%Sv1|3Lv$=k2nHPf@CHvr0vg4?j^I6$VEA%3hD}Z&% zw59=$XUIfX<}*E;>H#nvn%w}Wtla?k3d8h94iPgPa4%ac@ftWGw)2-I?|Vt$K5Bb> z=$c#Bri*bFKBYp9`yQip=7&T+SR9UiKH_tPe+FP5QQKI~iWQ_ucI&_2$&GH@$s^V3 zEzyaB~`t#{%YSfnEtQ+f>SM3R^n zL;KmDVyut$7AP8Vf3PXsEd^$qmyvv_T5&ggf%4sD$*6RKl7&~NLxJN zx?;fh^l|6v_+}If?Kp0l)l@NVa*g0W;lb}K93Mc-I22|inkFxYR?iDkP-CXFIYUoz zahqGzdtSnb8&1U2nbIfAs7F5{W}IcxUQ;L3ybx4!HUH;m2}j^HZNK;TUtiX&91S*^v}3 zS2{^qf;*Zo4LHijM{Ot9AA3}Gc&7yN~clYokOz>#U`7>n@5XI-wF* z$C)$G);6?u+TaJRtJa8fiOtE}=aX;>OJ5V1asq>L6j0k<-(5dlxfCi5QxLK){sJkY zH+Kw(-foONUK=^#cM`mJ){RG+`puVa_1lA;xH|x$M>jM7?0RmarWU*e!RiEI&zu&V z6B-~dEB6C9g$1J6v^7FY`b>lMb`;zcD6ST`SQhIbu2v131w?>2NH8l)CQ_rbhF+TO zXyO@VU^<;16)K#!yO2J&a=mo_$*Z@id7v}=zyp+ZjIqAoI@xrCghUJ~@`gYGFY8gq zcDT9u7$Ol)6G%8YyHv(na7*ziDM#jiUf}US&QW-yFQ2;r{ASs$ZxITXrWRV<%?#BP zL1xW36)=lOUx9ls%xjnL{%~&PrLY)k41{$rz2&$=fnUpm2wa7S)w9nMQ}fTlU?EzO47$^}#3xi;fZZB~vUlqP7tFsQiqTCX22$tCjo8!Uri)K7$z1K=I8r=e?$5(_i z(p7fpS;c#$(_W-ih^Lh567TU7cV4#DNnyaL(A$-&$*nJ1oN#?IuQ5ry>I|yj7j=Ao zedz}qAQN)qeoMr>oc%?%_-hB;EL!zvYJ|`qjHGO zfGSObeUHSPbz68sT$gg1Xf_AXCXZZZpO&_w6&pH}2D358=UKWA#r0diBLDq4$ zyR$3#mrLk0`L2gjh&r1gpOrbfEwQf2!o4O+TURW$4OERc%?SJ zS`Vj0d3^8j`)L=*Uebm$X2~fCFiILXZ#{4!G{JgneGT|($iNfhz{&t}f zn)q{wI5tTtOP){KIeEXNZ3NW{@g!sP8!BHJPeazGoTyczCrN-Ezwc5sI(xRF%OH|V zu+PS& zG(|@Y6gfUMGC2!w?b8S!}&?3F$8SQljlO1t9u?Poq=xPiAgWm(Ip@f 
zyR`b4pb*m^4WW*o2j!>y%IB6wusKl5>(mON9?j)IP$rVI1f}@{*_~f!iCifyI+ApP*dNJ- zlFJ+A?N)Kq^3(*eplNxZ6Q2R$+EB4ES6U1_u~3r@r>N1a7H4DekEuPKFq&e>hb8Ky z#hufq=`JD@9amOd*uN<40rNy6?+G9iO%Ye7j^f|gZK0}2>6COn}E<#bS<_NFWT zr%?gfA|$zy!)N~|FtS$PN_R%$=g#0WFme?NbGd$dr|Q@7Q@Mwe+?swxrOHkv9FvIy z1XOqQDC7oia!psXg1V1-;+*p&agSOpdy6%=RdE;v?)8431!D(`bhMkT#T-1#4hT}H z*jR-(?O`zbsogHj56Ed;Z`AMfRw_l`L>=C3@6`@o98-sBSOJM?KZK>?wjQWloMpn}5MzH*9 zLPU;II8IDKfVq4fkJCs*MUBf<0yU`-*3@K{QOe2?Q2D7!x+4;F`{v}*wp`d)Un^gc z_XwX@(~j;+LY+otm6*(&%8GQUTwZZ0AFz4v+gXSozQ;8?us(m9l0O26y0DCKLb%en z*2bG^7Gz+G^rykIY63KL4~=jEs{`S4yu^2b{VO=3Y%vZ3YHqE8%C&RFHz5E4Oqu9s zvL^1_rMqC&x3E-Q;uU^C)~mpMAd-Y)__~yvGSdQQ@Msq6b*BB3x5|03KI#HR#To;O zYzKE%6n%?raiafIOn>PXkgUu{{3Y;&kJn60l#G7Bw;BB?@V|uB*j^&~fbq{uo5}j+ zgAac??x+fj=u_KyC{}^=o|iPm+gFqO!e(1aEDf(ckv1{(yLt4Ff{bav!`v;b@|*4} z=}(o%*XHBs+o(PI7R(M0KsWKT^?+Iv)X z?!C}F>x%cZ-J4_=E2FO%1|dSfbQ#$HK7&1Qzf-V-tY~q-Kv8Xx?TK~u6Huq9Xj2vY zqB?2jN2{0ohu)7WWXUgrZd z{0d9YN=WRt3K+mz^CRbzlb3Uu5=W$jES)L$FD8xLF+j2&oh!6|+y6+7cLFDquELb* zkwjL*OK5pL@CwZg49*s6O=CQc(TI~LT|422pGQWCJX^T(GM9Otcr!u4Sij?_?-#Q2 z5aEfY%*Qe?|6|_&oX&KvKEs$K$Ul_3nh(&n{pD%joi~>Drqw1a4CS@luiirCYV$S* zCSR6C&gjb5%?$H==HS+xd(u(4u%!>k8*#R+;v`iUFjXx#vuM!ES*>H;aQF_=N#o5A z5I(V?=S1>sK0KmN)=&sw2^ETKQS&PallCCU(ch}6`6l*fOAL4nO~{Cv4^{uf z0Kfm$I!+Vd7_9f<(4kN>HC2v#=B0G)yz{N2()i?EXnedp)PKr1vKkmVQZ;6kSV-xd zU7}?(O8HgWxz8OKl}+)4($_0=m+g}Tw8RZywDcc+2lHFb%~RwDf#vab;kK*Zk;i$> zM$v(V35&oT5SDR+sO)F8FyUxE_}1jQ3t=4CH6T{|kR93hrNMxv9`%MiSh-HkwFKJ1 z>vesX_P)UEyc~e)mT*SQIAFuLBBoD%+1^&|=>%-&9{P2og+@r@tzS!A_*Q;<2Gb#{_@Ea#lfxQs{=s>;k8i_y$ZO;^mTStsR8T zIXC}Ho<=K?6u(jyle+RVJt^#UERp&1$Zdbdmo|HE#qbB1czfNe$QCjy1pk`r`g!*K zZ1j1BP}*+F?6jXS=$sk@?WxuNI{})d<$Wb{_C6qQU7h zA*01$q4wS|=B{G*e~k;GpZ5V&*9(MW^~tY$ySH8>|Ce62)?VjNBIOI)zSQ3Axm7T+ z-cz~m))tPVyMR->mZHv;a?9kUV>N>%3{ogEc^yS@XN+8fDCh(k&2UGReY9=)1H*2T zXu8-VqApl5-;UQY(hH4`=RQuzyG`?)!2e81YI&e&i+!H5kLnFT0+}2+Mdh|HO&gv| zTH3{p&iXV#tVnoxEhe?~VMU!vAwpK_AG;}S&fqst)pEPK#eSg|#YE>22GNs)|eA)idW!r)#2dEo?fY-Vo`fMn<#rCQ_NJyF*5qQCY~M`CDNRjNi3 
zsV|H9C#jdmDkvVU3F`W`%$nU@uv?k904GWHfaO{Vx<^&k?TcR`{)uBBlbt4GX>aTn z3W*+<9+ME7M4B_EZ>lLtC9;lZ^j4+53sn-uM4PcYwr;o1&hXQ*%ag~q8bjEbt=xWR z_~ME3f^n;;;6Ony)B>EzoK`X~>xzkHM_!gK5nKDNszULBot>?=98oovc|-&n*W?zU`(Z}n{k zlfwJXB(QK<%_ZM5)bT$T?e_`qG@V=ReGjewJQ~8Wk4Ea zFHocc&KVZ&qoZMgX%YG^uMS`v)_`iT;GJ;WtYg%p+(F1O5har%f$MvP>rJ0uc{%M~ z?t2kmwP1z&+InYIYCy6&Cq;W4fcSb?hVxZ>3|rL$Hc+S|uKFA)Y6#@Z_vE&m87 zB@$sZsv6-&|5FBwG=?;{68`QB>`0dF8zM;2-*i+yegChe6?1@t@$OqqQcV_f8&YH# z7b1Bqza3a>HrRb=y3jQycC-RmBPy}K0CTV(A1E@yFtM)rJ~ryd>yFy%_=4^y8%e0M zi@DdooV&6JOb*i(n|MERC zaNMBC+f>WOz5FSg;}rzP9Q8OhkV3kfbx(=(&iKatS*!=)DmLQ-dnaa5BXVyiTQQf6 zYr2~hBZH7a663a_`TM4;)%E2hkx7PSqDPXMx`<(~r32x0!IFGdN&hycfV1WD%cq-y zsVNI|elzEAsexVW>V<7mb6oV9X4vr#_J`W^V-S7*B3eh1R=bK<$8=ZLG!^A=KKD9Y zTSOqhRo!|Yz>JyVy;APeM!Byq#>8vJkbA3@0hg4uACfPsYjzzZ^*nE#pPCFX77^WC zU8RJHHTyHeFzH2lxhDNXDIOflESdDF7T6D}iU;*N$}<;opn&rw^Jdb*)utAXTRPA6 z;+)n~#X`ukT9G;1ckzr9K7G5@Oy7=pBIdh#J10!yCq8~Wj%FwlKj;0#z};8*=hbrzQ?i+ z8cB!er0}=p;pl7Jt95p_S6{f_szvU-5Ht~i-WtbF8TZoiPANol7)L+B?N=Uyo`uDu z+19P3&UA%_ySoovf*#1d=C-{p5I?I~nUd<$kCnQUyR$!?Bq_Z8cr;qSMMvx!kXrKA z8*x8Wv}SMOEHJ(2y1h!vvzPCpUX7bD6csN7p`C1Bf?;XvEYLYVFj9(9O1JS-y74M5 z8Ob(JGSJUU(M31Pe%AIsnK5QRtzLe!_X!oLZzj!X=D`F*Kk;B5=ex6)8TH0_O`u?P zCY-_B>uvWaa^kyUk#mvcSMBRJXkPrE#D|c^<$mOrN-`*ce8(AjN4d>YrTOrzhF*pcm{Ar z4~Qe@Bz-gyNTFpAGkW|imb(l0(;PZ+32EfDi|m%zt>=ad3zZ@tr8^Y|>H>Q&IKMh{ zT9NJupr5y&U}d#O-0y8xT6s8FuDE#no-QfZ+>CEQ-8X@_mgieBQ4Cvr57VBbU2G=B zND_w_oIj^1W#=>~GX{(=W6WvG`6l!Um2+2;=OLmyBY$zYS&bsJtf_M&@?qY?Z!oxM zILN>(v44_?7MBBnQ};AhNI~nvo0qv7u~0vJ9h=rvD|5OR zU{>5@zaA$8&M%glc#g=Vv*FUC`^bHT7d)Y0jVg^D-GDh|(isIIIITVwU4hm+cKC1Z zxFGG|9O*l<6DBU&QcCEeL5`uU`mxQ5@2h(9(Stc{)ZHbNXle|?& z)o>FSF$L~4e3wQMZ2ZklZ8=2woY3}^#$%Jcz|}A`wIj^>0vFz!B3*dHoKolITv@%v zT*N-#x6=q4s*rGwjwP^4peJ=+DUR{fM87Cj$(b<3NO{Cqcuw-G14iJSA0JmG@$~rY zG~jBAwinOIWwBoQYgsCy$~kvLS1zkOuLDU$d7cHf!%l6hm!I)}|<>`C~Tc5dHXja@3 zK#v@?2k9j$6si-#(Kv_op^Zw{XM5~!UAFwkg{6H5j@?tOQgr_l&=+{GO_($GH< z-e>=4U0$k2DuRxoK1}Doj}Du(`A+h`qN!83RWI^>`a1TrB6p)?OFGAVr(=^M_kDcy 
zfkGkU=o5uD1KkqqN1hqI9#>-&vbJ}MUQNC9;wsDy6QD#ut>MjR)egSnGR9h8;a!{F zuDXX}xC4!w*t-RJa%X{_KedwHH!kG>Xbuh*W8>Z&NHtN=>(6$jINWwlzm7YLndQdeW35r znRZJIRFA}>LS8%K+q*qWbNOI&*uB6@m>&1FRj5r#0CXmaT1U7ii18s;6ySC-p3xc- zA>6SSnH1NK9y(DxNRtl(fxOcE0G!kBt*P%){$_dLO?Cf5pbyyx@NK!LVUXhOg+o2d zljZ8QRk}YG&fxa9mGjwqX-pKdcNLfqP^EEFdgS<)hi23eq%O|`11h^Oj;{pu%dcwS zK770Fv^q9^Cd6bmXJ!CsG}dgocWsI9T%nApKSo^f0+Odoe;P3?;azgIXwk!mr~;v# zZ1#M9tFJyo<0M!oY^A+Qf14l++7A!3Yn{S(R@zhiV)8R5-|c%+PkHI-SN|?i z{M_bw&VL=!lFLQ>JO61LZXWT8bf+$tzxOuyf>^QW-OuF;sqyn8D;DK>xfJhKD4?_T zUUjYC#z|6fZqq_eD)Z;{G;g~nI4d64VOKHMz^Z`7?Bk5IMf zGTeUtEXIpQZ<52Am@X{}>y3vjo^r=EVpR5)>*7U@A3oRuy^G8&8zG+LhYzzSv7P_X+eZ7-Q*0ffNI7fu_tI=a>y!yQPG^;j*Uy`_6}6k z^Wkf_Fih7r!+gAdZtXsM9~94+mzEbSFbg{Ef|Ew(!BQHE5(K_#)ScT`gt8o=h3wAI zVH1k;5y(=(X=1w4p~!lqnXSxXRp8*71KoB_xux%kvxONO5jvW$<_((`-ohZikJimq zMGdIT+rFcmVeYspp;aSFxq-VL`-vVdQvat^&6krBjA5n}W+LOYO#cyi-CW5~fvgOr zNV!a-Iwb1oUr7rTuS#9u@Da5;n?u1YYg%NQd@t2=We)YMxx3p~^jXVT6U-=};7&W^ ze4TwLg47CKf7PzGL{v2acSdNZi8&`ht{ZUt`IE#!#PPs)9JQX zFTbH?GDmv_dL{%a^w|aDRduQUAl>ooI#>{5+DuB{ulqrvr)#?G51JeLf1ew4I`oHmHF^pFZH)PUh`8nKa?WTY)4<=A2Pp9?RI+e z^`9;wRA4um~gvdZQfGwa#T{#-9q#Sey68R>lw?}-N&*CBXA zOBle={C*90>D&B^PA3R%pCfX^tgjh~>MsolVEEQZyp)Dj|_S#wTu>Y-ly1XWy@6K<3tHrM+HGOO{sOiA~9zpF!QKLMmP z*WKA|H1K9MF5lezQuzSwOQX2D(sk$>k?9iH5?{!A7wHkyhg}J0UBi@){3@bzm^*N8 z;Nv%mY{eK`!AUZ(w#48YXk8dkk=!LMoIcFR@L~V4I)2cQ-!~!>h#o3bH01uoq!E3C>gsIx1#}4y&|6mwV~D(jVfN#B_)>TLt)7m2 zYQ(Ji7tn_sdM~(cmwwPcR>IVl`_#3&oZrq9KciVzR&?DWLBgeKqKPvqxFBC#(pi}B zrRd`-6hh{dWQ$VM;_bGTXF?^wN>%F?i(QU)l4IDkAAV0aDS{-i0g|=cDSzdRp8GDh z^sT?ar<_N9hNNj%vqrvGV5#r4ZBFRC*V<D&6ksO^KQF0089QoP~7o&ysE%+v-`8l%qu(ZuE;QCeq%4oo%=kR%RgHEI=hnG zn@$vh8IR?9FAg`aU}m_ynrzG8x{*iM&YNcpE~HdT7u{35;O95vccQ?v#?$nEsTO{% z=*^pGii;`YX6Fu`XLrQ?2IzY(650nDBD6 z-T&w${dM?9y3UX!9IFrXy`#tKA-hjgU!k-XZm*G6AJRDr^_^xEppE;L%e+SF+;2}} zJL5LlDcZ$Zkl_Gx0AF#cbnOp}UI(?GcRn;?udLi<++9;jvc(%C zowf%%ZN@Wx<)GNteUZ!~_iB)xONHG|13lc^R-c$V9PF`e%S2*mHXbJXjKUBk`f@e) 
zdyn2`fB2f^a7M4E90VHoeGA~wc#g!52T4e?*Se#kQW>|MQ7+UZk`2pnU+YMr1^77fHo*RLB* zn_7V2T^FN=l9cwlj5Sl`PTNOj8Z8*>Uw=I4&vwJL{24OCdlXhjcrtE;4qwEK);q%F zm(HIxn>;|NCSLC)YAn`q=u$5VFcQR*qLkd1&oZJ7uRqFbiKzysmlMD!&lwlZj$znS z10!pU27>K5ciz#grcq;brULmsKl*T`{J6MgTpcD-Gf^pT5;6h^iP*oqaf~Ov8(&Er z-LLCY)BTEgcYH0klVWy+%ogSNlVO?m-#8^>kQ#uGl zUoRx(F$a_8h-u@TQAEk{N}wKId_~FZ?(R-x9yd2hskUo zy(hhf7`{4buEPn{h_oenxV#zxAuBF!+nJuI@uTXAWlJo+2pT-~WISx~qe!^GJ4WJ3 zWnQb=5{9a^UbENWen-WwAVUJi>zv~9a<^0gs+LPSu6ipHae97#r1l^do9kC+Q`Erf z)STIUGUpx|)1&j@HZsFwPK;2Dh@HtNiOai-cI$ID7RCY}JW%hXHE2v;+GV=6uR#Zx zSl(}ivN7MfkDA%^p6Kz-A*X>M$7ywl#ZL@KxZ}Tu^A_rgqkmm|xez1mqGUQ;HF+py z-gM$8Z1~}x5DeWf6*@ANFiB_LCG6TF7|{vhkJh0IXk<3Gyn#AV+h4-sWD>9_o7HH*;7*4oiFEQtZda)rj)ED;LYL_YQ2srXVpit3#t1`L!|nXLCjVc&ixG(kVqDtZ(IWs@dJ#!jzpPr?n~&lsCM# zHfdBowoDnxbG#&m-D8#GG;KxDVSB0%?65;D;iYeI*Wa_xJeaz*zLik~^uYH`(hFv^ zVq6dwSU!CGI_U@tR?sY-u$Qu^-I=KZ#`O*5aa$g3dj3`(8d|w1is0m@emX5O0YcMTA$n#ObR;hiAj=1A(sUL!N$q3GIkq&1_TPGD)i&ZI9 z!zni!TG)v!B+oGw*#mD|b7QalAm$`iR4biy?s_spjravC^F+5cj9aDZv+pSQTnq!n zy5(?%3X!IP8eMx3WO?GodZy`GLn%gY2Z~=D9Actlw_q3T-(Dc4HIHB!n;_HYwVSRA zf=k8ZzzdPJ!??~O^}DBRfSibi1!g8OBDJX+!ssrz z$eI=P8pdOL;w+(_>qrP@W4!`1;t!@J-BUPGx%%3D3hK$9Jy+p*Yd@mRF;%bo=$u8E z;a26LKoAV4q#J1Vdf$G)cK&?ngl`wQr5hR(2a+absP<6VNVzSWsu698WFlh{bNx9B zi|J^eWmgvn;SE_jBv%EP2nX5b?#{*zQ)ZBQ#f^u0V)eS18oy~UK3G8knv=X}qQBI28d8*T1Fh~m ze6-r!<3N9U&^G<0d^n@G;5n$vwDhihkk%G>gP0G_E{`YmTZNDdQ+l45=z+H6?lDkH z#qL!TXuU*pTQ16`@WnsX-ao4E|B9p}QWVrP|Fos-4)wJ~ygBkjGXv=w00k6=5=GhF zo3q!7>{j%8JP(_O`BAOdDKsY z4qb%S&szF8HvA+~n+QdhWFD-8*3KASTX$R~T71OTE-DU9Als0MtwVm3%L71{uIImm zr5{gzqIn7#yU~_{w@VHJJ&-)%hLRq=!Z226pGpGl2I;nlk~FtFe$8gK68-24`Von! 
z)%fyE57U-c6@!8_pD>_yeEV<#%ei=7o7EF%Y-(-kBVv zN}>%T<3Nfz@K>yNMCYzG27g*0!Rk-(h7Vwo_Ni1Y-WZj5!%{6gC-b;dZ@Q zy?w$wNqO-iuKJ55GztbCmksni)V8LXIbIrwgKl|s{l!J){PiH?9-Z;;K<=D4i;Yu` zwtpQ$m01=|X*o#u0t9F}lr`MIfjQ~HWz9rJ(dA1(IUOrwt%m`VdUF+LUB|Vp%=CtJ z*^_OH@*0jIkFNBOf)1jm=dx;i0@AYRbUwA8cF*3KHPJqk*!s&9;9kLkrIOkz- ztZr`TJ<`;eZ56YR*W~Ni8^#wpwqKIHPNL2^QV;Zf&(sLd*%E@Dvwc!jy@#Z|0%c{- z1P0KqtOjamjn20WL1FlfoT71b9dtu2vh9bWD$)EmV0uu0N2&MRIL7rkIb+Nz^CX*0 zr|KHQF1;AZ=;uavfKzvWd@CZ);o#D59sN>$)OX|BwWOvuRT?Yi(Lb5S?A}vgF7K6J zPpEDcm0p)h7UI!Ow(l1pX*cau5fgg|2|Pqu4iIh$kVgPYN`pkEk!K?G$JW5!g(h0> zPP6GKMvvFx8NZDC#J7*;-aXg@X-M7Y6!dZ7xrCnV{$RF)o}gJR3#}`XY_l`y+V7f6 z_-g9a^b$tYc9VJGh*Z@|V6iI=s~sdo@Xgn3K^nPF?vgX+p_CKX zws(d6xf;bKSn~=3b(LLO<{ShAdyUx+ui`+(br-hQJp9?7@>Fjn?m@RWJDsvjQp7Z0 z=m(t7g`P7Y&wq=(!T43~mkkZ3*VoN|2L$d7DZuVM1EY4$jBFzGaZB>1XjC;H9 z(7XLzIzfBiH`qg|yE>lyGR7U4i72GuJ=xBwNwA4Cb4MQHF(uRwoN8@R6gojE@OzY#% z`Ot3qD?b;wb*blY^7dW3G|&F5NeoR+$$3Qf_{wQ;Vq-f7&);=t0_LT06L!s=bJ6Q~ zMM{=~Fo!Hq#hgsKo!L}uuEE{@%)mAyIaR+i9DU2~i6^#WbP!Qym3VgFTt{35$VjmC zoI`%RJ0EBKx}WvL7Qo(9pF3{j4ru$I%f%T&Q2Rw@z}?7#@lHx$9Za!NX|_G*Ws2*H zN{)&xtpru7!Q1<@r5d(8=4-b-i;4SaS31^+JxSS-uU{A{D)_PAb{@@b*t51wokfKf zqn---CoY)aj4$i#QycQn<6zg;={1MCY{5+!OJvBlgq>Z}4Dx`10}o7?ia3rwy?*C1(PD;$25!L;pBKgpE{fR!$kMPv=>eT%8&ntIZH{6=RF@D`JT}+DQN1f*&SMkkyA~OF^;e4EAGK-yCS1d z_v6d3I)%Grs|>lf3*)_B7s#{5YkvgkwNYm_O8e4_Qy<2KRv7nnq+@+N}Ez^1P0vohAtyN=+ahI7lkr zxRiN0m!i9tqiN0(+=RO>*4a(xq*2IYUbd@9Yc8iy0NW+Biibk*JTVfPx!qqrkLbYH zgjT;Xda^Y?i}M>UBu1r|UeE_q$1LPArrD`P+9(~Y@SD;a2+YQ^jP|%`tQm3NbmLnz z)$s}~3FLHIp5q(N<=UfrL2;opGx07y$;pui6aDNxibHT;s>a%d!n}oAfEsh-)QIj( zD?IeNmd>G6M5%y8JJh-=6qosIwnRa)H_lw3Fx~&zhOAN?Gn8jDT&xK#5A$9jDw2e* zfYn`HzLgzP?@_Zg4BqMCEb^P}v@v?KEbR3n2`rugT9JGK0!|LI3yZ(Zc<|7m9;`#mv-aqG~B^c=2E@d_u zZN9SEv`W|JzrK})=#DLpjR{jA(Y^-jZ6f0xG+jyTam0Y(0yY)Z8!-#HFJs?jX+(R; z_$8>|-|e~~Sb%pGp|0o2t_>8vG6>j;uQ@J(pYPsVYTR|+-eRfA!Pf#yjc;Y^>ji<& z#RE69hJzA<-t>EYngaE2vkROzCKMYIY#6GA0a&fPaPi9OlW@uMFwc`aQ&-N75oAS7 z%~|mhdePt{#ZVs|j#6SGO61iayzhE5O@r*rY==yV_zdE>C;I~PS?F%Fu{lfPytU~b 
zf>R{^yVETY8=KHy9D7D8;9#_;{`m6dQ-)Xgd17fD6i8b~B!|6zRiL&L2v@;Hble4g z*)T1AmXST!XnWL?KrNEl4Lf3QM|=BaR04G|?_EC(Ah`ka<09(aP!q}J&;iX#(iw85WjI-$9n+ zcS7Z}qVCFH(}rGC=*i;>7R%6cR`$qYZZq%l zcBN!YKFkL7sAaQ%T9_ZqJ_@Zk)*B^1o*E}ymSAg;(=sfTT4WKp;5xLgr~Ckp?&HnS zbXJ^@h2F^X^m~RWb9JxiC4!)5X{mpG^sd5d*kB%0lTUAMD1XEuTAQzImOvWV&Ktf% z2#GydBq7b)0pd(9HGU>@7QA}*-h{%EoS4+m92hrqaXf>>pTk^^=@Ol*@s@Qm<>tj} zux~}0#)f{W>YPw=r~DE(o`um6C@TX?a0pRzMsgM6!;AzT1Ao0Pb9~5wbS;qvRn($IIeFjn3n z#1lUZZKoU6N*wFVdFI@U7N#ES0h4x6E2TIa#0*BzC@2T-5}`UDIj{k)S+@+SJ!h}j z?Wejfr85zKg`i*6d2($)IcE$nv%b{gH@#CT9n}aW9`;Q6p5B5GgQ^{VE?>i;)R_u; zon{+Ua>~pTe3ENlTFqN@_1oBGzUwzuJh3Uk#a-+TB{wt1(jVW;CTaI+zcbxf)}!z; z6veGgS*haO)NPO>#_B^{@tDwRnwX;4VRqe@g2in{k#cM?OlTk93@)97$GG0-Se#KrJ6scI`8UWcK# z=GY&p5bro?4N1|miq!%}3k|k(XWF^9^M3=qHvBG(Im;}!NSmbC`D&TS@mu}DFkC^X zdIa3n2mf8m=+x&|LH3tUj~^vGG*q^FrCwz@ugTKbTRc|7H>-3akE@39*nr!2xVHwB z-gi8qYg~4WXlIc|vmFj`agstYlr*Ce*WY*NY15y!+u~zw*TA{QW-P9MCCTv}D4DhAa3@HsKFoBbvR7e1)-cC7KmaOib#IK6uL zephF2-GluWUPy4>3siZF9FT7%ZpeB5x|R=R@DHWypk$gP?6>bnS}>oXWD>JBE{AoC zfTO3w^8r!0{z_GZl8)Jh=+(lH(!JP`L{{GR?HhT=D(j-+Pu5bC9c3>VZw<)nc|?3* zy{qd+4xo@SQr$GXu>88B?8gBHC%0IDfKKZEk(k&q4c0JTh{=JH^cb57kT+iU_0d zWPW-l2M6!mLW*REY?5~)b;%VmT*^aPPer+P<%L`{qsD@wm~~p*gm0(hHwm9zw(l!{ zh%4IVh&?vhfgFDNU?a;ErF32wCm((dGP6IkP2KC6~ zRT2H&hnl6B>WWce1VR=Ri%0q^JivbZaN5C#<^fr^WzVg` zz@ZN7L*L=v??vS>`*{WC`)#umc~(y?xP=?qC*>ILCI~aT2}1V#ZaA^FTe`+>xZHzE zU;|N3OKwVh{p&RD*>yRf_#I2gA&RRnKM8zeP>o!qBu=v|RXLi%GQ}^;GbP`zbk9#af zVGMb_AT|$XLviD@Fyl2GSWx%kvvi*;ZKahRF^cw=jH~`RsTOS>6z!8VssqXVQPOH< zHKVYUR$DQ>ZtJe#lsT^LWys^34+wz9)etOkOt-F`#a3D>n`kdAO}~IDf%6#!%%n zIWJ=Sb#2YggfWbr=8U}kJ&EO8+WSsuz8wxb12yNNSlqboT8YUH#+Dc`;yaa!%BU`+ z(csqoQ?d`0r?S_~?I43S?F2XEHTV7lb&r#heEf!E3Ngu$fDl#J}H&}O90xo zJ;-d1*?r!Gzp8ZyPVTWOuh$=r3m5lniA;AgX`3AT)+?*kA=IH%&ME^|v6VxNJ?QGV~LmuXhP#EQSt3xnBMS?}6u z%>a(Cbt8T6m$SMTb{OlB`MO+9QT9at=OPcr1j?adKl+BJdh^20 z$vl52f`3VMH#wF6bKGM1MgBT&e;yrdr%%T6+nI+)oH7Vb3B6BRd^7d>lb3m5arEwd z3AHc!g$er6Lb#D(Fg=MQE~z(5nCOVYlFQukg&0q8`TnBW?nLCh_BA=?=cj2%!z7TJ 
z`x_^t1U6|@U!vdKcMqKR6>z@lytOHb@pRD~rvE{AJy-rsCcI*ebTr{G@%%=)<~r zcdQAs`4tuZPi8JVeKP-`A~lS$J6vbCH3xK6nqIdzBBbE-Z2#EFNCfl- z8yph9L_5hd6Yr%q`_;K|U%)wNX&-L5VD;Cfn5Bc}D31$_Br=c%+0Zp{j~TP9&Hduv zZg-pr-ny^$-x7V%HtLHYa@+pSK!SBgX0mdSo~ zvNIzUB!K*X`szmzFnY-Spxp{E*nx(Reb5QDS^jF>f~2BmyI;_3zVf?>G0VU@V_hL> zRr{}01Qv9s$>47#pG6x1^n8DzUwV8MjCTM2sHaZA#yESi9!L!_3Q8c#lr76hN_wB! zuJI4q@_Z#EH*>kW5B=Fre_xe%d-(^eLg5pHA~@YPz2{5FccAX}05wJvOpm=Ux*7C! zS!o+>(jHDBPVvlK4-9RjA(uvJr(q-Z|t&VO#;LAZe$1-+Lx|JU!gx@rAe6QhUkSiOfrin#x=Lb$8$c9HBou^KRQH#Aj&4|2;8KS-Bq+CHp0;wHag` z1vjtUj2~RfVs2K2c(A=-m-uHeE$rV(f1Zk4@Bw~iOYQm3dkSSmde)hA260Z1v$BC~ z`utF$*x~<-bOMmi-p2;Qe}{m>G<;Fp62iT55BL0$+El};bNp|fGyg-l*Stddw0BM& zL-THG0pQ!X!9#;tIAXSC^OOCqg`Pwj>k8jk;b9C>*pt9Q!zXzKCHlv_5^)4>$#Jx% z(q{Cvc>(ta$lUo0T;|b~n3R+x^5SzV6I>GQ7jTmsR;B)vU#$?2hKpo7+=v5nR;I$sCZ3>;od|e*g7u^oz8U z*bhPeJKP0m=&xOdZRzNfA&u% zk3H<G#>sQU-~6$)XwU@t!P{jbaO7#ReReAGw4AJ~U`7;o(( z-om;>j{jK&ju7Y%PX9w)`n|IuYxVcO#QGP}JK#@+9+vEX4%hGvIIP4+D_5Mh`OO0U z56pLc@s_RO4^HsESKAWMi9A|A+Qt5HqyBw8;*er3Kym96k;T{qNUgug{)t;H{!p!7 zBX~(W;o5%z{r-CE$)no24|n)UWPEw-Vh)eZD_HSdK`zq&_4K}mD;4pf_5ys1J;+DL zE}j2)_0-Pazh&)#oGg|9kFM*E=lc8p5h00^l3kQgR`!S}BO?kSGb1b6*(;H{+ht@u2?zkfXX_;|nG_jT{NXFSh2&+|c=XzPHs-YV`WY)QyZ zb7pjx&_srocRXbO`IAc^^S=fg6~Q9ItR3o)DlNp@%1BNI9^3XM`Tl2p3KSMO)U!N%E%T6oq_vyd=}Cl*TlWh#_$LsJKIH#TMktN}8`q9wIQOC< z7MU2$cfpJXp=My)?B$vQ<~eKLEKq#)>sdI*fgS6mrObc!{03aPP3n^X5;v?g$Gl-k zavCnG>nMFXxy_tOLj*$haQQ!4EzS@tkGE*E3)MffIcW(tF0SwpW&>S7IklS9z(E2( zy49ph6;#RR32JsQp3Uw0A_e-TWT+4X=!*!Acc0LWVIZBZ+9sWjJBh;TO2&)gdsZ9w z5!*HaOe+l>EwnV9{fpQ!NN+g^*%5C zL6H9zKoqvECpK=68{@%G>w}g1bfk?*SQ4|AXs?F2jRLb&(ZLURqF?IIg96Clm^^;h z8RR9uLhU~UQgJ{O{Lldd11nOJ#;x2-@|QMS!%*WtupV8KOaIYs2DolyN>c{J%5cpz zM^k=yy52nsclxFqkM`rp!!LhVb#--j<0x6I^32eV$$PVo{O2I)P^^h}^)KAL47?9Z zz3QFOxQ@E_s!pcw{!SoOy(>)b&0IsWo@I=fS{+ql!Tg34QGbgN;RBr5-`sJSs(~wk; z{Jp%_2YZ3!+sUWA@8^um1P4-XVcLm^9J9^LsIt&$w|mgJ&Uip^AhBj*bU1WM{;g1GUFKd 
zuA6r&1LVK9>j|62p5u_F2b92%LkcLWUx6N7Nbp7_WNmu}um*<+7f!1tdMj}wb}hG!T2nkV|kUM-t*caPq?we}3Qp;t+IjPRXYHotC|CrBnNE@q|M8mPm&*WTc}t{YgulB4$n+jMiTFe}EiQ7eT!nk#h#omd z^Ab%zPW-?IRgiL+<`qZU!p%6LiznLr2h_EmyJ#^~Yb5>jznuC+k0CUmJx+{+g0)!T zM~asgy9pPLY<&SoJ_u)uhV$9I6EhTU#Z_qb*HBWF{q$$5GKVi@#pJYi$+uTz`WgQe zgfIMdu=nm2!c-lI^Ah93!ajm;NrJ_=>ZA_}b%W*pQGD}Gv1Wh~H~^^{R0q5^UTph^ z?#;Ls&90hoP7Hri2?~g6eCg#9uSv$60m-uBa?EL%DqVaxVtdh>U)=N<38A@S1b9^n zx8qvMC>3W;akRzqukPH6;e!f)2wx8$emq08xIWg4zoKBdRx|ZEJGe8cA++iGL8d=X z-kiA+EW$`~w52K;|MrE#^F=S84+XXqB}-i63VwWabRNH3OoI7iSCVs3PhIkjq#tF^ zXC@Z9ml~okS~f;pyf_kV=S1yKRcze26g#o7BzW;FZ{LFBE)5LYwEtkz^3@XG2;%9S z4MjzC2}8(3K!LNBPyx@uW8#~h*xth?#SVNVcqOSIDJe;jbfh5~p5|tSJ@5Rv@WVZ! zh&AVyKLtEH?Dw_7aDdVuTehvu=H3L*W2$tyO4rg}AE!?N{yuNa2;7!Y(Pn65KX&5S zu}(4?$Zn&)a9P@h>c7tcaAn*-pEO&{O|l9Ca1r% zc;5rr_4izkeQp7Omf(IAVF&l1HWkpF=xNJswqo4J!qwDG2R^uI|3{g39;^NEW3WI9 z_O1<3vQ*z*70`f!IgS6c?T&3JP$FF2J2U1hVbje9dFPILAMrVeNSJ{H3jE*JyaZ!+kWKMBcjp%ofJF)T8-%7nA2L`hd*Bunhls@ zFBG=z@RLmzgMKT^CK}wUrYz0}3>vueGoXRELz7j@{-`N^8q z+H-qk4+AR@MRbRe1$ak7GdYk+U$T9YwqSca?p}!a$CZzcvOiV$02ft_pMQYYi}b4i zl@Tep9dc|xyXt67Hk*@nD3IZP1yw{~vgY3R!e9X{X`3pk342_=jy2{eWH21^XM$a2 zCo%s2Z0SBcU9Ca$u`xV5te+ZMV7UTRFkZZWdMxqP+>)xYGO0ByQ~fWR!+*`JUdDJ8 z!rY+{fU0?Zg#lc)xue_4a}xYbudv9C=@h{yBt{%=Vp8|WY~3s(#5k&bTmA=Fo?Vx% z;26!&z)?S-qS=Sxg*hsa!BHICixV!JP3|jp`|9aucf4Aqew#s5GiA=c`*Wa?H!L?^ zWIg=z+d-aH=8MPc#ut+9p4NZZS{RKSh!AGJ+97Bz-E}6D8m<9e3n{ax0XiJdcGd3p zP<(Q$SEB6Sd5e6!?JeBBN|=Z{_KZPg!@*vo@1NywP%c(;D*O0%N3J#Cj3-rt*LHmk zs0>$J+mcZp)2=Ectled})I|24pbG5HY0-b^cdeSbo6(As2zQfr>06WEO&~o@hM{YR z3wVz0SugC&eRQ-x+@??bZ#8fV%68%%xgvYJC_T~K4jfycA4|b@#KkvSM}dr=ODXR{9?AiTu2qm|v!4UzbJch`TE25wH_J%p(9Uk!ljXUH0w^!#z3M=b5y!TVJSS?F|{Me6Pko${PH(CKs=#Tv_>&G*1 z9KX~w2i@|H$#eeE$zPFXwnG{Hzt+Ky#DM=yHSR-eSu^rE%yT3t^dTHW)Q4bMw}2w+Z911 zF;o2YdthLoQUvLVO*9^bg>O<<;$t+dg1c)LiF;u(Cx_nufc& zSeI5_`S@&O7lB~%`sa7cJq8vgqudMGm{4qN?8>W2$u&1?f2HX7*T_CV0$B|%z#8+^ zcM<2bw1FzemOl?+*o=GhUdN3l0EyFAxgTWk5m!MsjJ;rgUlY! 
zq6(zwJ4{L>9(~H)7B9GY=Kl0P*l!VTGq$UHhKHllJ zqmk;YoR~OXz1KJN&;*xcu_OE>z?!VshlN%qk2D)t6+|@?FPbG*^KPvxzz~)caXiIk z@8y!g5TP=}wBSQ8mf{+-&pk$JGKV!kkkT03P)gqIv)JC94JZA#)CAgZK4>&*X&zMD zdf;RfvdNjagL@@({vgqGzY;n{3#NX!@9ysI2?XR^KDB>L+|8{ksNO5ni|r*)9s=X; z?^w1!9LC5js=JnZ_1$_rwWZ5l)!{8{l6iK`PXl2B^pj6hg#M(yIS1K=Cs$5asYD+P zLy$ZsgBHD&*4;Y_GdYQ~WjbR8H@{X!Fl@;EuH&+Wq)(;8&5iW#(gHklUb_4rsAsQ!a5Zv4={+DU8qW_BCH z-}=J-7}%zX^P>PJ64fRN^L_3GG{i!02Vx$BdzZ5jQtWa(g%4m0^r%trUlXA^sdljl z)cz!1(o8>#(nBxT@d<(os_{QOyC#;R`vsXH@X+oygdlxlh23Izpyolz=r7A{($^#= zhwUz1E3DmGJ9o+dskJ=sxyl3>pja~0M=o?`m2el8eKK%1#g z!7QOo++8>p{jb;9clWfs1>6Q%FPt0sg@iZN)$4hU!p!94o6}?~ zs@huyX5k{jKd3VVEF((I;rVOCsQuY{TQ6;Wqytmq9-|+n@a4;w zn}M!dtbpp+hBmP~*RKuy&QvFhT)=4IhMMkGUswXrw=%#$AA??#_ejo+deYZ&Td}a9 z;qM8XUpIts^LOg@XZM)Fh~qRv&kVKyA|V;Wx1EcSzItZ)hpAuQjBCDR&{=k1;o$DI z+&$iN<7u(*UXvcrWRd7a<99AyYB34Gx@1vmb!hW7+}@$FL)=mgS9ULx<~F?CIY09I z+qd{@Bu)pCr;5}4^KR-ZBW9A4lIc0&i`G;&?N4&NG;jHv?>)99G*s86Ju(6`;Ue~! zBfVjYa6j$J!hwsMsoHWv5Wkh2gFb+O{yPPcQ@d@77xC$#O@V+=ZfsReBXB&!&LAu+ zZgu95fby9!?Y>1ZZ z%8gI|`;ml(X~+o6qGNYt)EZfqd>udIlzaU|u=eJYi7-J8pHjY(vP(zp!~A-}Y+3Jq z;Irx9Q(ra3vcQ8sMLc=Vqh`eX@B}`;OWD!++5t>+!`*uTR{?Na9B!1qC(^*+?~{*g zh%Fbloxs!IG^xVOpnQjJJU8C$+6nh#?U=c+{Ui07ES#e--FI5C{ORYrm;5bV7CP+$ zu65Eb{QiLZ@0^h|IzsmN*~Ei=Gmulu!7o@XjhG0pbiT_fw7IBUl>T4-u~QDP@nK9z zIb~OcMe%Ab9+(uG{gos^ku)O?GnP3V$| zllUKTV1;*iAA>aBkQZpE73I77n}!sC30#LoJ8M$%EnTv4vO@~xe@+xg9+0V}eZf>$ z1+JnrriP=Kcw)2CGl?V~Y39q%@wNTqvS|kQnuKWttUG61e2wl9JmW$0N)yq-_83)L zK=&*05xW0RRNdt8Y^S8A%xOFXM6A77Pr?S|yLNjSvkfvcBOu|oo!ROY5TG8CO|jh( zyvvX~J>s!h-Rt@Q)3hw;?BqZ%S<_ZpaEbB5#bn>@S^zjH&0Dc{d*s4>-zh>wSh};a?|0?TJ#v77wFXWT&I2TN0WRRjOy6&aVB6d+XIR95m!j68-HQvb z#CWUOqvQ!vzF7S{QPz^vC z*$_-A36!8aU2O9wu*mxNH7I0EzL9(_YkBA&eU6(JI3Kt_ir9?M7UE;P?YlK|s2SgB z>vU<>ks9lZytrq?D$$W_Q4m|PJLBikr*tq_E~VSK{+LS8E#L8PQN;ej`zgfZ8`a9!sVlCX(y8$a>02Ftko9-?U!}?3r89v$pdq<4>o*A@1lJyS?AF^f 
zRv=%l`L5^u#1gNf3t%NDjUvz5`0{UYHGx*1L;!J2Ek*lj&3`^K_RMLl{Y*`@AD(is)(iYfIlf%!9A4!xO&GZ7Zzp=uQSqm_I;EZS4)rLn3k`Uxa$-Fs`XZ+sK05;~dsNNMcuba05{S$u0n`XVg5+nw+oFyJKpM3}Nq!J?02cFw1x??M6ZfY| zSx|p#+N%qL0sYLV949YF@L3zRSpN9@oQ2cqi`f?zoPT-%>lP?1vkt#!=vM&!EbfAS zXaJ$WS=hgOg&wM)7c-8ZTe;!*a^m<|aQX|6b&`L&9oR^UYoC-j;%|xP0FK!^vMMNP zWro_j>*;WZx+LZ0ART`D6rQOFaI+m!RXl8JWRTI_ovakcIRxN*SmBuO&+F(37KEjKz)1D zH%940V3TS3=PxUNYQh^_(xNOO1@pji%(73MgDeDx%2pWo214hx5a}J1dbxD1HeFuB z%e&vFQ3|r_;l1#yP3piQ(tccBP!TP!bj0f|fph%qXiM_b#7n%$-`%obov={%t%PZ< zhimslq1}N9@3y$DzMTr>^avP#l-{jag|Eze3V1%1kYW*D46)q;kaCWjE$_?76TkGL z()z)$Wv33zoSO!&gF4UvK36j}N{YT@so!4j6cR6TvZQrBy`x=0;V6c z`F*}_huNiIZfh6#U6+tR{J>-`o7k{JkN#mRXUki0#__jb9cgV`U)LCdgu`d+vp?{c ze@7s7xvy~1^KLA`o|ntZ6#NTUwxoA0_HpILT$Yx6<6Eu1qg`#lUdjUXdqF@jB9Ayu zc)vfUbv(J&6juX7D^XJj1GbfH4F7{rX!THnF*poS-1kFY>D1JPUbtP?D7dybs0XFY z49mF=F_m74sDP)plqPWCJ={jVXf_k7;4K!&_u;@tv+ni|V0S9XoVo{R? zW?~KIf04Qo61+V`$@c}wu9g>&WwBxgXx7gxlB6cxUQFQ`vDPZs%BZK)o9CAKfYh*u4j!b$XYdU z3hT8aigUx}ghzcZ0`jK}ZS_DeRtu`UHB}#Un(Xdd<1+3T5!u$-!g8SJu&jqkAzw`c z!-)C&EF*n9%EfI$z0x;58KQIG2Dqs*AAq9Y?N0&o4b#_Ee=_nUi&41=L2PDj1a zBKg>f6Xc25Ke{ zH?kcL$-v5Tqr~3&c1<_0#7LHQGSuoyp{A#=1?Gty22H|fxav|R^;$qKer83yr&Q?q zXLG+ho*n?=g?2_5M0bd_0!KD#{>6LB*Idpkw#^Qr*=Xig9gtG60Alsfq_0%ns4Y#; zLxrM`spPUbjoV2v@H0(sl4Ui?`5j=My$(4fv4?6*N*S!RaJ{~!MVC-C)0Mn+2IO;0ONfeqDjeQf^ha7%-{)W1%%w0L*4*~vkdgoD%`?s_6tk*y$bF;r9d9-57L{FL4GZ z#y^3!0+^GN`YoNzv1#8Y&>{ME69zv;({sPPl&^f_OT`16Jayq>3W1>l(fRn-_ z@=sI&?UBPn933^^U_T_e{fr+EKtXdTn2Ul^&mdw>#{}|h&=Mmb;6jxben8oE|L!elxoGF6EehUFp;k4GF*F<_0m`#CnW(&XE_ zLd4y6LoOgDJE83r6zlt?(JK?ILAN9aSM}|DXgU*&KmF*yJ&yi(ne=>vw&TT__xnm) zUR-}3>}6bg2c{=7_)A%rw zj}1S^AelHOy|+C_TeO+TSILVPNPX|cw3j+Lpk|4F z;u{p(_^&?ZSGx*N!Ckr>6u+l%WR%BXwlt?dN4xt?c>^;1>Fs6og>;N?*Uu8b>&9a-uk0eX$mya^#RAjEnGxcZ=^L%rZ5 zN~p*6Z;@YBMoR^`~Lloj!sEX%KLEOVK^f=43r#!YpcurqwUk3Cok$tl-SSJ zl^`CtI_Pi>G?O))1hwqUhRF)k%r4il{rtr9{^LUddh6 zx27=`j;V(W`rYw|enj1uwN`HF;8c!RRCs5kZ>-ey*7G*U(9u3VtDjm5^34lZ)2^ML z8u_&v-;wkWo}z<*vp;LY8!xvT@`c*CNo`Y+5~Xz7HyJG 
zk|MOLys_FTy^weo_qX~pM-7|t2;iZt1x}~wqXwYf#M=CU-1%}I^M{m%6!a|5jb*Zqfc#(=--o22TqGj01gapDW_UDN9M!jMN{qH z6gR&z7|M_mRsdm>Obw+=4jGz%+p*8v`vw z{x#|id4tszP%%hBzKrep{`n>Pz6ghRL{^(BOxP8G8!UrYq&tmM5C|<{GwDx$TIb(! zRcs~rop02}t^u+7TdT27`MegluuSkF*|o~|B}!!;O(5aV=mH(cNFQzHK3Y!}$2!y6 zut0{zW=uvA!DD`>Ldw1|lG9lFGed7uwxY}@Mi)AK2LsMv*%&^R#_JE6$#T5!t4dzo zPpLnzA99Q1m<6;_6|0`jl(wiMxmBC`A~sSEPek-Qnb<2Mz6S(!hF1yhsj^kpO_fER z)`dtb)JY}N_W(J^xdaTtOvfAXBG}zC(n=v$M2}7_#Cj@nbOxo3uCfC)4lne9*B^!% z;>A9sY$6=*wA*LDHODDBN|79RBBUXD7MOLCh9|NUGsAn_JkV(v1Ke5y>xBm*<)Tv8EPCXaFWP>aLCcdp^y~gPkmz< zK8In;6_}eH@cQH$P<|ctzA^+5Ul_o7((PI5Oo+r!K2u~JhGL4^jQ6N0m58KMa2U5s zL%N*>G}Me$Pth|?msgtHCcjR7^i?kQ1M0sOJjqX;Gnc zi)AdhOsTiuW|@D234*6ks5Lr;$S>8PWjrN2^Y&X=geSBb(ZW3NXFX<>1SunD(v9xe zh=AvsXWY?fD>)GXJ=8n{tR=V5>}z|$Osr|36pSiu&(;)zskU!Nja4Ibe>TOZWE+wk zd+l(8d-eAZ7y5>bYZ9YNWE{=QeQBH!rvYItMygU~r1@i|%x0rwAG?5a@s7fGR%4)5 z4l^XBEA8?LT1r(kNsjEaocq?|aQ+R!N%kum&>fl-*0g}!RYL`P@B3saEdhQiBoyg( zKTC?NMsIYtJ|Pf3GN02>?Y13|2}6Syq3Hpnv)rt(0L+r@r=_&sYNVhEN8m@Hsn4#K z6+pFG0NL>e-Br0 zjte+?g@Pqx@6?NJJ|zL8!Ln}#Se^rR<@fpa#Tg(k0&kq_Pn-?MdQR*Qfj z&$PD-XhNPl7Y*A@(W5{Y&6CS@F;V;~DoO|zOQs;U@p>hpsx@t?q4j|h?!o>0!R1X9 z09v$AiXI){hIsw1aS6xygj0`OntZ=Xl6lwyydtyQYD9x9h(e}n{@5HM zj6-A3k~*ki4CJdH`Z7%S&fk7>ejs_#I*0Rp3C^g4c;!|7Kj3hOAeP`Bsnl_Qi0{cp z6w(wvT95*k76xMu1O}|rpIlUZRy9V&RxJGdl2Z~mi+U_7ipQS}Q6w{WhOqIN2W{df2?J4MnC!N1W?|8QySP|9)gu`Noti z8{mWV-;psKCY`L!zYgmE0Y4$AqN2irW%P&5+shH)gOO=_0qvbQOgI%#eU0Iz^dSzN9HiG2Y5DDKI42Pw9~BJ4GfE&S4m6NctTNVlWz%6FRKz zMXB$>e5dlPaeKxyFz0m9_Z9OI8}Kt2L>HxTeYvAF?N&B3nN~C7Mf-PcUBNa9ZI}40 zf9ZCO00Eb8;R_?9G??ZT$$5d$30RP>JjB+QnWzWqa57`!ZkR$O#=y)hYrmwccc5fmsZL>BCD|OdpbVW@L7@*`l5Lx{x9?rlKVIHs&5}tG zl%`p7H}8gbDfP7|CB`BEXNYCzicZU=y_!M=C7{?)T^j@~msKxRqwMJ7Eyy4JhWy3Y z^6W$|JNxD_36S)&I9=x*1!QyEgMf7N!GhK4mZbIX1{|u#ysrdK;J)T(_1mZ{Go#bG zYgwx3g(XNKKT`GHW}H5qU@8LZFTxZR(4v!|Ut4=yZtu-eP- zTK032#*;AmK|EbEa+W_HOjWM^oKdT2D2paD36s+Kg)V?khk$bFsFRGKb>$L->5Sr3 zc1b3^MT%&mbbUPm1y2+Er+9k9oXeH;8gE|2+FwIxQY*8yV4muu7@Iz)1xc7hR7Yu~ 
zI64Hp$!uwAAGote9sqA?9U|8x(``03-Y3d*H_1qK^t8%VLO~&wt3HbdS?wKjwWP*u zIXvRp#J^8m6s-RB?Mu0j(yCc&;G?M7e1Paft<&Sr5?F_G&NVUe+n{lXjDPL4+rRhe zpr8f?_OfrQBR}}RPwVn2y$-?Jy>v|=G2hzOX*2-1G?QlrC8gQv`==uqgxtdl&%{I5 z5^~%ep$@+CR{eXee}_PXTjmRnC zy3@KCD0h>uK0Pi;2sly6kxAtoEjPWmipDu`kMDZW-En!}*Jx+s*IfQYsg~7PR)j${ z^GZ4gU3#uL0&St^--P#krbv9-xpxMtdLkV^f#CDi*!V~K=f?=llZ-CheiIB}I{g4^ zy3q-x%V$|eUc_T%M$0E(BdH&t$3Gbyn-q_iZbX}0GY0rqSDqoQ>Ph-i!$kwA?894E zTULK#qQa;;IOj!WFM-@{51s6S?+2HFt>fsLT^O_aP)d|(Zrg*E%OBHxeaN|GBcZFr z=2|WzIoXe9IsX_gsB%D)5sn__On-wsp<19*3&`yTUr2+>X;wZnO>tZ~-RPpiyo6*O z#+KLz)L^Er37sx0h^^tqxrxwxHQdf6jW0>JF{zm|csGanxWz%83hsXzPB72+Plp+P z(uc@$UmwmPN(?M_mK^S%uRi>dMs?7P?1gRk3wAbCMn4jOrSd*F$Sf*`D8 z8P4a#0>jRo#IXvH=UE7y;`Lm}UcwZ4vAW;MeEoq^%+J6Y-#852FWbx%$6&-wKMVR8*-?i*BB##Pt~}6Ohg}8?bxB~k z6Utn(T{zCkEP#!O9Z2z7 ze|M&8Oe2^S#zE3Qcm8Ww_v)89*@qk>)n;KP@^H>xkNYIontjJqPl~LgP!1s4xnP~< z{lH#*T&2KFyG17m8$W}q{kEcf0YY=ZCC2M$9ptptR;m&qY?oO@BT0vcg06=`3WZ+5 zGAFV;@Ql=z{ai|!Fh9WJt6o@gUQ2+hr`@MW4beXOWSL3@GSjAokf4+WEif(hG3*9S zG!`281J^``WvZ`CbQpZ}YbS&4fNSweOhcrSLi6XRouT7` zOOWTieIO0dpQTV_-@NX(lklp5-KRa2UQ2;?e;u!L$(h6Xiva&zA0PYa%|m<#P1*y= z-;)P%w`&1V!!|NDn?Hoz0h@8xS=M>Tx^M!18=&LHA=Jvi@O*F%zhd~0z^6~vowUS$ zkQ5CDD*-4WK;RZI5Jd_AaP@_O*$e>PkQnDMK3-#d=RB^>bHPV)bDA_K;E?up> zShh0#6fnr-^JcnAm&Gn{M8|1Ay%xfFG7A=JQkcbAn=pWpJ`cxO$5ulTWbp@$x35L$ zrN!4)y?8&?N6R< zp+D#_A&tV4Za0ao7Z)1)9xA#duv_r#C+qK=Dq&z}&2%Ji_<7c%S9(0jkfy%^pM`_V z0FbfnkEyW~#elJ+q>`Sds{CLz`TY$b_o%PfnWNs1m99px>pd|jsFF19ev=6lzb(_p z2$VqUH_lqWp7c-@2R@!qlqg3@<|P0k>ki+3XZwo6tQMB^02TX#gc?AHxKLOZ!o)c_+Sdr9oK z2Z-RtP~z+w7J-4!$%|*>sWXKWf6w(k==+$fSAXT9sM5j}nAGe0O4-hE1d!P#aYHE* zrDm;FR3$+AQ&ANl2pzEK*CBI)5|4%v2i|`)kG@C8k|@l6*E};UQ8+ z$pMg$P}TSiY~m*}*R7uj=wyY z>3q~7*Avs~FvKuTKYzIJ%PiVn*0Ju4m3QsMZL396F_j$IJEJ18`4HUvnrR>yO*|8Kb5F;0GI;1Hsq}v4_ zMq@pLAnQr5ZJIhRNf2tlb6%x&DpHYH9-n|B^~4UFj&T1rFAMV882Z^K-&?jxGaq!? 
z%*Y1N!qwt+AAus+9S!Iq?`lM)y*a3NiCOFJOi9R*%TQ765dFEYakV{3k~jj$PG=gO zoU74{q9Sh%Zsq5onZ)!Sf|;#@#Oz2^#7{m0#RDv=kcA(HS*nTBQCt_|`8kz~tWt;d ztaM8LhCia8rOQ-q0yhHA$xk6$L)~Vq^Atj?<(svK>q6M?Hd54E&aJ@gfTy0U*KR`t zjql8-NIyj;1BEO#UWTViPEEfE^<}un2}xPw#?EBc#gngOrV0MkUjg3o`tawTm)_j( zGd1bzc*3?09OHq0i4F_ncqHfYEIxp4d(b-bJ>7urw^}7WH0@bAIrPAsK{3F8(v>m; z0gk#o4|qb3#XustX~C&c!5|Yk)m6IEx<`sU;_HPfb4UNI10Pio^hy=Px~zDUjWrX= zJ~~Kp^sltd^*~l}L^~mr-ioIx*jVzLk!yw?UWjyVUD{%-=f{L6cpAZx} z`4*A}F4=^Mo6Rp!%nBvHXBw>c`Vf3&f2qVPvl~5DG_>tO#Zh4JlTT#oi+)_}&v^Y@ z%xEMQ-?&q>V*t3U*_?Ydx#xbTtfFcyz;Z2B-_w3wjURXdL!4Oj=G+w)IuXVX_J@Nj z$)Aj~Zq_VQI=)6FA63z4*bY2W2(wrzQW-u#aNHpO&};n%Hyv|-o$Q(b17D~3I~=s2 z0MMEG)ja=&m3ca0LD{60@gzc7MpJD>_9xRD;ff1>N=r^63-i34_vETcF>syhPJORR zd13Rj>HSNy8(o2l4wBaQUJE}}>=d0?fBz8Yw$0MEcl;}mL34d#y@NfiTUpUe(&l<~ zx3c>AHo+wr`&tJH$3TmWUFhXgh}s=h41XZ>a!9GXVdj>z2g6(A>b6bLY*H0q$c}`a zdVHjeV)1O18xncQe2+sL+IIn3T~zAr%7Q&+QuLOT=lMvfTC>)nx<;oHFqn%~DRt#s zKHnpyRI~0pRX5Ynbr`nG1T*$mtIh%WS8HlG?5TQg^>g5lr$?Mi+xxk;>z; zQ7UoCkTOV_2fdX1^Q=(%abK~k9Tq(W=CWS$Ht!DjR ztOGna3}|`2fGpF}&k>egnqwVYT{HR(#h@z%o%L^$3b$&t*N_YK*;-p+m)gJ*DQ^gS zs!u$4LLc*0&mkyppOz?Z^mM>KoOW@z*(uOFwX1Les>?+#m0aS!*F|^r>eXrR&^yN^ z9v(vxH<~suMv-ZNFxsg3ctydyBJo^g_H-yTI4GZ+Hm$clUkh&Py`|}NM)ks}Qclwz zWh%R&Bi&)o0sN3}rYjxI&$rl^)-ayDzA_>tMk*H5V%Sv%Np&LcBaJp zq8`W6StxJQgDId4pIgl#DwpFJlb6$4`zgZoY{ROuycT|V-~tC-e8{yiE&^t27%Mtp zmVS^D0a%Dyk)e1U(q2BxPN3r${Cx%cK{mOh0hfuM6>uU1WyUr@`T`>cHd!>*WzDYD z*l;*nlu+4jV_Uj7AY=z_c8m57fpKbUoM3ONgqWA2*n z0p(2AM#o_Qz6cH-#S8LPZj(+e`%=#XemvhFCk@)qwcYZSnH%Mx>_!P_G~QH?o2;6Y z%S8F>Z>W+BfF!)~ua4~d%PmGJBqAD)Q&kot2+Or-zIq~Jeg-9EzP1%1ezm33 z|8arRqv`^n+V#yH{FG)NT>8?@AF!3j2aNOT9xJ9RriU5`>AT?xitlGF`c_r7&Lr}3 zusw|u{v=N?;Wxyh_v>MtlMk!E;!R_9X!qii1Y2t|mx*IYItc%h<*mM|?;s$o`Moue z^7mcV=`FGzZZvF7@$CT*>CN(h@Q#M2Mir}DEHUMp5bW{$QnP`c&OEhMOd4vGN#&AsG^Q7(!3#FmS4)s9K93+}9CxPMe3{kdtuT*^iOh*xX?)>hL*M z@ZNpDb@=r#xP6@N#4b>alPvP+pbc}7nU5d$QSVw**ahH{(KXt+wgp$yaZQNl>WtqQ zG*`U5WDXOnQV!kxV3YED0p>2$qB=49=Z?9F{g-nuBaX@3d)-_*rS_Uh2W>wrB+sl* 
zRuW!aZmwfWb!SQ*G48*@WQ-<2iuDgc;YTQ-Qk{#5iYjn#pC^TqwKwIr{D%22Wg%j| zWGYHDlKJPL;PEKI_)lsle?V>}QAW#W_(9on%%6RdE7n#fWapYMjeYuP65ajwK6lzr z10J#cDrC+^ki|2=ee^F;GARVUN)N%tK7T#lxvb+TMWGR9)N^=;sjv+2kjd^RtW?_v zh+rwk{%QOxufHCHO7#Q@y6Lj#Rdb`!Cy%4R?mDIo_t%;zqS@LP#dI^4`3}@j{JNq# z^y?sCj218eEJ{OzJKOkirD=DbVUR(xKV&x4O8@?1qIJE|Wp(kL=b=2lN^knua|pN? zgDIdk#q$jiU@wC9_FWnkTkl)>!Cl=6kcTiWM?%P{8;?jGFP=QG!zu@cXm@Ij=rl&y zXJb4^kDpXpj2o}HglNpx+oMnJs-NpuE+4P1SD<`-iYJ698~u;MBWBCIpGWLX+nGMy z%6DLay6zcu8ViV-QHC^{^&`YuYWfY4B;y}10-FW`>o$Mj0?&uP@Ok2gsMg=c|2RK)h=jTJM3A>c74|IO#hF1k*9ca4VAZ;>JjGWwtk{>nMv!f z+{@&f#8iiQNd|5C!HF3=_h>zwlHaBlYWWxt22uFY=GPZV&%=~t&ii)FsfPC`1$2I3 z`OFZdZS&B<^w!UWG@LNq+Z5M-$+{%l=4WQ`S2Tk&J;^6)Pegz z{apvs_Vd(PMhksHFUO9o6>CFClxsF1qEhNu===j^x0AsiM2N@a^;p0r|6O3$39x&M z&fcUnD2lYlDDF$H7J+Tq=yRCm*`g|QBSPR?^Bwlxp z!Ugf#$Ea?9T5AF0j5gu*f>EsP{og!^@?_8zcZ&RfJ#6F`m0Oua(N zv(k0Txreqe_Kdc_TK2wwRE(Qal|wtZRS$B<&OlNUZ`piI*kYqcRk#bPXBX%vpS=;- z>KTFp$hFUUHqnI#cl0O70g2fXwpN8>C)QZ^8GS7{Ro7DijU$53r+Th@MYZ@B46}AA zTy$~)ejv$KvpgFu5#`2iAn}e@ILIi+llrzhM$Ie^Z%3xW^KintebG$2j=i58gIjYY z;!v!n^HjqVb|8WjE?0An!+9VkhqB*#$)r1d>Kn2W&ceD=|(z*AS;HBKBh;0ZT8@11$le|Wy# zK0%2~Z3auph6X4g&RJeHGMNyVu)f=%vY{ey!8Oa-<(h5$^3oBrnRMo}qNu~DJ!pgL z=_5#onG2Ro`FLe5m`bdBM%(=(+cH9P1zj*ScoK<61jwPX&I_4-T3v38){WjF+z{Ag zxeN3`yE=)=RUugHp+q;&883-=eDJ-jb1#!~WM(f8G%c>=8To8t)!QOpisvCYMdIm3gGV%r z+hWAlu8iyg;pT4aZ20bGAmO!x1&&XgluC6CAM@<$l zlsIp9N7}McZMLoevnt>7Wrf+=v92bE4Tc?Gp8vKsFTmzmlpXNdxT7?Mu2FZf1&DuS zZKhAkQ4qfJ_N>ota)Rqv^J{nN&5jsGhPO#8H%|rJ6!@xqDxYoAEp#cbWA!+i-?h-@ zTC>k3vqJVqSDeQ8v-72i*RQXrRe6E}BH!Qz+P#U;v;^3ICMIfEf@Xu8<6cbY2 z%20k*c)97~u1ml1LHg2j1L~`OUH#m^clF+(_ zf`rM)opd6eK;x`D)PXkA^Z!7pLM#<{KNqTYEV}_wKAU1fN}vLoJ)ot!`vqNvh0)d4 zjVC>ep5}V0{|7Io(BayKP33}mYEkRPPERfRLPI`@F&zV@Q&@)f6h$;wc7vnoZVC)>KNQ^{stUfPpI)ejq{mvZc6-jq&Kc_ zyxZmvA}zNP%{s%{L_M>8^}i6 zgIEHY=L)$tUhp;sw-Aj)e`O}u$mZYmBlq<_#ptd(>XrfSD|@4EfXQOR-#YWlNDq=@ z6i0m@%&HzKLTTb8x2;nQ=PO4yp3F;Rrt;N0PW*8D6DhtEIm-dhpoHmm<9`lv63ElM z9)!+r(AWhyf%1`Qmo0Ggt5F 
zr$|mIsPDhX`y*UuXl3Se7@$qq7R-m>o$K%oYHzN`qKwH9q08b!nEb<8y&S!%=k~V4 z+JeB_i6RRvQ&Ro{l~jZBqQIRD%ie$f@V^rhra6J>;G&{e@HkI;@b8YM!VtnOu$xR6 z{4wy9TFIZS6I#TzZ`Enm_(BR4YIrnENhpdsEOT^0;1_VzJfIU6S4EaP) z00d=V(Img@yv!{i#eh0@7Hy2kcx)m37=~J)0LYTc0c1u4Gm*j|Td`#G=`xiv)HELX z_T(N)Ri0PQ5r$}+Vkni(1})vq6)qj_A||`z#*2u8`gp~rwo-6xVGFcSK`h96X3X1l zx#-FhW4tAI7|ePfW-8mni`?DGc-QVS!gJ~4l~uYDlGtOH)V^pgrYwnS0Z!ABB17K% zi|W-z?*!nI%Q^u*r)M#tPa;SuQ}xWJ*HEK#LqXx#-z6_ScT{(QlKW5!L;v?=OS~u! zEr*g^zrVN&nF!*~KCf25>h=y84%lIEIV5MUDTh{a&)7nu7&W!w4K~LikJag->}I$C zZKmLEY_*vZDS=b^F|@I4`_@$adcRe+JsIUHRC;P5>fml34VoHYh@EMYVRQT%I+#B+ z4R0=7go!2CLNc^MgV*VlBhgtYl>QgONhyddWpF<5E!~Z&xD;spyIm6An>Z!4O%QtJ zsOLw$^qnDE4npI)dmK@(S|JRKc4Di6lEjbd4|?@@L!mgh9tMK_c>2oIkVuu`7pgZ6 zZ-Hi>Bxt^z@z#)JcQKUcYY>>e@yMP z7bdDvm&d5cC|UXu1r>ddDE$1Sx{oMB-2v1)of&9?R=@N;*^;F>DPy z5_eSHP7m(KpTbUPewkG&NJHF0UP($HCZGmg|B()2nuA^ZA zuy=sBO2jRy?!S4pOuP6&{l>T6b1#Q10W%cZQ7&k^PWb97x*xxwN(brh`lT`Zh4Bjr zilOj&?#{qlGi|UhitAo(#y2*Y4mjFV_*(o{0{P~OqT$|{nO2|N(0^;kbC!EeKAnGp z0)r(7C7(^I6t{kKxz@cQ5n#~b{6|FH@Lp~J;f>~UrX1y*NU*=@#@$1C@-Oh9!iJrLKi#k>dLCJDMD zd7t;9W(lbD4;^f(qvP(iy4hHBG6jPU-$D?`=q;dd*~_r#(}vli(SnY4T(tIbB-oAq*SFTYTqi7aLe>Kb3 zj?|mtdDp}sp`rFze?A!6zM*V9cJwN9>EfM2sAs;&zt3J=D|=&J*iuK?T1fxp008+>hi#!<1vO=BO6vnUiZmP`YY%imQ{I3Q2E#8epB*A)6+X;A#;WPVCjGGK z8pG2QEL<^xN~lh(^IPwsmgsKibtkyoJ#1k=zaO#kD;R zIBf3X@8baXoOWiY3L1_hhBGgR*X1s%IUE}2#S-dw*_KXgL6GQ8P7MN+)r)h(dHJ_% z0c=9R1b+s;0dkv|;%vW7P(ucB2EG4sn znU{DrLkTx&fFWvw&#!K%!Ntp9_43$sfY!t7kX=~qw21b41e1B)v>)S%rBUPLE7#tI%bgWesIwb#303oT28)Rx&gc>#nDpm* zoftVdQJ(8g=8R98`GVrw6JP`Qr~M}qfsIRvTw|&er2d&(C;GG?bb1T82ZG* z+Z&K|`P^$R7(HUxmKL(=on)TmnGaRI_j{!5HUB_kKUCY(f#C_!@#8)F;*y~>)4&(l z#kR$FEoNedXHKWD0a|G>+Lj(RReG&0PqjKpGU({x*3aAop;cUz5ABWj(cM(^XLq`e zK+9?HjmZ5}FMLQFT>(WdhPq$s3e)PrcM5vKH~jnd8xyu_$UT-sdz#D&et|}d)~Q(M zzoSso9Ug2suvPWLj`&Q1ie^zSAWc7x8&qt*3^Jcw>DsYXTHx4wIWhTd#ABNZhUT6d z-1)ro=guhyii!X8n#v9qWXxY_s?5g!Kf>NSp33$Q9DfoOrHt&1jASJ;A~So2kWFM} zgzS}+Ju?!LkT_)TSy^Qydy_3AWbfbmKGNrTe&6rw_5IH|?sH$)_1^nxD%CkV`LJ&S 
z8e)itb>`E4N*a7fdK-;DN}*U9;2Zei!_SeHmF8WeMvYhouhV+>*Cw!v(RQ8 z^ZQcf$u2Qo&-fz7MCaScZS=h!@A;K`$Dlp#<@V}KGo+>NyOdVnL28McZPUs_dvoSp zf9`m|J@vog804R@aZRDrx-Pxxem8Q{wh>kNw4-(iIrk)Fp`^(U%AvAceDtLWQa2>I z9^?ggG5&^xLQLk<)XibI#F`BWPyZ|fLosjtK|$-h2x=9&+>V%GE!voY{$&6fQBaa+ z?70e!ek~P}LIvxdWs$p^nHnjzNqF=b+kDgb{mEhZN@y>6Cf4d1FskU~+5&mqNhAji zVdufEB`thW7P?DtQ~2-EHaR-dACFJkFBBnZwmB;v>jDm_{!}ltc;-v<_#B9o;#)*_ z!nC1b%J(G)O zR%>bnGmi2%;bK0Diw}ao!iiBG}Dl_R%SnQr7}Cd%V7v9e9fEo+Nz~a z_1xcKqJ%mmIX<}OULnj4F2rkkp7fZo=G2YHm&zk_dhZUCb6e0OJv39L6SwZM3_IuS z-!ex!(4hTe;44#cXt%;TQiYM1p|?NY`=Kxr5~%^9qP$n)iXg@F_4`{fHn#P|N1ba2 zU_+R6DBXd158}1-*4pm5rh8i}zmc}hC0$&0bAwdqWWyfLd>y(7g-3an@43HB@Z{pT zeFAr@8LZWxTh&a9Vl4dN$7By(DvXnYDhxs)lC@Qp(T7_5b&7XS;})A4&=>zSZGsHe z%lxyr6X_+@jN@Xgl>CpwwQZ`pH%wK2)^j~cn>Xo!6bJ)SmEUA*iwF;py|0i6tBmdr zdK&j~(37AU5`l{4=5dbmh-u>e{Y;*r>R-WB zgiXYE8~ypf)pk~x5#5A|`wi)8w;U>MC)|Yl;1o|?pkkGxbckZy*K|nk7Kc{$@a25# z^q!DT#Q}7Ix(n|H$<&8$y&)4?xDsC7x-8m#=H*|tGQ+_HZumA9(`Xhuo#VCtu;(^4 zumqPu(OADl5_tklBJk6J8UY!W$Ftkyp4SI|`X`YuPYyT06LJ zmFzzKdxev@7PH3MMHbP@lCuM+6m^$c3N@q@@V zUK>FJidlEc<2ZH9b^{gjPPlp@hE1VD)dxo243z8Se}AiK#oOHOc2d=M;rEG5#F;szOn`5136oZ6R=2U+3wMcNGZ}PQn#QndmPGnt~Q%8Z;dU{q&GqGrWo_KJnB@CW%H#pXl- z+SX+ZKPA@@)&IDv%L%#yB{fo0Q|tN!Y#75+Mg5VR-7UX>&CAoX30KCx%rrNf!qqh` zK%E=7pP$+K0qoWoAnQVlXT|Ixv5f)vyk+?j#ZbJ;eB+`iw!sM)$`Ouw_kkF zY{+%Kw~Yulzt1HsLvz8@w_3O;FpT7HS@GM<2M^(9ML>I=4%~j&FvjonAqju9a4tms zL3`Q~JFap+_9E2bsSFfbY4^N7WvZU7!-ULofI{WAGaRWq2_A6i>;q`SaC~p%lHjYz zxU(AhDf-$M>9)E*RYo9XEBqDT?W_GMr;=Xh>-j|tNzkZC_h8^U>%ro9*TVgCv zZdzFl6mswk+{@PK|Cu2Zb_k`tlTcyCE!ZGKYKXM-)~LQw>k@*y5U*4Hi9o zsq8J06JhNJmp4yAH;c^d6-^}(AI5j_yRUVSr1MK*C=s1fYBCq z35Z;&r^*yPhx?$eq(hc`zBdP|qM{Tjggq;xe5qnsHEAuZdbswIh`hPNqb&3w^*oyaE@Z=D*J_cg zf@KhHM6B-JlMJp)sTSmyG9dcWVG!j@A9GDtE1RY0u2=yvb#Rx5%FvC*E9GD$fPC@!#e=fS2TNY*cViqKslH5%N>0S%hCfFMd7;<)>AG+dJ#SEv} zccK`OI+IK8%b)C^CLtzMuK-%vn;%8$$+5hRef3ILKXYr=;{#iNRa&$U7zhvepPx8+ z%HNkTYe3WVwXrG#(+AbJpJ;J4&;`lw&HCtyIVJN&v@M?}Zhc!jSQsqH$EA}@N*<_wY#k-s8xC)0cxz7$@inh3 
zjl9oNDc@R^xGdks8!#%wMR9&fqN{`u)YQ|n*Oo1Fzk1@1FstIZt$ z4yJ2b4QCF;E8uwZ{^!=@RETzc_W&kTaD-|@lgv3Pftx>#i{^Z-r`sN^@-MtK>XxN^ zN3m+u=-I7y?<-sb9C@8mut!k>X)O!14h-XV0m&MSyP#K$RP!7_mDJ05&i)3^lHQ(> zHSob>KtIMfyxlC?(d4cQTNCP%y{dPwLxv~p!R3n{ZG+fzX5A#i?m3sEJ7SvDSkz_{ z)ZSU`;RSrp|T)xP=Wo$PTn$D^8ro28kr60R zB=={W_{STN%}P}6KwokJWfu#cUKBSNFsM&x>HDH5f&aJz4;<)_NW!?o9LcY>tGT|y$vfR!y}O&Suaw?9)Ip6s=tMsApuf5pl0lX+HB ze|ULb%?$=JOd)eE7O(ZQ54}u$1cf4BKnlLGyIHQel^Ur?@IfdKVu|gtlKH2Enagx~q7t{>0bdLKQifcO>3>2sKrNujip~ zimg%Gl?m4vbRp+h6NH$OD96bvDjV;oIfXp<@O^psekYk+#( z7YR9{CTX54x-~)9%B3xOvtK{zQ{WwbfT`CKmt6}jKTXg7kqwh8rcxKp)vIzP+q7}- zw76D2CVX&^KapU$_w`#Z>YA4A7uJi$L_*&W)?A_Xi>Y@VoRn0MB{56Q&{r&rM56s7 zv$XuOn@^m+r1VQ*U!c3~TtN?14A*iLgX!&*OeB#NH_{8&PP3By)3J>-8_S5fx+Z>; zqvVz+IP?suN>o3{!#VZSR(%t?%h)DiEJ=|)#iKv-vdJK05uvQdk8T}V&iFVoGbWdC zE^KD>Z{>@(IFQ7D#!NcX{4iOy9x~omBfQY#eGRb!gdty6@idUiQ%|`e2HTtFnc!nW zH+q5ys1+LOdr;%Js0D0p6Wsc9M~b%=nyCsb3(BoYds?#~u|cx$;L5HIgV$itRA--A z_Tj6n?sR3b$E)S{7SZl+PW3?HtzjDXI(#V&y*Z$Hyy3dnC|$_uAVs6(@xO~Q7Ll=F z@ZPld4K~HqpyiCAg)OadPVxb+y_v!1ti_Qk?SZ0)re!kPwZ!jVJ8FH-*g3na@45ZX z*2`?2uu6)@dgv-rdG!nXgBgLFmzgm>Zb7==fvKe&k7vQh$dLvO+IWaQ1ve*3Yy@xUbB%Xp4XD%J8dWoT`qqFl9r$R>ouzdiPjKIKLZ&t3?Mxk8e-kP3omM3wgaN2*SJ{ z#OaCByOXJACgBI6l-Tp4OZN+KF1hA<#-xiukXvENYxvnAT*w-`u?sx*Mf5af)N-5} zQRrncU();7yb32DT@YLgU%cy}M-H=a(j%t$$m2BN#=D>MCPe&eH=K}m^2YhNpX;*1 zqlOnc+OVtMgVu|Fl-=(p0v}^z*jl}uZnG56x(RG(WiWTgVsUW&Fe)nMKlMS$4qDx! 
zux1Mf!ul`CGD4%81X*U@ns5_s7m;1NGmmCI0i`?TFp-4L;g&ArHfF$rEf_~(OXvWNMXx-4CXZZ&K*O{n6; zg_~l-Kjb}B^Mi?5_58XBnnpQn$T864>oNAP1W}G}{oQhRX$$VyW>45QM%Nf}E z@e0%qUCQ+-RO{*E&;v>}sIB_I|3SENV+!^D?0B|Q)qE(m$7=j2tKM2N3DJH2IntM@ z*O@dK8^T|Z@}O$AZbil6;YI7wzM28k`W30Sv;f(r8~2<|PpQTAcA2ad}v!{yRoCe~!(wh#4Qk~73Bb?YeR8P%-&MocW5y!xB<)O9Av zd%yg&9xsc0aWc8QE>)p3@9QB^j&uE&4Wm^X%kkl%D1o_DTbtCdCY*?8xh}z(j(FIY z^Eh0F@QK7}H4TR-_9n`mDLm*SEvak|SOciy7sWY0O`rPNTlLGq2lIt;qmn7hR4Ca_k-jYWmZIQ#*Z39Qvag}kcQ`|zq2%;;I!Qp0ATyQM(O+>R39#^^bnN?b*>mph z8**(Xqtd{J$i%hOPo`hf=>%x;r;-Y?wMv=%|)ljSL#_ql#1qH=?CRND+ zHV_3|qlQWE*2QMnAcv3+LPJW}-kcOpCAjWYqf1K;VwnVETxZ}Ptg1C-U7kP(z?Fsloy}VOAZ?c*CEox>!7i6Wvfr4Xy4xgM;VtcwC|Z` zkoJ(_1MNG`p9xQOi%L?$L`2b**hYage?EDB!_K`&Q$DsfNpJXHSuvhtG^~VG*%S9B z8N)1T_tF?Hi2s(f{5Ixhhf?dQ@m$&*f{wUmi43QO2&YKv4$XnHe!@nFQRNcrN z>Z0lURd^7L04 zM*I913yoCjGi{?)ix~$M=4;!~`2Ls-_-2(=uRx=k#GA9NS#UR9?r}&Yf_@Y|O@E4P z`*mV8C5Sxv$6M=6)K$3(551smGVo15R+GDlb4B;UFRk<(5w%Fc+iRCExJcXf zXqvVL?=F}MyIB%^Hkrt47bFl!zTMz;Fyj^T>jqEV$c)hDD~%J$mJ<-G-VJIL{AEkW z9#+zM2er4n_FlNkxmkN8e`26ddOyol@ZnrI%u#(7@s%`U2*28P52k}&(v+a_bj)!Z z&UBunqb^H%qnC59E^V9iu#k>fr*aI}=H??U`d5`(_3Vj`{atSw2Uo)cH~K!gM55jY z%R2XsI9Hl`b?Fp`31$bCjc$K)O1xQ}Zh>EOnyIl=VW1O*vMi>Y7e>hi^B3Eo!t4#% z8saWF1ZQ%shB{RmSz>&K5gb8x75R)GxGCq2c`Ui`Zje_xM-kR+%(~96X z&=TBfx}@wqmAF|sQzz$_shhfnAD}W3y5&Kc?aPtTBq^&u*cMV^+Zq`9>H}5^(!X%I3rvu;T^(6V<5i%T`!SQkjBBNL z^VgE}9%W#S)Oj>&b1hDMrOFI_f))ZvI2a5BLIR9Aw|@kCBV=H~0A!#(#;Ng_4Ajo1 zT8mub{igW7U=tbK>e9XXT^NxUta?fE$$o7C^^Fo2x{0Otvx&N?lX2=e!$20fNmQy; z$vcFQn$io`4KC6tk0tq>AK+L!+^soe8QE&!CC)pspQ*n;Cx5qMCB#PO63k4Iu$T=psK41w!JZE32hfMg+W8g7n=NijZmGK`Ej97LybcC5&`+E;T#9 z1oJ=Ks_i987R7*Sspt0>&kgfj zOOc5_r;-^jKkv3RqpW#fMJ-`WJl%Y;VM#;PFLvEO@IvDy`P$bEw!M_`@}gT|Cde(O24}>1CGcLQVe{ek=;8io0U<{N!P^a5 z%ccIKZ|QXy@j)y6<81{t2P-xj|H}`(yWGm5t-{%O3Q-yVcnbjWxafVm0)L}*hFQkP zgOV2ssu$~j##95==-&ZG*F>PX}_rhNw)P&O`nFd=*j8>0qT zHBe4xoWQ5`bDlEKK}C!4wYxgMU%b3T&swKFLYGH(5VYD*)h{#$+iyP=Wz9o*G{MCdRTfNejb@q-IA^9qW{+9rHFIOu!H-{ZhOC+ zYTPW7+tqShyBF$`%_p1@ORW^ 
zHdB?`I`ogkJ330~UXazYdmh?HZd_q-RfwD4sCiR6Bk2&md&qCFud+rQ{!Z0WVB};8s&Y{4=t%KDiveh^4?ax4MQgL_bSd*6H;idX?Cza)MIIans3b)5$3E;`q}( zu1oQLjg-pxKR#XXd}RB2UdJ??2478W+-gO+l{QO~a>ZmHoC95-)VS7(RvS#%Ke8!5Q?t8}6`f#55X zQ^nwLgPvoD3?mU@Q@ofV|9bw8Ye%GHBK6mc$}2QIiyJtU)H~n(0^bWRn$xko5I3^b zRkb7&ss<(@zT+`>_jCa9V3^zRyk0#N@2rp49L(29TB}oYf3&4&i)7TlSdU0NQHSTs z0r)8>k;y{Axgm23T)I4iEhi%tuLxC#g7{&}Nu~%TyZw3nn%Q;N`U{@RKC~!1@!u-+ zmAV`GFDVD5RQvs#@Lra8y$kG1r#`&pe#vbi??}&h#Ow?1Lg-O{2w0fxR`=E*UbKk{ zT!OvqXVfMP<5`1XgzmO>+ztXDTIR@Lep-C`o&zAR!;8u?3mhOdo`exq(iRF%_Z zWHphuxJ??TUk~fS>SU}v{d%1ixzKpQ1J(RCT7t9}c?r^Japa4c=5c!UH)Hd2gg^%9 zmB!1yxaRZ@c3z>gXF!vnF&4dB)RBxr{HrG-ESM$=Yq>5ZM9p>dU?t^B&}`zW*PzI! z-uiNj8`OwsM+#n1`$}4J)6jlFcgeN))2ThgjF;0F%X=lrvmMHBII&!ixSsXt)O7kY zg80M>w-$28zd2Q}8r*fh6z1BMaw@)N#zA0K^a44(|2gUv-`&#!KTZ&`;LFboyEm$R z@s}l0UNMZ2qBp6N+;*YL8*_Ct{9YKcl7fnrC%>y|?vFbj8QPairDm4xT+c#`Hn`Jg zIhd3gUeJ$#G59OJl;ciHdMF`9U&=t!vh9e=3=#F-kJG2S`9EL#HmK7)I72!wL=Z-M z!nn)!Ig8F)=b2lhd~M-KXlbA?WG&*{#qMysP+J*E&;(Vd)%;b$qzeNAS!x3=<^2?9 zzI~3G-TBkmz?Yzgjp9SB4tzU8HzvbkhEADB`pkG6avg zG{nyu`8>4zO}VMQXxsrD`l$%I~YNdp&r)$G# zVDya)HhHbpyR1gd$WJk@k~WWD_Kn|M+&aFR3a!2xhfcJ_Vphk!Jv!>`FJDF{ z7JPM%ERD1?^1Ia!Pi8WRBb;t^@rwsp%nY5C4#TuO@dY6 z39)gX2#EK2S(T`_H%>yr?($XcMa6#i=vTJ*n#NYXvQPVr)zjQLWZNc(t&WC~j;iEj zMfY7*^`V01f$e8(#*4$&C9#F~caf0+RJ(nR)8~2T_tJ^+)M%$|>rQR<83>_hB`le} z>gcFA^5bs}|5U6;jFpAU;>OWtFsrAnrP^^9%WL_x*L7)?em&cX_rm+;wunPKy>Yk| zcId41Y7CL4?dq!UOPh6naoSVI<0il;?|K+@uv=C{f-6B3A^n-uL4J3i)S>kuSg5q9 zJHh-05cmDpp2}9))T(js$&xlt@BM+y%>EL z+>@!XJBMp`N_5JrtA(}%sT_h)m|P!5Ww0$^nB(4;)O;Bk7_WMrAeL5!0W~Nt5es28 zDIXkiWqhU=#>sJCO6dq@t}yUdL1`h?tG=SDs+;TwJqr9+KKPkijy~J!;9tZ~0Q$zx07MS1ka(r(<<6e#^ zd?8QO`Zd~m1)e^ms+-gzIc#NbCx)66R(8Qk4j#;3@Edra;Q&8e4=TN+XeHB?~rXxe1grV#7LDS5pwhaU-{KBY@vRAVb<{18RH_gz9c zf}W?;M)))93o8ECl~SS+rT8*JK{pW>6PKCLN_YhPU*d=}cc-1#Qm6%`iOGa$%UDox z!Te3CesOD?*Irm1Ox`(oAa!pE1Xhr=xMm%`47YN(*${q!5=WYhnu!Wwk0%JU^URl~ z4yeLV%!Dh4@4a^3=S_sx6^xsmDH zq6V)*%3Db2@F=E?jI5gQ;}58}KA!${lbe}Qoc6P+@NR#U$*CD}6-m|j*us7n6{CI6 
znk{g1c>5lEeYxx>Kjcn5AvB*=;K)@H{2&b6`ghJ?#)U8B@7GD2+=K?5KqNp!IOwl1 z6mK&+>S?G-Tm~+vzU#RNR$G_@LJaj)^96&YAh>Qw-zHPDH7dw66GmxOjuYj?YGB_l zT}`LKBRXYxV{f_Px9!Q`Im`$yj%Iwd>*F@P(bEGFv~G&J&hCCjsNi{PQhmVaFtD1}hy4XUlLzvh^iRptQ_m(3ca zcWC7m=ILNr(m=lYCH-%s#ZjUX(edUro$cQcT{>O{TkN|+_h}mEUg>Swmp+(f(j)$u z+2&Gvx}kGyD{jU6{*#f?^yIyYs$Ad)-FnoYTBDHCfUo9CEAvehElFc8)ASpWHSM5X zzj7Q1Sb2WybE``9$l^2jhFDbI>eyT1mTIL6F^D!r^J;LWx~Hm0v>S0iMlF7 zrGyeGvM-Buc>3f)7-nkV>))@J+~03C*X%WCP&|3BX6kQ_R+3=oZJv7}C0QjZgtDnS zwcq#Ab3Iev!<+yTu{tX`hdVj9cC;n)r7XKbaR*aEr5|m9x2r2oT5$|RHV8K0tCTvduxf7q)gg&?5w_X?pr~gB>mO6-b(WUqSA}bXT)?P;f@>TLqbU8=?aOB`w zA8FOV(V#`C7GoXmH?}_e;8t>`%ip+mf(^wwJWgCfN^cJl#(1Kg3eSzE%SM(qH?1u#TQ=f)%jw9GXpk1O()5iw3Y1p4hWsrVnsp~x z<1qSf?_PJ!uLmLqOFg5|N&uxtgXE?gAlaw&R~f|uG6jPKT!*$G#rHSTtMLyj0w-+4 z+;mnBHfy}3dYh#;`~y$0du=yMDOSikJeBb@O>{9?`R*t5xS6R9W*G+ci3qmQ@dP*e zZcASbJfu~g8Yii7ww+?pcL`7Z?N+nt=Hq;C(LdFDYmg9IhDnOPX=o+X>C)cIijBJi zqn_W)-@oQbx6XGFe=290P%|sRu9a5yiCcJY>_U*nVIEHt700F--HSq%(Dd@vh%on5 zw3D8Ic<<~1jwveNAx^cF7wsYh)m0>6&UJLfg)5*)=ve~Hw#L~#e#+I?Qf=h2eYINI zWFz=|g)$;|^^24cXW8$T^kUln#f&>17PjEj@jhSq$hYFewL_(-5=`OuoiKo0nTctgRE>za=PR(zOoYNw#Ctu^#==zKYM3`MS}c=cWV-P8oV!(U>}%ttmwi0o z4NO?eTb|V@I_}#Oq7A$Ki%D#;7`p(&?44kR^68jU@r-F#2J^s1cZ3Su=R*F_(^+v0(ZYzT1A>lM?x_;kVJp$Gf5yA zmO=GMJndw+m|<08+UAyx+E*E*Y8Rg)SYZ<;`X$PB$g9e`03B#nf))No>yznC@d!)1 zn%&`?Qb-i&yHeJVzK~|#f~hCE;xE3V-RG$|vt5`wc2GBmA8!=EEE>T@5MUG`eX(2D zgpbyRb|A=Md9Y@G(1-qA6omMzfLh2^OTO~aa?lZIS-UiAn=st9T%semk=*pf_?rDI zsE`Z2`g%6v#)iy{uMs`A=d2htxx1t|cQ_K3sDFO#@!PzY5-(&_sRL8v{Y_gK#>r$1 z`2w!2#`!oR%=qNVg9{IjtXX-IS;va~pQiiM=h&N@+U>3(lD1F+a(~*iR*7h#i#(`} zDH|0!Y*!_I;cOs$TqM34IXhO6GW@V%p%ViCseF7acW8L9MklB?Ni7LR1jgCcGUBt5 zK>4^4^=@CN;>(Bye4F21gM%YHcAu4H>QBv77X4WGjgi-WfaFS(ni`RNGTi62N6A$+ z{S|g!Lc$~vMkGBB_s9Q=9ieJll&Sl{-4wT)voaZSZ0DX&G&D%!7o%f+wdZeH{vgE` zgV8d~){lHB&K(>cMWaQM;=nvWk&&bz=76^ac9ip#nvwbS4b_Lll4588sI!0)DmrjQ z><6@x(eM^|g8&axzog#P-W~i@H6yshl(<#L z_>1(gVq`u0)AX8*l;!6GAK@*ZPiY&Ajbc>4!^gcZxc|7V3)h9kjzh7-TBuO+IQU`- 
zdEcwK75^-VaF8>wllp1&`qG5bRWlX+ai%Ng+27FH-T*Obb|R1aVT@B=o)BrqlC6A>3ZHHNEn&2yiIH*P0Lt5Vo#)GZL;^pqLb6~SvB*R3hVjp(kGBJL z*Ugbe;MqaYC}d%pugyi#7W{1i<1_ER1&Bw+L(G~kr)r}pM;AiAIZWNzL1>aWT0B-4 zl79=nS_oTvZ=6BvY)eFEe>m3Jl4rM+EFU{7gdXqKK!e6%^~Y6i3WpEqO!FD3vav{Qdu4}xoQ;|gvg2=&9H<7T-4ULh5@nkfRHqtsEqUPhPG7I8S(mP z!*^aQl;Z0)H0UwGgQUaTlU^PKawBVz|Ab!2zAZpHmwbaP!QB1H|J0iQNMJqBb_ zI|joDL4LhhvQ)}Tzpvnt&}gEQB;gPA7zu9DgztR3lkX=HNrx9e5pjv5Z2~+`)&mHX zxhE?8GMh&n^A%(x52(Spa<>Hwz~k-y!-V741E^MjTe6N=C!9T8Q&86-%-!%FA}~Zw zLTrocdU_h#Lq;G6(R)oo!AiC|Su~O6zhNX0%%a4Y0%%9s+jl?qMiO!Lh&=kqcqPFW z%;4Oci-nzmaF5B=v4!97#h*5n;?N;c@t}FV&GIP8M0@~`4mAm@2$yLgNZ~^LQKLt(s5r9U%8NqP4N zlO=(a=g3B6_Fsn09Y! zoI^*`y6N{ea1A0lidn z8K&e}>5W{@7`E?AP&~fXu_)W&!#Ei1BeIPJoM$NirU~%UW5HAn#HxFQU^2-OoXn@Zj6GnbLMkt-*E;zW zI#PgXauH5Lm(+|#FTz#o>`KX9^D&}>2^Bk})rE#)DB*=XC4H-ET`l;A3*v$4#3v?73*^q$*Mp?XV z$%Re|5%r5VhXKV&pD`GOv;_>(9|_=o&;vT1BVtcE3Q->sqlh+eb0PQdX$55IRfu;{ zO)!M6cJ&e?OLQUHUzgT6;Sc4Wz_^8JXLDfr&;Y62P@C)ijOO_J-UK!;nCZ`JiD6DN zf)e$N(L%oe-%;v;hcyyD#GnmAU4Ys%UNwP8^WL57+6Yrz#Q~l7mu^Ma;8KY-rSYPe z`@ClGZ&n>NTKQLqGcK{s5X4>!-!Xu@JUr^5xGjx$_8jttV*voP@(-{X|xul|8_(g+4aQ;GGf+!D90TL+R5rSz6KizsGAYJxNN zg$Z&rx-{$FiA_{L)4u&L5`sviZQzyInv5fheHf!`_k_TE^G#O{Hw0)haG>~PD>N%2 z569s|xeV?st1`HDaV`4-;dnEEc)d1yDfILiIhzERh$N)9b z*w4KYbcy(2+HGQ`!?oBy;&G7u2Sa%6zU^O3s%p_}`hF|^dUaIm=PdapcmjB6+`hrI zd9Kz36Booq`;Q-S0JR@Qcbtm7P{XyhI@xcDFHx~>UE0?}>s?}_Ph zi;IgZI)5f44!G8gjuzzQ2E?pj75F?B8 zpqdi3ysbhoV2sI$sZw+D7xX9e!M(qd&5HkKRfIqPCC~ryUIa!8*wmk42#>)-;J}4g zq$#xz?T)G26$kEa>*tiAu$?EfPk~(e$9yciOxG=UO~+?zz3SdwOWvTl%5S&C|BqKs zV07b!FDQrHB*j9%^R9?Pm`Ou-7ILbj1Q0lO*4clVTLZ4kQHWm@Q%ZI?A=-|x9QoDo z_*s0+D!n$rs8iWkcTtJ(lezjj|9|tSh{1a>XEKijI;%R(y57q4Re?rLOMybPTFoTP zRle`A;rciGPf8HW(QbsuoZ-Gc4L?~l{I6g~ZC6Ifn7ycIO`{Kjr%@?VGPwADBl)%)DhiFHBx^(<5_3!(Lzzn{sik4?vhTWHPRV<3)-<$* za~4l}Sh~8u$nWIYGf%Hz%i~F3ZZGmbt_9eqj9$1;_TNRBL8NA>=#L_3_)hFSB)h0L z(j60hAMv5gUIIKE>jy25#X~gVmnzX40w*XQv5+X91jvfK!?F{$cmJl!pKz*I3HFbR zn`Qp*1(NQ=w#kW=I?-g^1h(Rh5#bZ;fmxRHxG-l71G5Pf$5oaYeuNDW!RY-8%Z_*W 
zyp!3TZr!S@fHDqZl_BoM3&NTK53k%fg- zWoS0-rIMwhlTBHz2&FyD-sejVs{3vuM$R#5CusrLP(&9}&|ot;JE^|8obbMX{?h`2 zCd3l53M&2k3w74;iZMs$QUXEsO9I1S03`W>CUvS@$?&!1nVd%aFh$nZXU8u`R#8A? z(&TI$1p+NqWtvTwuqrII;A9T8O@qij<1U>eR})rLlBL-^)$)kZx*>|z2_Jr<#r}QK zahLCmkc}NkYJ&sV?X(R_;tyDl7K*JwG=98A1p1>q_=jGVHI(d|ai`{H5M(Qzg;k2q zM2a3Ce2)FPSh8RI_4NxiWIziB3>al^1e;uQ=;iqtAP2pEA1(0Aml|NY2n$VFSC1Y4jEi^Rmm^jI4!^_QW>K-$=ay`I} zJq2fqVWVj`>L1^syG{+lCIn72@c}KJ|KjB;*b6BM@tJ6n`}!{yAmASie*Un=f> z0s5WChce&*fD%uj)X)IEu0T^#?uYu-DZY>PdQjI4=)}6|9=IQ~3^`Pz^$ZG0@akAx zdMpN85y8S#T~8OmJLfGT17`_)At;Mcfo8YA%S=O3*yvg?qDo-ZcVNVxbQ!zHWI}=_ z010r1RO#`2k3CGY7f{h;j;?4La*8=EiH1FBj!lNOKefI6lx0T< z$-E{>zr_2g&|DECkP*)aG#0*S`3uanV ze9Gdw>qd(@-D62K#se^fN(tU3903!af*g~R{o*4QaFv;npT+u#{4$Mbd*hP+(C4Ee zrV4dS#zG#c*N0>D9f3Cv_@9f>6wPto#%Cr~JvW2R$ne|I0i%|?fD8d{Bprg@nAt0l zU(*Bkscckj`VBdBVHH*-nwcw}=W4a%iAm2X5XLIQ4}zauLpbe!M|p=BN;n0IM=QP% z`Bjb87VtvjHCh`Ifm~3I5Ftgl8+ib9Xc@1(?+jymigi1W9(<{Kk7jfA3q?GNLXM6O z4*|=`qeuU&k?sR$;1~Z&igpGRfa7Co>k5*2KAb~pXGHcvd3Fi={>7k1xyY}Efw2>^ zYZ|VlkD>m?y8&m?ld8;RwC|mPAufTm2aQt;N8?2)YLCZ0Cf3YW30|LL7(qxRs z@Wa610B+(yS?z1%d2glTHP>I-`N6iaBr|Uwi2V~CP#tvfU50Ez}<@^0F33XkOW;2AO zcCk<6p)LaAkI9(j!`8wnR=l$}auZ~sqB-Nj*#FCDQq`w%$`SkH9O&)8BD;(nxKLTd zIE+VfIN=yGcR^};p5tX~ulS+0`+Qta<{K}!=FXi0n+1cGsdayJl12c$HiAU!n?)6R4UR4<)=#OzjR zt`^JkZ)~6t&Fsp-L5u$Vb+Ct)KAo-0qDDTZqX4=n$%fKn=(6B_sT$)D5RbRGhj(2U zxGhNnOStsrOwCNWB;FBMCfgT2=c=^a;Q-3;oGzv-GV4;6*_QFMOd&b-OY^q z6ypA%FZZrUvY#4SnyL|fsU*{v7Rj=p4+;AC7>?Q*l5nh9k7zfO1oYnyDd|yMK^p&y z%d6NqDyHObX#39?u`7y84~`E&bTO8py%skN=TuiXeE0pjcLK!<0hrnDo#wCMuszR* zo*iQg*|(A$tXXs}{z#vk#p2YznpQ4#w%4Bt;j1|iDJZJT6URmstAL@W_#X z=7W)E?5+e=A@CW$wX096}Oo5vE1XYXX8vx}=ICfa=EQ z3bZ};0+VwzUls9CZoYW%T0Ch0BV$yV!AVEc*gcf#Kb!dds4tHpEudOofqMRE+nAjVPraaydf2g z42s!e-d`q?Ox8zsAGUW(2}{PD^5;GVOcZnYj;>n1lp39x&YxePS26Pg5_7EUXGKUX zNfYDY!-o~OUVr-;P@pnQhu>t5Je#WlPzze`=0DySEEPXfd_gN&>fL9v>+pzKrO3DV zXw`MIJ-h3lVz42f61~u#|NMUWznA(?1oMeVvdEm^tl2lA0CCm#XeoCasF5}5-o&5Z 
zc+Z`g94aKGDxx6m4tN`J1P_#J;pL`jX@g3UIVW{+uo3qK0o4)lW~qQx!MwhXjwr(5 zS+KK4zC|MswEshF~>BnoDFi%r^Pa zF{uB&{NF8SG)j6P!ym61w28Zz-b2XjqdSt~X;fx8-hLC#N$p8A=2=tiDOg0$Cl6uGBqd`n93~fY6uW7hjuAw2Jjb%q5INpQ^Kwk;*ByS*LeFn&!Y^D9p zoFEiMjsz2xbI+j*N6!S~Nmr3jz8ny~kBvqLGnQqr#=Gk3W7%`|;+HZ$4uNyL%*3P@ z6%YpcsNtQ^#-=97N{`V>B)zW<>|OToW6S^W0mAAu!f^gCIF2}R1>rpxc5&ZT;v;Wz}RJgladG*`rEr$pjW--WOna7 zx*>@8mU9q4)PF=YfYv~Ze6t0nE^M%v`J3dpXd@s+yKb-M#lbn5|Yxb-}Hb$odAYXcF{O-f{R z6yw?k^E5TZn4P|P(*YeuYPkk@cNAkgCP?PX;r4`v&L0T`nel~uU{R5WSipf+vkD1F zxOS-!>_eLW!0W&EVapweuWf5u1Ryesm=0dfk*#$ck78A$akx9QAu6+`VG;WnZE6i- za%Q?_Z9?t{E}Aea29c!&)RrhnUC_(Y9O*}klW=y>k)tg{e>UYU3!=)BlqwguAo|Q3Bq^BGjlmZ{b)p-B z12tyrfgIaBAQT(nC~0a(iKRah3ordYQOb|3z#uKjGg=7VG*X|m5(IU1qtlJ>P6JVp zSwD)M*xQXnPqd>w<_FkFjC~GEr~(O!C6NK6+XJK1jhOt~BJnOq@;MJDga0@NB+^yF zlx*q0?lYhnJs$)YtDF`Lmq(M9Hy`%okOAqh4wHU9ERh??x7Q<|pr~%Fl*NinfN-1# z;kc)AUG(3y^`|7k=n?uQNl{PcsNVJI97Qo9IF)B$(0Z(Lr&uk0;pDlpB5u) z8W5jQ6EtbJXY-y$l&WWNYxp0%iH4E_gcI7IU5z`55D;D;Q@Cpwf41syEe_gXHAZ|! z-+ZB?&7Z)q#|xk3I4!P#oaLh|1T>q$-q|H)35orN&8iR8m2pi}&0!%_$6^h=;UB!f zds(N@C;BcA!ZwcVUIDybQ@E=8P76N{4p8_e7pjLsnJW>l$gs0Sx9iIFkZxzsK{*PxXbv`bsxI26CT`9s5J6i-Zum ztd5OGTHoWYhHtwRBV{67?Vl3BNoR%l1YBpfI9z?=Q1@NDc;+1uPTrJra3r=6Ls#QHiDJ0<>~3RX0j5qa_&2&0QhtKlf< z{Tquj@u7>irce$Ga80yBXYaMw zUhBQqdau17=X;V?ik{qF?A;b|vztlg=md4Pzjs~K_y}7pG5#Fg-G3pgq`4<*w?mci zJGe-&;DyEjDQQdGY?qKak0Pez7pRkxlSf7!kHm4;XgZOWNUrcY&Gz{VDKL9f1NpT9 zJ-p?C8%6Q%T<@-H33IHqGLOi#Acc@5>60$j1-6je#;E4+IP62(Xn+^ej zD^7r4CMg*ek(O^O*;6~;!*w^R(}L&`nQQCF>0qIawierT_tQ$&M*m0+M4qJG5On|n zgxfAoqJU70`uu2@dPj8iop0BS>)hE4&RZw1gT#|xw2 z-%8y6a*E{ko!1H_&I31zD#8@qb|(QBWC8kM^>I3OUGBlc7nOA1cO;XKy(JOq)M-i{ zNG<@D`Ww~x!VE2^fNws>Ps2+s?_R&VJ(()=U!uxD;NQ_8Cbj8bNczU3Nnfx>O)w>M zC|WTp&+tSKAys&EWzpFOBejmJr>xL6I!XwIrwg5`-hQY~*mD|<@BTO!0CKneWyO8H zkUqBE8H`l4n7N}8PV~rI$&&{GW1|C?Uand4jDF2Kn80P0e!^xmvi1GTQBy(i={|^0 zH_-zL1#u>$5i5M7m=YhB9YK3GWh$-_RhY-1O4l6f^FBQjIZUREgo}>$yVvdyDp*a< zIzvoqR{UQ$Ekw6OJOgpTQ5O%TtytwsiMg{uzWcAXI$H*9lOANT=Roqs*5vKuwyN=} 
zfyM2hrB&hDH-DGS+B!8|JOK_*7z!rsKBMjdUA06BWM3GqT~`9ekrj_Ny;1{LzB3-;*w9lutD2PkgQNnu0Sj+b6d z>(ieY>;R;* zK0cHSMPUW$Jyw8`qWjj1Ttin%U7l@v*-6C4>jag_6HGBds5@34|1abvNOf<-Jobkr zuqx9AY@up^KQx8?mq{77TF*cR4hSCVUo86cSafYhp*>4DjQqlc0e+$TEafz&AyJRX zp013`nw&p$iSNmsQ^=wPA=v*~VdKHK8&yrZ;oCdZ{>V-S-zVRiPAl9$oGCtt?v4DlQl_;YJ>ZA#*-`WiouS4@s!zT0n z?l`nlY}JMklt2WD4b9@{KEZ{{^>xx#L?%){jsO!<#{|E;OBM4kQJ#?N_*uS;zv$X? zRfoe0jT?oO-1LRd$PH=$COrY-Y$6+*~UMf^2NiE5-fxwOpx zIhuLm;O5^qb_Nt5P8e|5b)t&@rI}2kwhSPINA8X-h|B_ajI(Qo)c6p$6tsIt*hzF< zItBS5)GImBzn{;alsJ?9ommk74VDU5n6nmauo=S(3ZJ506itXl1WXgF8>%5iNaf;s z@edFbf2}@Z4B@|WcgXFRO=aJvBX}U=c7-(%$ z)ppqGY<~x}xykW^nT=(7NbU)T#Bn+&}x)LV8bIMpJGjJFEiVI z+qODgNCUMryZ(PIjRSd~UWb{!LBuOji;iF_5RX0RPp_XuB&t7vZI82Alnz~{=e4_h z9b?udaNS?b;Z94l+A6|I)(Cz|qxv-y!y3WLDj2du*Gq85M^?89d+Cd2COQ!xry8)M8wO)$`~S%g7*nzb!XHp~-6bZlBIfyhl8i zyg=6-hL{QHSz~B?C$_M`p~V?~yGnQ8ZQJwEsdBuO#06-*wZ!Q_9eNA?N}UfhA#?MS zHzeP={p6DG>`YqQoc=pYW)x_=pzPh}cFaA&?M3_%-*tFcE7RS?9Zy_=-N-&=S1QAr zj)OSvyBX*4_#xy3i^X`&$h=cCI5nwh#26(YIRyP<{+Oqwi+>EpNZ;p!38JAUYGg0P z!=oiD?mEaf|AP;LlM=RTeU9d&E)hX~O|ly58Zcr88h=w1>fjkDd!@u#9upq86`T)X zAU}h$pMfkMu$lY~9VQn10@|oE`rXE@npnKlR%Pp-JPr6svDTGR+W^F$*<Zj|85rYoh`+47mK35ypWcgl<4sn6uWI?yo;DzZVkGzO= z5FWkotPL9e_%qYw)^D$)i%$pf>_1W7f)1hh2Zeot`=!&ZCp~7eq)(lnf7QT&Xm64{ zk{xT9CK+FSw*I?)3iKTsey^G+H==QI^7i-h{{;57RC1pC1Pn}|5*~WC@z=bNHKhU} z1zk^~M(3-32;cDH?sJ|k!+R=DZF?}R@ruF}DOd$Rf&ob!wITv=OJBJfu4ZT}4=FnL$d^z74NatM%|33Nor7`PGHQyTV&Lk!^s z$o2G!D3LvAa*yn#ohD=tWHb7i)~TY=U?qiSr%{H)N!s)?tO6U@)UEMhn@j=2?zX!5 zd;M1~nIe|c-BnF{Xq*`}cNvCk{W4DKHeO;uAv~Jf-z}srw}qE<3{4MDtK!j@GxL1e z5QRS~igNm4;XMH`8i>1RRycVsqeM`=73KN%K!Ns4DaWh)Z$LP&xzdLm!i$jH!sD!l z*C?F27M=xK$>~oBe2$LAi(bN1e7mA;dbwu?>3oiYDr&{JH}1<~s_&6Qr|Jx06Ib6& zeoBnf4sAX;6}+|X8}i^ffc>=!Q~`G>b57CWa)4khRKdze`*{5??EFhrT))rM>wV-|gGZ*$=hY{ZXr>Z<6hoGkHqJt0UdcZE)e(6pEk7 zcW~c#i*&ofG3WH5(12ebw+OR{&jJ;~sw*4AU={K!!@_Eh6> zwB;p4=5lPOqc&6Rf%9VoAYx#lm97<#W5~ zaJj{BYP8O=hbMm8Rd)OE@G7AtLOA7c8*aM5s`krZrjb8_T7T!LBcOnRXOh-Ye8if? 
zb_245@VsEo?#R#{fF<9-d4Dq7Qp2ncrI(O2O_!bRswKl<(ZC_m9J5894sHXR_9+bR z?BFC+x;H{V!k{w(m?4T4jOXB1?r21+u)lZktM~2cvuzPP%oBpX>+$AZ?L7NOS5XgG zMjbex(&@ULp7?Fz`czW6+G^+AXF|!Lbis+mexo|6Nb1-j{B{}gohV&+ixq>wO=7}% z2Rpo_7v8dWTPwPp36!M#Oq_qY#W}x+4&YUEqN-eG^cEeZ*MKp)lMI&w_=(goIbb47 z)YkP0Yjp|tJ&UISMQlLOKtYei6uQ9Ii~Y$sSJ+3ZsSf306~BZ7_g}rt1%~HmV0fm^ z`1{{mhv%XkUTJeoe9q%6ZYtBa3RTb_s_6(+uXbJ$!Cc^dj{B4R2jV(L`WGUI2XEB< zgV==I(RxJbb0TF1+1XE!5s}mu2*GT!F4@k=fNgBn40<;?>;8eGf><8=?z<+!5wUGEqEJ7uzyYL8K_nAUAW;DzIlQ?|sn+!hKU#{{B&JgSOIc;&tT#|WAaiGKvy z^^3HxhPLaOan|wU!-{mOLvlWolmcBW$loA6D@F`^5swtTTYajoV;B)gX8YW#_J4OnOp0X!ShE-V z-p8hpP3~1cjbHLALc)nrx0>SkRYb3B@5elOoFMAqE+tFk8dbRIS%irQhMoyb}>})fu*a zhfks2eb%Yax!=b}XQz2*2knvXQCDSfp<`^BL7YPQ75j(ex#`*Jtk#DfG}!I==yxXv z4_0P;f3-=>=JHH}3jGfUcxDDd_?G;7iCE*1)7Z|bY~iMLqY2pMs1)v)i z;!g~U4A-rF(3;KV5A20eu`Ow^Jv1^KwxMx#1)6(91_G^Ww%P7(8r$<6xQ6!BNa$lN z^SRW$`xKRKCAnwZC52gQw-7oBlLw?~^Qjzn9kKHBwUv9LqmdF{15}Alc$_nTflU@@ zj|c5k;IM&*Eds~@0C)FZ(_QdSGQUkjVrcyB`4nIYx^MaRf3xPJ3=#nSP(KPHVfe?7 zo#?D5>3JptE`;Lx`%_T_5f3=EKZfPQ^zD}#YqD{eI*{b9t-#i=*S@?JRh>qr0u4ld z#lLf};lJ_}Nm#R*49zAI0v`B-u#{H^i>0hh}6B-iw6zuQ8+{z;a?UszEEz^C!ih%60U7P#xX#{xPemKMtG?*%y^8!C#I?4^$Mxk4Gf&}~kW|Kstv$HgzC z41)0{^YG6)wT;u%MnO-+|FDltdl4{TyC)K=DBmW{LKRS z#2%)U*Mihf7pnqg8l(?Dc8ve zY6XU^@~Z!$f<%}T90_KHRB8pmKZ78d;p-5pyH>obS($^b6|Ace08{<7inqF~P`gbp z|H+?8tUe^EvrSj`DkvC^O}n}49zK5-jVK{n?cJv|156fLUFHL8>r#Jxt_?jwFWP3S zxG`#E>f{ioWXIVRekc>prn7*v>7@Ia`_X-Y=c0PSrIaxE@kM;-T7vNE5P$pO&ssm^ zjXRYJKVMf(+=1pJGR{KmUf+MA?aZBdVl4bWX8#Af*kSAY&+)mRpV1^bJZnZNXAKZG z9LfOBpNsmWD+@|({%tV-xk~D?>siDG84qy~aayRXpdCA`RwFVp2jwHYaUe#ENvD*ZO~t+l<~58aA@nSXV$GT7 zh`KsD%Rm&stDMvcKmq)(C#=HvwLkp9BG|#Us@nA#cN0k7Ar`!Ys#4IU1uG$9gueDE zYepYc30pB?*KRXnMmGy-_dYq*5?;CYBL_*CJMdxW0}!^$QG*?~uKo|pO%(EDdcu`x zdv;+WJ;EeVuBNY{dI;_U4MJZIxpO$W@wD&*q5qeu#w1w$6-n3D=zbvnH;lY;pBqV> z4$L<$pihM?pS{d!k9$or?IiBb|5)?}L-2vs8!YG0XGRT>eC2?|8+Bwet_N<8JWT+x7k6Lg;iU{$L zx&K^s;C#C7{Ia*TakcgWv~e*pu_ng)bX~wQzD~ry(T4m!P46Oi8hYG;v;0PsNJ6f= 
z@BZY|goJXx8!t$NnYH#P&Ai8*B=y7vaS&EEb`3}ryknFxp0R%$hLVYF zwxKv}enCnV%Z#kiV{BOh5iB?H5ANZKol?!D#n<0!(pAzVr#xd>Q6i3Vh1k@&0j|9a zb@)M%s`gP-7unI^o{#GUJO{IOnlQX>;O|D`0FWO(-e3#eOEdzIft}f~yzvv)J&6PR ztBEh11dDk;x2PP=B*+w4V@dJW#mwLv&Z`{Ht_99{FT1YR*)+x zPwjym1oO~G9_+91&v}&gxw&r0d5k&6Sjrc=Rrb+uHHk|1$FuQQsUcyK9X~_*d}J#j z;3%-8bn`HF6yA?I@YK|}dCJ$LqUZ^TkVvWNw@z)Il)wG>MAbUd-(g#ixUO}(fR|eR zZ_HDYjCDEdW~6V3*(X_j8^CgK=s^li6V-QGmpN;GV8jXlJG7s!rNml7TKunORFlHq z=y*m^^)w)`GPq`!Q24nV@&x!57ptP0P*tQ&ZT8ZE*CbF%(E0*R-bcoFM|X{NS*`4> z=#oSLP?o27;BdD%n!$mE&5?V`;!$8^S6jKsb{)3cze0{K@S*g6){~_~ySH|l&_O23}7%*_g<3k2)hva z%F%k4l>(X`V4&RB{?AVTN{gY*H=>pQ>Wf5hY~LgZ?)5ZA>tZpB0PIV4A)5dN19~6G zgyK{Lh(wn@j{85vo^(gFkB0{2=^}%S+6}qPS2D(JX{=SmDD4jf_hDPGvsbIffVral zDy3;*HJfmn=?Y(>JEX2^nZ?l4IN0iV@E4!HZsQ)hWgJ~%cMw?sxX3CnJJx2)GuUn7 zgfFg^aIwcgtTiJz1C_dTzWE~g@AHgjmDRlUBKga#F z`KjVyo#}X$xqna?Z?#uwx^rX0 zg;XkxVDygi1=kt3sH{JGok)ufM9=yN>OAp|zD85Ht{ojHegNq%kR9cJEFHTjLCSC3 zKOi)6t22IM+uS>TmL>}HyC&YnFIe`J#<@pbm|wdBBtaGDQ^ZQMINP~(er+Xfa}i}2 zdXsCy@W-TS8!_}jQ4c}zpv&W-AxOGN%Ro&bbWcceE=LVF1wVghtp~ z>llW>FwrXfMfs6;3Z2GZoP8-x8xfa<68R9VtcFJMG|^aypR&uUZ=*xeK5o)_H(z7J zbkIblcdZ;ENa$&O*_a`vl4uIoGm+$t`ADLyMCpQO{9Bd=n8`l^-xOPIK7=&HV4PmQ znK3#A{Scrzv9l_E@WC|_pijP~>aTX66Fkm2q_%_VD?bE|^F|Jj;90*T%<(I%(^ zzM3JPb;PfOZGd5Y4o_YDgoPn54_+u4{_manUB+eMRZXHBZ zn#fB<5c+Eq4hWPrtzsCRNPQ)41y;+F#8-!rIL45+^Vospff|q1nrR^Y{s7wm%3Hn zbF_8;^XR{0#ta3M*2#0T@oi~aR&A`hE{t4P0y>G$Jj6&m>+lbt<3LqhaCv38gF559 z-qAgy1w%9WlpII<@E!fyv;WY1zfDI+N&evCV;(;;jZ<;B!)t6;*Vu5I z?0LR+5D)oQ&cav*jlK7%n7G%6JRNO~V_~9uaJ28EhxJMvmq)pQsE(6bM~Xr1Sjs!H z+?mO?yj!S*bgPV?}lM3WbjOs0B72+A)n0hJF#pjubA3jbSB2bdB6Xkj&`4#VrEal~FUaxE3T}?8N zYbQd@qwZ4OTJV*^P(*=bvH9=ARhKL0T;H4F#p0f!qrsbRkI%!2ALeEp3%c_7E@~xq z=tT`!q}6^=G*To5glpHi*ov~@>?`MFH_@ig{G?yyal%Rn@(rgMNUEY&!^oM?0L}ch zRGI6yeOl9Zzdo|$#Vq6?McLW!Ek96y%dHRpj2%HC3mPJS8&D;#_J< z_o*?scGt=4L@;`72;Jcg_;h~vqX8d6#k$$+V>K&L3xU5t_J~{%Q``0=^O(Os|9WvR zsOY26m#Yl!@mly`Deq`YX&zCaEe}ACa^D=Mkam8XgwUQS>6rM0D*DsvGcR%d-B!2Q 
zfQB}WvW(4awj||b3GK*(hq#B++7~~6NA(se@m%veCX&~?6*~$j$7mrB!(GA0#$C#} zn)Q|C&D389O<;ouQK$dvFvn@GHw{?!p`XEhx_hYw6_iN?S&!#4W-$TU;2MX_DYUGX z$X_>g=(EMd{xLEKk^@|$iN#+jDIu`dXF`ZO$g53uB5HU>wZ&|wOPVmIh7y|_&W5q< z3h`C$e~rsULTmiv9l^baK2sU+6)>o*o&mSUy-;(gyNt9h_4w%R-$3#W{4WqCOouo2 ziiz@|FC+w$IWhL#@?W^DG*EZ0rYs&?b9-u)@tomF*PJJA z1zs8weGU2|O#?igO#^Z+DTHJD^V@c&**gt{o!`P}b#$CuB`k<-CnjkWA)W$xSF&1$ zEZ34)NLvZV2@liJS9Kr_zRCy;S(jmp*^Ik-046J5ia~)&8nw!sG0Hhr8&QcX@2e9T zX1|tm?2Z+Y*!joEarXG!#mZ3*!FjUP)S1Z>MCf=QY3Hk@7cnTD1eS~#*~Ce=q;_wq+ow0H#H5>iAYqe6R4<%$DV*rrTM zkgK=b(iKwVt8~VRJD3q!e||r?j_UN}E4df5c=#^tLMKTQ_^MoJLC>303{uNwghQlCI>N)6MZ5L+)NDOymB1 zL2#^G$C7z-a@@=01H^9>4^n{%NPS5t9~`-I2s3Gj7oh_$LhYax-Heq%nfU6a8j`~% zpCu%0=~+`R{mo0g0-IVs9Pxu+_vjwt1o7V(m}aHk9$6OP9Zt)4|NI`?sVSScD_*(k zpEjG^eb-A<$urwBDTWPwQp-~?!XITqT`nY{XKAh3Qs{aQtH`yOS=d^wBA1`C$c7F7 zf>#+Mo@!+hFQ*q&5LNz7^=gx_Xj{W^gx3a3rHapGF>hNq0xnFXx^Oi@B*AggyUM5< z&XzhGe_kf(Cmjdx_ZTD~XSdte1(j$mVh*IR?`6 zFkQm0aI3q}xOX4IY!FqkYf&8oCEvHh!Nu?ZPQje1Vz zr=De0oVd}+locRTHW#E8H~IFgVSN)EX2R;KEW63f-=dre?!ADM<~N)%_L#2DYw3R< z3l}IGjDCLAf5M^W$vdXh5$C+I_=LfcNv1n8B)D)BvBi5?tEO0q5?dfITS^!4x9Vi< z;YW|0pu>rSd>SSC6?AlV8;S2hihO_UUSt*U^Hlt9Ez$fR0(|HMc5kng=!#BA2v9Rf z;qh}XXX@G||9V!gZI5N9e!vmu;g<5a4vVwy=6cJ=DZ6Da2|jV~Jg1{$_cC#ar2fUW zh<~G|b|0R(SC1vraeMK()OI(uwoc)mzZ1m@r&N&l5eN`A`h5PNTBp_X7yl&WRImxBZ>6nSU)yb6yUSS?BT~73I2PbQQHrmuvqSbpOlSDLaS9sB!IJnd?Wo_mRrH z9xEC6JI`Hl{u5{9DWBNdNtVrd0J)KY-CJrzrabdZ(6<74N-xPIm700DwW+`&sOKZ< z2I1)@>fr2+kF}J#6Y+eST!&BA(U7agt_D3Z2QlH&g?QR7=4a`VH1an=A&)Quol5V& zO5;A(S>V#r(lQsfRjgKpcj=Qrjtz2i^*az(O;=0H;Y^F#2$@M7 zmTvwQwntm1aV5C?bzet6a%~WGEu1ZPu>YD3>}KECp3lf`mZ$9BE$f+YrGEl*9&&IB z28c9(;Om0f2b>lx^`)EqllkAXVwKM$XNN#;;B*xK2R;SLC(UVdRVnm8NNwkTDB*CB zT!+W^t45^)eqTF<%V@WRy8BN&0|C}%7wHClQ&6A`*upL)QZ^SO(I3$HV`b$`%{(Vq z&d&!*uZ$SM1R3pYZCR@%jOpZGJNQowhfVgM^WQV*cw|M~yKeWHBB1O@_x}vpy3ttb zRGlg7WS42=MEVB-$)Ul-)WWNTWujrarbXG^mR&GY2KVQz43t*d@x9Ur`qm~-86dG_ z(cBvrc~+-!Z0xds0@2sTAs#9F2EjKQqh^2zt`u#j;W))RUtp>_>vs_WwZ-QsMV5wZ 
zB=#gESbfj7KTxLPzj_AgS~2MAm^>(D1!6&)eNDlpDZ5#Ev-|_!f$_o^<^?w;6^M9` zCus}?IxLszNw*ZajCI<>y{=s$VCE^iJ>lpQdalZtgcOCU1!>beT7>gzn5>t*y3@y2TDaLPzQww)DGs z%IhdfN*Gk=DO2D6t^=&nKkQOM-sAV{&AWD(UDJ!J&#_9h)!1G;vh+*K_lMgy^lX1G|=DS8a{pn z179XeWQrPwtY*XE!y1bmQF_WR{xqIMCpr8OOxE|S5yP-L0p4_;*DeoA^baK@zU;FyoT?Q);IC<@hnWDcrEtu2$a0$dYDBU(5xYY(eb*xQp zu1x>gLdd))T$oZnAm!08=l6W>4O7o)nnG5ypJ@O;O1LI9i9Bm20kG_2VA;bl%ig47 z^3Ye&U1T!9CF_E+8Fm_j$qPWyX*LYBae-T&{Wlmo2{1H5x^}S^kbypB_k@CB=Tl9$ zDIZ27*K#;pclNZkTJ7s(x0?AJ9i62q+rv-H=k3h=iyk}3cx@VyJrXljV>;bBGSlze z(l^t`>ZNS*Rs49yiVeHB@x_+%P5Q}Aw7VsphfkMPr}6k1efj46=8kRl;+LNCfFiF% zTfHFK+rrCN&USI1j>!LxT-Osfirk9jI9V<3Wq+)z=&9zyY;>7U&^LEmD#Ep;FGCXj zXS8*uBb^Bs5{S0=2|{L zxFGEw(VX@n{bwFsiTTZ-xbms5>HJ-=s(n;RU6Gn@k-1Nu1@3KdLsPkP>!@x)*yJ5hx^wQ5PmJ6K!qp3%jA`MstX*ICpfCbNVxJ`vcW3zIEAVayuFn$ zvt`%e%cB5AXgC(!%#9V<&ut{loO2LSR*tQX) z%RlEPiXt;Qs-*BRc$AiFY?8`=ucB`PT+zKZi;OX{a+UJ+{tnGk)ygId*%w^S-8=RcRUzvC{O< zR=Sz*C$`Rvi(xq1dSBht(T}VZ`2y+5AMJxNuxN=SCfffsynBk_?>hv4mJZiIC?Ak3 z?)oRx-haZtnfV4lYFY_^)W-hNm*DN84xaLq-J|T@0cA{HvGoH}YCCf}PD4yZFYU(d zE;2uVlJc30X8CSg=(jJSE4eGt->joEonZPHFu~C=iR)Z60X$khbe>KQ4wEt5YGJPr z9M%f@mg+;t>a}siR%f?c$Z?JBwEn`&0b8wIvco3xZiTlT#1%4b`BzwGv?a}KY_tH} zpo2UmV89LwVC^^G!$tQ8P;D##Yg4L4sKZF3I0HmCVi;#|ZP_>eMV(JBL`d2!Q{#m1 zxg&IFlz~@dfP@Mhfc`w?`IXh6k+{_`n=MT~m~ydpNO=T3F|K})EkN92J zkzA%8ixc7cTg|T7$JJl0k8gy1;;ulMO>Nv82D{-NZ4=*zbVjnAOAR=;qxYrVXAGUD zzJD|t|JtxGFM-ET{2fyyPOCqJuEbrQa?g8-J$Hp;S!`c-=OV}OH`68pQkn&ee&yj6 zf4;U6=RMaz184079l$#|h>MHcF=>n`07w=8zQQTdANew2@Q{HyY~(ZpwYj;ErcUnb zqtxQ+sUNhDA{L#RUCQxTWUU(SG?^q3%;R?>7`2qjefToROH7S|PDPF0X#HZ?=Y+R; zs&W@QL8VH`Y6lp+Ho*@PvMvu9K1dj6r=?9q zg-sS55C17_E2F5f%U{?Nh9L{OJ5N_s`z@??k7oZkkj1CJTc_mo?csh}I+Jd$X9S_| zwUbLmgdPUC9;C?~u1sj{JjH(3)2gqzxokkL+Sj3Gb{9jE#97nt1^3eVQ#Z4>r+zF} zV;oF2PHbGL08!_g1{-K>7rZMhV|Of+R+)ENez%9LqDH%PS&O8G`S;WQ6K!xFke%v5 zHFGRf#d{*j8d_7@F|4vi*a(Te^~?u7X#G;&L^Bln9Eqll=faWRCA&BW@={Jh!yyZ`jIQt7EJbq+AZHR?cqZ=V=A*Qa`|;m zy-Ae$!$*4dHG(jPckJGyuO;9;as6S&hmxuYgcFY*so2*%IioWj_eg&~+B8N^`STT$ 
zIJKpl86QP55J0@E0r#TQI{N)7v$dskHfb8}dss z#kQLp!gZws+&^vFvTN=MWOi1B-sE?MXu7q}`~XN|MA^Mnq4hA5gGw3!+$!ol9%!2S zj%h_1raAD%+FY-buWXL7RIPyN(6Y-rrmky)4KH0HAx5Vztvu^C5aV*>>W^NF6B!fJ zyfK{>+;_doGMgpCquWN7RdjS@0j`NAeMBx)VT^EU(krM}vISiI^9Qg#re_$508EtW zOX`cj!o;IF1h184yFq8TxjE^Q;AJhH##9@38`RQEvK~yaF|KMnX0#L47$G!CTYi(R zQy8wg66vM|r%y~jOfN(-%`|Leo z&&y>gAwk_1joWsO5mr|f83LK{PG|NL@)o^ z@rJN(J^P(FU7hK+o6}oZrREgHI~?m}w^*mPjd)g1^pvxBOf={&EiKM^>A@k&)aNf1 z1UVF**3sF+EIxB{6K#Y;y{dS};UpFRiEaqG0SlkI#WZo~mrE$Gx#*XLe(o@t5{sb|yHhc<(Lg z%Jgg6l-)(5VMDYp0*z#F&JEKG#MA_$IRM~Xs|d{iNI3#_OU{gHm|11FxztF^s>RQZ z(v?Uv-}6XpQPxzdrDg9;mPPJb*$qUu<%>8;iGDkl9dtXJ`$_2Tc;LKW;HMSVGhWa~ zID0Wuf5rOP(@ie|m)#aYA7|7~4p0b`EvanJU~>?RtD15Hfa}>zNs~H{kxwnSv`^+#m6wl=kaEV9*z8_T-5Mi z6y-7C_!Q}Yq0f*Jv7_wPp}8(=4~LJnFM_EQrwIZ-yomT={8Z&x$B&8@#!iho+`bL- z1#~4Gx2D^yG7EnIl89D&++xJJLvf;*si(OwtVpbEPL!r^t9?xXWU^e2yib;eU3%CB zaLTQ%PDSD|Q-fS1wvTgGQ)JooGhnVO=t{hH>aLWr*h#G*7l`y{7d62Vpn(paJ;B0- zvN2O%PW9FC83dUNk~dI0s0{T#g*;7&u30OF(}s8;wkaAcVI7(i5VAm)9CsBT+eKDL z@j&y3k?#;5h5(e>%wI4;i}jzA zmjppcBO@FNsB4xc7~}ZdR^?`Dq|CFPL`XIR=8gz~Fy|OcDqsY8 z0R!3ObbfK)$JG)=c?k(8)Q$v&Y!WWYuOBd!PO5&+Q^;O6x5%-O#}(t4{iYd)FVP0^ zsj?!SEU-u|J?xS%C2`zkbM>J1IrWyxEa;F;&LX*yGe#|DXS=AdYeD#+X7a4Aex0wD zlCSQL(zjTCavL>T`k1KX(xRKO{IAL8VblUZ7;5(|tjMXoeMqei@|DZ%-n9OfJvzKG zD~IRw^@8?nVQ)7ZuXU&a`0X_DB@&F?2Z&a(-!av~n$CW`Be)!$n36l#OOTlzX>)l3 zwynP}%y%O-*gDXX(_Kc{0$~7chZzgUeP*vM)JGOB^A-Kzt6gY->rV+`lQ+-#a{~_w6AeNaHs-*B)$CHXVbg)eq1_oJ)M8{7M-W0_juGTr!8QnpE@k( zuzxd#@J_$fc{{N{dJ8I|Q_;Xz2t{2do?%K+KcQf!>Nok-Ifwm3T*J*&VZs zjJChyHuGnsdFhb1EC9KRJEy~sO#eG#?#_OTsmHEAEY&7czo5Bh$T`pDuJDXJRREKC zXV_$YLW0{;v1{MkKS*ndLvPAIP^iNoW<%7}d!a+jM&JOZ7UbZJKpP%Us=Z!gGCuk7#&E*!|Ldj02 zo}IIh7-7}X@o;p1`kbf6I#WM#mvC(B=&kVFG|0e7!i_yY%_+M(Gim&J?>l%p=;)l@ z!rp884vgf#bS1O_mR~1$W4aDMTFxGvQM>%FaHnAZwJth&!F7)15CWO`dqF;J48gJ^ zk3oKU=lqMLUetD-=Jn_iDSJ+X&W4bJltuUihtpEV$S1LyI6c#>at)y(Ht;7a!9K6p z8`(=HkHOvQUKFify~$^d8y#XNM41K{)38gKzukNd@=4fk zUN+h-9Q{9A6A~hX)o0ix`ejpD6dpA@pb)bB)?=IOHd!$9rye=p~wkOYa 
zy!JzSdmFWN`?lM2A>kP;X4jMAa5RDz(!cEmrZ2Vz1}E-r3{OsB7R)sCZ5^9rJUmJNKRSKFcu^t^iQKB^0M`# z2JV;+H=^be*J9S_l0T0I872LpB5aZ+^qx8FYIg5Er+EuFy)RoCvXC-d^S3<0lTI6uWE}KexC4{6x>KcFpzSH;7RYxAD)*$-+R*u?+wG>DGZS z3WC=rf^EJ54Ct`XW`lxj>K67Xvc0qkEX@Otgi|UdiqZid*~;p?NEv-|b5m^&ILkr~ z@A02>B{61|MY3gcH&t)2aKB>KJf?KHoPK@`xKI5{t6zwt8+yLZlIIB-247P?TB|K znz8TEOA5ze`@o!4GLGJjExR?}*oug@el z=9)oVdd+`gGFdmVe#AKd$?S!P-}8#9m0p}3D{{$hv*{1}U~q%0f7&QO*cqxYBaTPT zjJ&nX9t<>}uVdlYLoz~s~a^S?ipFg}96LT1uXePCLElm9uN$IxGbO)$iy5Qq zY?$q2gFr@d1;q|ff>e3R0wJ}o(>3!xkoRsOqwT1}5J?GABo}-@C@-SY5Z}4yy5g$sYS~R zT~O1pB1SdOkMa<+d##g5XH){dAF@X#x$$@JWjTn5JSh_xhY)!oXWK=_Jaadvn)r56=RCi#!BrWV3r0$pXK_;zN3Ve4~}e#q2hZ ziqoXO2U92J>$GPp@l92VHve57q%PEEjdL zMIp*z(`!=E05z+>jV1*qHq4xSD~_K)^4s+pA%F@A2?={bb=^s!$$kW`{{X#PP!qC4 zy^Ruy4&gX{uh?dP404MYIH6tmQ!-;d(mO0V{IQG?X)RXGu{_-jfjUUhgt3HGQAelz ziTQFF#^T?hgJ2tTZ|NOQam-j9m!Q>;Lj2$+L{uW_!;2`_L zCda~_eM;0+TABE#O`S;PI1KfPk3|f?m=Hz(xcDJLUcGR6GwZV9Lq$;)bcRB}fj!=$ z%Mdx6MT?5`Kh%gn1oEr4%~^~#@BB%wbhnZeUra+{TZgGfBQ`MndL)TS2P%u5V@V(< zQ!VOtx8Bdo89AX$NV$cbe02-h8I)}aj~ry6*lp_fcY(^8Xh81!3bi|brP9uFXQ&Hr zX@`{AvTxX=i!Yr3OB|GBW%_@9fFjiy9VhR7*tmtxA|p6Wdqd5|>mt^2rgGNgQ8X|vpvxs!H@@t3K56=&w^Z$=cV~N;V3ZaT=8BsU2!uDkQoVx`fNB z=}P`(GHV=glwp!ToLTos=rE=xZxL{K2Y8=vLqFFchR6i6_X4+Qcu>j=?ao)&swQ$n zCHI?f8DD6XN=x*=57FM3{}=(AkTN7N+Y0k92PHleB#yH92TF;1N+|y^V#N&@QGV!_!c#C>GEYtA6n&w-==#@X!)d{DMbEm{lQJhNrx0tQ*d)dFk$coVULHJ7@P%;)z`h2q~@_dB;gDx)$q3Ik=^wPm~MhOp@#P? 
zn}fr+V`KLanI+*8sBz=sRQ?ty?jC5{MLchc?sWmA3M^{o)pc}yF>3gKZXUeiHo6(y zi$aSo>$E;d`D3jU6QOS-Z2W&~d$y9)ZXCMec)Z9wFd%y17)H2%dMX(A(k zcW@_h&&rvYA|rotyv58kI{`Vq=j;4!u^WLRsoSCcM~EI<_t`pSp_F zOuBY8KOnW(_94=dG;o?Gf*(==q2{~1SVo~i@kM+rt*b3wqOr6IgD8odbyF_LX3%$e z^>@&+A2wamC#w)rOTNhrB8R>b)>mCO-LO5aKoK@*YKHBR=^gQT>$74whSNG1-wyvB zkh}+$P%g$<&wuYABu27Az%qmpw^6%S*$EC}9X#z^byK1Fr_Ug1d2ixU;0{38Wn%Ek ziQ`gcQ^%z>UV-Fj2};O2&h{9^G>S2jbZ-g~w5pGEY*Bdzrte(aP~8iiJ(j;2MGyFx zt(I<;PgqQqU%l9fol*VA^YNMVpwfAq;-;j>Y9o)8fXC(*h+vgGE{OM9G7A)s22^-7 zVm9W(ULZ+BR?%+WTu=M&Ox@xT@C++6KF6)I*j0!PrEEga0GY`KZ#u@+F6W00@#1DA z4DpqWdzp%{g$X!Vk~^8K#GBv$81QW7=JRh>)Sr*8*#nG*A*MhVE~sy+UuQUm#V`)xNDi3~=$@&HTzOJyTBe zjJN1?`?d!tw$qlOM_DKzAc%FR>!o};xaqUovkX1*Bv>w&7ld;Du4fAvhHfJV1(qb@ zCId2hZ#f_7OKb1JB6$ir91d?z<*s61hm zK^x2+G2^LNzYk~jK2wv#+H;XknJyaV$r}}Z^r3%mm7%PHwuBN^n$(KOf$XP}!sjQ+G6Y+2GO$bv`li8~mxmtEvI{vj|6^=~Wr|wb! zwC|ivdsw#v>$sY9EOoIVh1>dABLa8-&yqKIl}Q&Vc^s&Y!B3E@my6db+TpsD=I?HRykxJh z^eyS&!(-$B9w6oLamVqdweo61S2u#%>}DTM^BklOx9mX$OwCJw@T(*RRi&du&EHN>^WS5VyYv5cFRx(TIQH%fPv zLRq5uqo-DJO(<`;Clcx!wVx;)2Vl(vQ|GDu7hzR`QPbMkh($Rdh@%u6BOQWuZ|g$H z@&@9aDH;#Vd*H*`zQ^rU zTv%K8acYqzkE1*d0{>;}Dl&p6nqoL8gIG*>O0gq^M6o@%X1`5JL*Dt=QG7LWCWe)c z1U=fz5$iJ&&@Y<&3mz-Tbz=1@Dbkt}m39FhbBU7lP&dFURTsj2q%ZU&p{^9C04T!N z_T%A3GaB)Kc`>I%Ctu&SlU%`uq!afs-A;ihh=S@(sbkyQ6p6`PhPg4GUElR|Z* z`v^_+L5b=g?&`WnD8Cej94@?`%o-pCOYZ4cXk2n=Z#@xTs!=y|%(dRIFRcjrD-eRq z$HA-jN!MC;fNLytC*DRwhvPyNCn@N+7wi)c&r8WnBb6^mhrTa?Jd6Asq0nC$p_ z?-=y)Ze4x6s;)Z8p~@YC$6rvh&{t~dQK4wWRB!{RW@;8A;}pSzD2Q}^2YR2thhC-P zcoK1QjikWX_Hn7Z6hSYSt#UNlZjUGo0$pM&CDMgy{Z{p!i|kRvVE41?)g~tLGSGk) z*1xJd8(}|4I5GWUAE~vFNf4SmLwD~T7G|+1K(#W2Qp5xIs=Qk}K=ku==!+_DMY@dU zn|ckBn`~CxZH`_LnLxnE@w2MQp$Md49`b0@dt^VPADfU6W3cFSmJh8qpbq1=S(!M& z=yhFes!8|O63&87i7g(5e`?&2MZ}BJLAsKeKb;pmv5CK&U^~>*Oev2NO4Z=k#lsZd z9vAEi>7>h;Rvg(ko}+W158n5km+5e}IWgfydRx*1fYHncvakRXbO^AvaOgM2nc%yx zjF2UO=1;Id8}D2~;(4aMY`vxiL@P%gr1|BtAb$h(9ZEd^w}qXgt28is%8z7_ZwL_2 zsfHQGB0aL^bz7^cep#Zg-~wB11E;#vh_F|YLjnie&PJywJQYY 
zw3M(q+-N3UN@-FC|9`dqw=us1+@AK8`jQ(mX1c?>r(SPcZLuk~q5{iDw}49)6&+qyd?2uam>ih;(z>WXYuiW(GFlRGSq@FvH1G+DBlAlt;5l-#|KQ~ zEQnbKq9pJG5CW-mY2P9JUvWFQoazKHXe2BDoa|B9VE54n3?1aLaKS+^XmS)8dY!CLrXuf$m2JdKQO}DvIgwr`nX?Jb*&lRIO~ z*8h{hTl3?`h`XiVh?QTl5WdrM5_Q!aVBqR>{NZyQqTqaz*_JJPLt#tT!bug6SBXETMMq3>}BaR^FISDP{w4o zo4gRYnj9bpHo|g*@f{lzG_>stZ(7_eDtN2?i)VmFEHNUVK?|u?nCA@WIdivH8rlZCpfM zOag-CO`ehhzSrLo`Tk~!^7g9F+6`Zgh>{qNsFf;z#YY`QB=W(bu;528Y&h`C5^ly` zmB6~r=$Vf=0~tAzqKNh0w9@2k9gJL60eVJ|$C?lRkFYO~hqC?RZXrr#iI8j~TVzSd zE=Dp+*_SLa)+S`%Nr_2=WG_pJWErw0yTVZRFm@_CF=_1Ud)S)gmA*DEjikD~D#&eJ%j!KkpVw3Gjf*>2H^yTLprBB{K*m#XsP&V+~$SjBI{p zQ?K|Cyo*Zq6{-*f)A32g$@~8%LJAsI^)F2V>sv!uwp)L@!eHpy31kdQzSm~YL`J=Z zT{d4+*{4ANRStW;LHd=0b4r`QC71@Ra&s@7*6S zxrZJ5$JQKz?0BHcC8&4|dQTV^Pc6sO=uPp=m3gX zMupi(1^HuAwWK`qzosGvIsw&N-k;zNa<@Qq3tNGW**mRWuoS9u_#Qj~HTuhdUFzjb zyt^kbf!quLh<y0kinn@YrF__u7GauB(GyFrV?noy(w7OlI#7 zsO#yVKkf>v3Mt?sC%rnDvGMoS^PyMYgUdF6GUgQgAZt(U$YPp5=!ERR;K09za%y2kQ?>tNezCGq;@;n z)AtO#oCxTwE>!HT{o8z<6ASKk7n|{)qn{yAC1>rJ+fWbHHYb%RM@sCYbwO2a4A4In zK3?MzZYuCdRV7$PJn;vNs7Dd(3ftRH&hwKDtpF#q-BydYX7<6lI^V682ZUvQuU?O% zjHCnZ+7-cb)Qs$$pkdg@>4Fe&pt}(JdS69$wO$D@>;=aPn7R_l_6z?<%G;#O^mV(d zXg+63otSeUf;v2CKIc(0w^1lj_TZQzf@$QFUAFI0zK(IJS_4NGtk7j72{~p zGb9ZagqHpuUirVDV#8H-s@(D^>LU=sc(6{y8k-uj_k!H%8w z_B!r>17ah~gIirTAHp%EbWXea`{R_)^qK5Y`F#Qti79|gU;Omq)!oIY2VxlC0s{TN za0(6tXZfT?eI}cF(Z^sA`~)5)pOHTmB*#Oh``63Y{Pl1&ds*NbZIMg6R(#*Nia=TE zQsKaZg3qAxURffMJ)G%V#M@}o3cgxEF^yBE94Ypbm70_UwDB|*<~L4|#jLRacDUJE z4v64>unPLhDSS$9)+<-37riGv5i4bvAJ6L9-h)~Do;8phGGOhGMFs!kAq>ugXGk{^ zdNgDe2nZ>FmSfoR=J`1eI5cTZHeIfSWl-g^CB?>3yv@H2Ls-!%1`9JsKXR;484NhlKvlk|x;_i0%&r zdFc6&#P`1(Y3}sJ1sbK?muiNw9-!glGOqU|r*V{vzm2Z0WLNj?j2^&{-f;v9wsEz2 zjhqn>an%*J7a~U7kjPODsmTCZrH{Z5R8w)@YwfqI^H~Anr64++zgz8yRR(e*F?;SG z=l-0sORlTj?+n{-lfANOVj%SNZ4bq2baxw70CKql>@L*@j^O{ayXyQPU-HK4DUhwV zpu(=)x>i8*`7vjZ93a1h&};(+F_$QgOLD}QNTIOs&svfzG|2Sf-9qe&M~ zRxi8FbO~rAUS4>NEd!)zH^(FkOhjdUwPcC{)CB#QpJ}9D#V|I2LCLeG=7oF)U_fjJ 
zjp~yW0uP54o9bUEkq#_&yUb;BP|o|5dIrkBwcY|Y?@I6J@j%^uFaixXYB+Qucz3Q5 z1OiC%{|%U@>s79xouU6iV0eRM&e~6~C~@p^-iw8AzM8rBS=j_^6H@~2w0ey|U38bd zK~of`mCwN_sKs71H3`Ck17@Io1M1T?APn8lskF!gYwe+9kg&HbR)$!$M7y9?Y@W@q=#- zypUxL_F4c$|HYZzu2&mPK+wKMoH<(IAOO0E0Y_nHx~P7b=JNvQoDOJmq=uf7*Zo^V z&HAzi{q*Co4{b}#;NHbH{U6q$7k~eP6gTMq6F0;v0w3RxyjJq|$B$udgIiEreGNUZ z!fzkE+dBmB0U!eaz51fW{CDIm3D^r3fr`uMcpq?I1Imw~t;xTUY4{Svf+xXtJY2Q6UVtk0330iDH3A3oCp z&kLHV9bZ}|-nR0we@s&a1?{N!+tW2)1WI_yu672 z{?t1_A~z{nABY;cVe1rVLUUce6gANQMM@z*E2|_?v-UiMU~0ZX?#$ZmU%~tRz_Ji8 ziV%WM1i9HQXi{iRG}(+id(h5XGVFd*s^$>1ay2lfy@Rjz5T78xAF74iIprria$P zvjlX+wA6U+a0LDST~?-Sx^oSY>2XZOfobt$@e>krbNSRoZ{CN#FrfY?aqo4F9Ocq1Dv@L&~ImvVV@B#p+q?Z`Z1tREVTPD)aj$K(VNu+W-UrGxlfZn1Z2aF zgb!F!8Uk>ZzsI2_NkfxXuiMDXtC#mINU@qnhz)7Zkk$$TML68C9p`|=iMRm+pBZ&` z|4`xg?}6hpygtiQ|M?KpzT4mPW$_QevWV5C@|m{#7~~sHccfcj+CWY@Z|FQk-CYKH zm`^L7Me}*b?o2MQ7axfyBnR*R0lh5S*@00??w7zAf%5Du@3G~tpj!*@J(pzcL?$=G zL~+7LIV=1ht_!R2Z~fL=vm-ZDq9k17U(!H)8E61!8}WLaIR^+=1;E?{fF{1VWd^B9 zMoj!9ix%Q$Qq3LP4)eB_A|tFS8=%1r{oi%zw85+SE5n~&%XA|H89O0znurwuq-eKI z+ynkosQr8?nKc4Wv}P$o0&-T%gS1B*UKyr)TVrW zIqE~nLuO#Hj46L?w5DML%K$rlCTYTH_NFF%H4Ad{E$fG!4}cncJo&0-ryc?R%*SDN zN5BG8(l>YiY>6IleRz=jFpZjhFv3c*0@H^x-EuGYvuj;L z*7k>r`s)JT-U!*&@e9&|AkNz?0}Xo(|F72vi504f%fV14OA1OKcpY@zV>8jjgYs;{ z@qD-4ggway10x~;!j_JEcA#t32(#c;ivS}80TW6VOb@ZfKF-!gm{mbN3uYkv(XTD5 z4c%G?lUAw#Z*>%!t}>981aMXz(+FT?Uc)S)a9v*hh-u!Eu%$L-N@}}r4Mkr8**md> zA(gMm*-_Y76#Jos!V&7!z5}{t`bANj%K$8a$q5QFPCwxp4#z>`nnKX;pJ3H}tU$g7 z*m>D75u8<=iTmUR$GYU|WG>%>7$!<* zAcl!MXjXOk3=aGy%QM_}K4*b_op_MDKO&1{O=%3IfTmch8`0Mz0l;O~lb+nH$jb7c zAyETQdVH(}+-opx$rJxE7BnOMa?Wzax5{xVK(|%fgn4B z(yF7|92hS_0;k!=02#hcB&uKS+DMPqa`Y|PzCKF-ylmk_Rh8T`(1`P~Q!)z#yfmL# zl_-5~WAz9D3NC|cFrWpXEkh-*&2M8_4zT{V#@{c1cHqHj*WpXky&l<{VDQrXx4Xsi za=reTg>B$zvW8~#NmKqva>&+BkN!GmpLkMb6ZrCj zTsUH2^tZ!4P&;T|`FL8C!dcCEQU3IPjutW(`8TP1&BX5`=QvZvW)!VRm{*O2d9(k) zyi_1~J3y#;2Bt!Ik9_-1DuUs`Af-a3USycWqy1#MfbgxAb~xeX+3c({OEkEz8}S_b zcxG3GY(WXR13ryEbzvR|z<_x==mN;jnh7}q;S_yk%A3r3HXCx;037>wK(Hd>##PV+ 
zh0n?gP8I=Xkji^59C%hxZEyr$I)G`}8#K&!a?DGP6<4eA`&kj%z~Ha@JuNLQ5j_|p zYh{!MU<=z_OfgMJBo6rmhSPk$iFrpn%%2|TSSc~%8fhAz-Z|~tafd}kbFrUYI(fJC zZ!lZi1v{O|pDuYRsna1&AoQmhXtz=5y_wzJ@AIoSzf8P;?uQ*|OyX%B66E9h2VWlo z`c+fz*T^)f3u|rN=)8x#~t!ALrx8|KQ0OT^b z-dUo%!e|x!=x5L(Jz6@MJRq}#>}_*=8@=sEcnmr}7@2eAutYDOdx!!0yiHj*nDlXvF|A}6u*U zC+Dud)cTW9`TzyaMgyUgJrKR2)A;>*!W}0l{bFT!CzU&9^!4lwOx16%t|$Zi(=9WI>{4UMm?gD9 zVLyP86WW*sy#LkB3V}&qUCcr!XAp={s@-&cTl(}OjpNfj4mE;J@LAzf4foAm3-7CZ z4q)FjnB#DdV7|f6UEWg|OUhCuKxvjjZ#Ltk{mj@WvKhAFA$jAG{;E4qZ7zdliH9Q0 z$8Bh4q2Szo!&462aeh+`>DaZuS{7Ig$b`04&3hxRCd9ACHA^XAv$A?pgz~`)&nYnt z+^rT%Uz{rQ2Sn-Hls_8doe7|WX9t8)t7a+Niy4l#B?BwXpvN`a^H;ny)bp?LZHoNz zwh3mTq#@eR7%`2l>;aL27jW_oVN$r&GywBZ9B=tT{X}thb=E*m5MYAl=?^|XqS$kZ zwyW!ejOeO-{bi_Ej?pH?Z+WtztDFZ=GC&i-rO686p!<0=pAQ2~?H?TUtp~}a4P3zf zgd#s}VOu)nW>fUHZqA^5-jA2gx(Is`Q-=+Xe61EvU(~%ZaEpXm@HZhvL<#+)h!2l; z`8};){vq9s{B*kQK7@6Yfow5b%{c{E8t1tfxOn z?$|>`uU8SFqSMQl%6DWZ^=cSrLhedZ;8{~R<(KQ-#1zS8OP{-0Wo15P*?DGu^8W7A z-Vu{qRWZ$J*~#7<2LiVl!8s)IJ{Zdg-$YR=GL8cWDP1EK@sG;8U;hl7GbwVSBtJy+ zK#_v1z~B1&B%DHmTwCb?_~ZciYM{hXa5lt^_e`hvJ~&9JxO?pg;=mWSCC3u{x;IR^ zaGNdjg2Ek)*U+Nj$jS;sq=~M;&k*&{c!hxNpG&hngm*Tk*d^^JQxo3(*2N35goJVD zwcEN*xpa{!>5GLpCOJP1ez(hFJE^#{JxoYb6F*aaH!87|=x>uOxcC6q$s)W1^`L3< z()}0_4UW|N-W=E^nT+iX)*Htwz$m5j*}o9O3$l(}V!~=nlSEuR<0a8;jF8c69$iPF z#i+40#%sX1;c5Ws*{947g5#qLORg<9HZT{G-bRt}C^)p7(f){1Ab9Ea@k5o*e!Y&C z4oqmXnb1}lqe`CiduJ>((mh^8N&R;nT$2twmn(Bnr=cM&>E5zHtGoCLT$vfUl-^~N zur|2vbMC`tp32kHChy(n`jd2Rub@$fV43xWen@OC}!=-*m0%Xui;@go1hdD>T1`>Q=?dY_S0d zxN1GlD_wL#*@cy5ixWJy;;R7iXTKubhGBhN>^9AZNLy_JYj2FB@x1L5ckj$+NF>^? z1m>i$ByAgtUcC6{x5)7wdS1H7T5NIkJ;tG*^-tM90DH>D8vMf<<~;sBzz$nkZbGyQ z%P#iMM<=#6`-l=(RK}3F_=Oz*%hZ2`DGS^=BFk^p=E20e6%0AoG!w%bEgu|{3^*VMH{2AZckVW>25`9#fcGLj*f;l zx?qdVvbK#;TS72zAEo0SXOQ8XsqfYk)r%nUlv6tjgZ;UHniDhe8CgjYfJsUeen6z? 
zGb82W@n)#GtlV?++F??87jec@tte~+Zz_9QF+-%>s2WdLt_sQ?eXV{y$r|Itpm2Zh zsp&|o_&D(2`u5~NmbdGds~pE65tM_q&wXLDvq~2*dr|$KwRxV3z!xeXf9w2u6fQpQ zrr#I&zHqNj+@jwO4egrSug#%>5=RAxSv+>&4~E?3%ARhv4;;F-JTP*%lByu`0Fz%ON=uT`d&E3$2hy%2y^__~9dK7U7q z*wC4UO2c?1i%vw@5yLPT!4tK=H{gqnSrQ&h$nY>;N1gM5AJZ@yIuX1u)wIg!d!C-F z@HV3S%V&-EABtM*A9}1cbIE$!8G)HaPd$aZ^3k&5nxCqSnZn9K5~{WN=&K5xMsd!u z8H5c-+Q_DY{!*eggObG4#;p8&(b?^C13|W1eDojE7EMqXr~H=bTL}BmkqzWTMpqeb z;u5E{GW6bxW|SO#Tq*W&V9{FRA-i>etW1$(HgIT$MrP~nve0)WO44%tE|FX_hQSy! z%w5+f9}_kvL(v-#z47K4{<>NA`$-q6N%rPsgt zXbt&c0bt!tY1eKfb+;JuFELxodwBcOdW97)&#ZGL4t-nM@>3bR1oJ*hTN;E(Lq0v2{A{i+TWSr9v;o&5+f^wo7aANJrw0xs`_p`3Ewl9ek5}l@*n1s5;B+f z4uRYQFbK#)L&=c23`xklVZnOlj8Pfb=Ns5_Jw^fH=+<1(3D8S^(|$bB5NU@Uy8UdJ z&atO;hR0C!hwT1hR`P>G4~7RN;V1sJwxqufaf2)CE6S)XH<+||x~e)`kv;-@F140X z(BOA1$ZpxVY;{KehoVHIL&V#mX=3cq3PrUZR(bLm2ppB?U+u@%KhIvG?lVACr zp;s5XC>w^BZ8h5H)WbxGk_6zD0!hiM{%tB*Pd9oSlt@=-RHs+g>cq2=gfk0xRe#rY zn_&Fo5j9+U~d&vWfz{Ghb&raOMRvTp(x$o+PfKdPKJfAB@&nl1^9~ze3}iGtv{jL^6hoMDV}b4(CQDga5#( z?020RFL?o5SJ)PE=^F>7(DawbFk8~&+m{ySD#r z*tFMhBmgIq*hDduAa;otJyj9p?!k;S=0~0VYiM5yLCakqboK8idq(*{;0x$A2#5G% z5E0?2w)RFS>tSKahYH=2XN+1@6j6TEv=?rkQHHnXL&Nus}qXn~Gbd?2BA)7t^3@c-o0#0=>A0-akVWgx9o+9jj)lUFYzabg; z$$tcD*UAk+GO@m#s&hVxUZvv#|BB}a@+nyj&r>seVWL-V z@N0|!<8=)Up_hq?iISHt71_JGcEz}#91u6dI-v$Hm3?6%Vl?WyPPp8~>Z2P?sThSe zht`*7J-S%5^9{O`m6ZeP2R_d*D9h~LnHSQvP6vw%|Gls6{LqH;<`w&&ru7rgEJph_ z8ErJLUscL3rTpk)3R1eBeD;_m84vk;zz|b6TZNlWWk;7iOc?31%`s=17nhgczVl|Q zeb*=ebpTrSi;K;+1acsp?&)p@-%^G4jC$_N^cbc`$rD*HuXIU zbIvYaR_jj5GcI)|fZJ6Wz{43LPYp_019M^~iSxyvQ%@QnH_g;JPuo!-a(sn5HRM!e zL-Xy=BF19!^7GHH-DRsHjId{4EXej#h>JBXzS~_Pj>2C*_i44N+8FMT*x$pI(e|`4 z8(ZAd^{~r`@yS#<-1S=eP!jM-)y6`O&HaZHLw4QZ;#06~>sGw^CrPWfugQ?nN`p4E zQy&2!fCq1}^@JqFmHSqHT(7&0fi$~-h>7Tqsi%J%eZyB)soA9T`hNS=)RbS? 
zva-+aUQk2gUl{RkzY{`stNV@e?|3K}QB0v+1aZyJxV8m&2!j828%-pl?*&1~uwX2@ zi0q-BqfMrc_dTXy8=4OFcrUfEh3cB-okTq=i|KFaPbXVmmH>QR9om@GyvYPJ`qE5+ zZnS{)N1(T_!K;tf{^lAT9c>lqsjcL`JK37lpsKmpn;8^3(ww5OLowTvSFLb?GHx*6 zi#WkZ6Pc}35Q>g1pSb}0^H}7#nf4a$9CoSX$<)}Y0qhSYYHqzqstBLQo4QipnunY~ z-WuoXX*wLWYq=>d;NbLpC6=oWhKsyD_aUUGu!d1orqN1I-}WntM83h6j58| zu=U9xAAz>__~B?}${`VzEBxZgP9s8WlhVlMPeU}a7NqS6Y=!U=y9Z=RFowIX>+m5% z>M6aDu4&0aj|UXHv7RC;kTiAaHR2mRLpy7Ka~iUqGVb-N9c$XsitzF6Y*C)=_W`U! zGmfdz-qK><;^VVj^y)MO6{X&vPr}C_L=DAlK$&U243ohZQ}qD;&-X)2AIb z0Pqj(F9e%cC%7d~VujV1eVV|l9DO|9)k7N;P3o2u- zsJXbI(jPy}$GuD~#OZ0J8jLndiJIe5zH!laoicSQ3_XNA|s%fC?Gws^7!%pYKi*ZazuAgbc7Ac1DbV*hI6( z9OGxdA+0aqbPX=_0beDC+S-r6rYIo0S1V+3;(zR(9k_Do!{K@hr#2w%tfy2u>Yj6} z&o^ct^FeB!gro9w&TpA81ygt@do(=Ilk-?1ntt~h{sH?*+wpNDWzuWlvk2zc**3lkptv2JUYA-cJM;)`6z6#3j5caZ59WRItFN#G&bKVtp#2C*_Bq;T0lJx5Vuf%1kD#1@ zt^}JX9{+pg)fNQHs`FohBisK%%0^9&*-wKrx1`suE}vBsVIX0UOZ?B4#L}Y_REzLt zJgJ>@vF=>7_RdcZ-w@oGKs;2!<|b>+a5rxrhxNxL5HP7z$53lNC&2;Ha&oqPrZ;zP2=EsHh{lh@sET zyxd8rA~SBIs!eW+*dE^9&M7xYd)80F{P%ZvKSrE70&&s@MCFN*;Tw1Fx+Hf zc};g!;gb6JidJF;I5}XHMbcsOFzpSVMYq}VKy>Rn2pe_Ag`_4xB)yQfc5rwAbNI$<<^ zuJhQ5y;T^3q~D>JyzU?A-@8&N^vtcgDr#jaYJS7@VzTK7s%}3eRC9+4iu1Qr{}tyu z(=~eYl}KYWs>0-WPhQ_$eHZ%1_0n&dDcn^Af&lDE%%!^*5)wdf)TeqQ)XMMeA8Ph0 zmTl7r+65valTn1|#=pg8gWa-Ql1lF~Z}#;-Tz3@188hFc$@FN7*5Va$vH1KJZTz{p z{%Yyu}g;V#kHK}m@E!6?o8y$MYw{&ZbD|E_vA6fQ?Jl}{#M6%b!iLC<6D2mB>5V8h zXJQjidi82IaWNwdQ#C-H%676PSrvYwd=nFKskiiXra=SPN245}$SZIMSSrCMd1chO zbh9b#82!Sw(X1p*htDb8&>f6n$AgyKw??QQ8^5d7B`ioGm!o0}?p1^UO}NI*sela8 ziK&@3mYn8-D{zeucSQ%$%YTIp;BUh>jckHD{hF=(#nz0Zt+_{MuIMKX0!9nS8okbnBbr1GJ zv#+k$ZT9HHI|Ks-{S)W1c!gln%}m5>?|zVi!=e+Ty4-DTZ7oM}VdK*-Mzb)n9XdRmM`Ii&LyHrJs_rbwr(X z4NiV>?k{zjtd+3uUsTkS`5R|Jy1#DDnE#I(*%O1QB1OF4hK642^)f=|7fkBaPO=ZdZXYoeav{}$MMleS0e|KT0HqP2WPh>!R!CqcG6!8I3u;srn|1Q5ri zbbfdZ61v8G^rpDxNgLfLbOa|1@|(t6rTFzEZC!=oNRg2v$mY)FEK z9;52#=v=iIJxpTsdAGE(5eO@c3`TUPy-_+^tId2un0v`-7P#ssQ;Ko$Z#8VkS&%rE zCx>^FJ_im}o_`ket0Qw{%e(uO(_g9B!(BCZpmgoZ@(?NYdIG+PPn#NL81r#_jAq(j 
z7zHgpR8cd+5BR;ncMh6 znSbe$brpZ639F=Na1kE7T5r(y6EzmYFKo@5(HxX?0*}+MR(o}!G@e0A=Cniw!OQ|g z6!JC8p7G3vjA!@2qwo9JV?P>yrAk&XKjcyH{K$Eq<}$Pa{}Q5&DAeSs)&*6U<|Ebb zYnY#;re+`Y8g6=G%ic!Z&NdbbKxrIK6(f5)eA=VtAcdgO`#XS;<}tt79ed&S_Az=$ zE4N?}f^nz&7q+u~1FDK2yR}CzB-#&B9*3;%D{Arrf1K;j$*+x#jh{u1m-Kv^dw$&b z&z0yd{L0pbcGbt7G|`EQAbFwJ<}j;aH(dOqS+t#A6u|3>0I#dDXV7)@*&#pPrN2Tm zc-hYswdD=-Hq+YR3)MKo!m=@P^(QkMoBU^D#ZKI}$@#f~67~|eS-id4Bua(yU_w`f`GemFX=>Jqt^w+QU)6P#rowxF}d8PYN)-nYghtvp?IXn;l!aw zyDoa)&KwA2qXSn6_hJbf`k{?1v*=X;T>Rwq5^V&w2p>+@VTmyFxS45*Da{DaFMs=m zR0wNe^TFtkD4dD}j-_YvY?}IKL(Qh%N@M$h5B6osxFI&P)&X4~>GUp__%(H1Z0kkb zV7u-WcMc|f%|>Dc`I|RyR?=_e6$=R&y#JmDB^{~{b6Oz4nw|tkuPdlS)Tc!mm&f8IoMMK2?zy>W*3ReSfUz zMEmOGe3+QeuYA>?jCQ-(B|}lGB;gnYuZpItI-*?v3{w7*p9X462UaGPiQV{oiSps3 zq`}amVth3>$m4`f(}}!wFsevOa5O2fRC{mL6dUrS5eXlovzF(#x8F?;Vp8%V?~T6j z$1xF81@qE~^%j~SPyZ^Ria$2v$d%tMdSM|>`*0CT2|5h3)*bm9EbYTZ*fX~swivlG zByTaEz>(^KXKv}!7TzkW(m5|DADWtN((SHqmxMNDAp2yK-SFFQrxfPQK+6v+fH{{H zFK4*5c6L~UG=ngVJ&b(e7_wRQ&#~*MEi#xhE+m5)g>9pYq!uC4*_>7BAOaLS-^hh? 
zh!CcJbak@bC?El?kv*@^N59O<%DUz92DzlR@etID+6l!mOM@Q(wM{$AtZb6*3cPNJ zQNFX-d?Es5y!=yg*qcDj3e?cKQs(}{pqzWQMB}hjl;pYL{W3PpoCJ9|ctL4ZENQhb zHQL#1ejE1ay95%maq;pJxPPVA4X(_V_oCJn>vcetzKI+s++Y^rm-?iQkPA#m`O+MK zPHp54+B|pRRb*tO>J1Hzs)&S}YInI{0Np7_`sIeF#pOL|WXUOJF?Kr@NA|;OxD#!w z0&*f)W1(_*B(r1UHzVNGhDSa(&AKC-ozzCV%lhh@cj&97zFoVXTX$J30Q-kKK+a^u zM9o=>YuEtZT8uZ};9URoY5qGycH@wt8NKe!r&(VT;w% zq-4xLGc)MEI7!L62c_kp7>up=ToC1dE0u}xGdF7X7&+y zx#!Ssx@P)_fPSv=dvK6O2wm#P)}j1p##lonW_~lPkSjhb{>Qv>%ew<9HU+9LM8)zb zAJ$Y3HggZ2tE3lV90mI06C%^8gqoY5WxFjWBUTLv4AibXsjaOqoOiaAaq;>ZsuLUA zj3&CTX09zfENh)-u?|su0c}aC1O(*7tVxuNm_zfN!MVqCT3^Yw`J|$ajcthCFlW0Y z`+;xG^#ieiqPD3I(1{0L7?U3vUmsoyK5D&PwNb4}kNtqhjYvld$nv1_AX5mkat&8C zUG>9aM(i8V%U4^+BopZr?;8GEYs8p+kRI~}yH z0-i)&nvL=hD9M4XvDE3kf5heta$AN<8RbV$yH#|x{7smshek?X%&#BkmFlXemi=}v zz?=>B&Mqac$P&~*MhEYCTmK0X9A|dcg+XbCnHN8C7#DBoh;_CZxmXRbb-AJhT;f-#cv2DOjQ3<=|2m_6^H+v zeQWUHUSA-{8kRp7%c8cH7pHvanTng|*>!0g!;Tgf7GL~UhDz%Fa*O>tbmTv0SlHvw zAWtSKH`2oxa6GTZNN=Pi!fBJNUi;ruFlx@Qqs}ot5C{4%$mj$NYDS^l5N`v zxbX4(ur)^Q#+eSI9WGjNV*PMVd^Xag%6Cm36ykF+YwBkVYK_{6t^9FK3T-oQiru@# zt+#GRCCL2Idy(ec=y2kZ@l|OQwbNd4>?V-xt&$7_+FfY7Us*j!IVKEVX|shT(dimW zj+Ri{+(3TizE8gaNXm8qRlhM+Vn#z2;GD8iwZlVO3d2tbAu-wV!`-0TP_M1NYN_{knpciq@)y zr6!bKgU<)d%lDUYtH*8WfM=n9Y?6b5&SMJMXyO-B?j zVAZ4K246-FtHBA^dNE~L-q%+;jT8dWRVf9Th3Bu^VymU8xi07;7~0%C-1SoK}O zxa)tMV9sW(628}Cj#cZzhvg(BCZ5CYg0u&e{W6@(Y7MAma_NsYkMGXVIbT{rk;DnJ zxk!9{R2MEtJ9HhMB97a9hEB~99r8-=Vo6y0>N)WG_3LzyC>+#jW5V8hQ@kxroAg3t zqNd95T}%}d70EDpLj}#s{F~_AvhKfPkh0s5ovKt2lExORy>r-xVAr?Qlq`+=ppYu> z@BayS{T`6Iesght(pY@e;N?q0cXI`Z%`0!yY}}F)zB}tadb&aX z7NW9+SRr|C(C4KJTkvZIP}a;HNP8nRV!f=%UkqvvZQ1AEoU2ldK+p4}X7F;8mm;`9 z2^JBYB_F@G%0W5kSFgur?_z89j6Y!Km;c%bz*BENi)`NT)qq$Nv!<5l!=Qu-gZ8SB z8C3I~TfLtNq+Iy4TcFt09Y2l-K@sx3@RFdM04~?87f*efZZHL#xtpEU(+sF%B8Fa_ zF2;j&p-ugUC{g>T*zoF4L{N{k5OwYEAz+)SCGL**3#2c{Cnoy#f@KT-I$j{4KCCls zyRyt~UG=741*+?m!IK9gir{RpGH}+*SN_qxIB>BN$|3ka%~vF)RQ97opj?fRkCEbN zb2Y_a73C9`rS(cEUOv@*VvuS8q-Z#3OBpM$ 
zT&V&W(UdHw6DdHG%qQyMuC<_4GIFa4j`BN5Te>hSPVU&bVl2w?D-!RaANyp*gu!^+SCBu*R%`YV# zIGvL1oVS*|I+;AldRr(iU`$>gkyP8Tp*d~EKi9wtdwY*8jyl2+nbDo?5p%^kdRXjiHf8KLY6mBc~LzmfAEVo_{^JnKfu zEhs6tj*!)ABZdP1J)FkCmoA|@&!wAE)z0J+vr#jY!kL;MRHe#{^nY9IE_}qtr+ZD3 z3AtTaRtfg#+Q&K?26mg79|Q)dZ0t90D!qJ>){9Lngfp?mAaRNb;d=9>r5h-5v+EP*g8?A=>3OKhI80#UyXo%<(?y9r?z#>GkxiphU?eT~JmYReIOaW?ynzR_*+eU)#TM zTiH1C^e%N!(w8x#i2b1V~tOj|qZ|@HX%?Jm-6Ucf%L63=&5`u!CfE zum!|iP$~q;mzv05nhgWq#J*$yaw8n=A&0K5PA%M+`(qgqJ6l$QH9(b=_})3*+kJ@Q z?ERWlx#eE21$zys)`Zu+{psND22J?@bGNv48X35o3i%5(Ii5k;M`y+4QF=t?R{R?E z(OOw3Yw~$Ar#Sp)%F{+#e1G}Sq|xl?lPQXB`}5Q_gh@8Q{VTVm($7S$aKgSC&C$Bo?pV*BV$XvSQs^#|h3Bsb~@X<%I z15Cu?)ydl0+6GUMtnXba=`-@VQ-KA6nzg?R*Fm}t6I{IRg1iwbuHDuA5pwP>n@Yz) zATeUwi66d853q(3Hrigk7(LTQT&9VPxBwU>7Xa|trhu*cBP!Z1;y%KYh8jpIp8G$P zN>Xy4H|HqBP3^Ij*6Trpjh$)vY=_2Xvttjxmv*M`r#n31brk<&lh2G=1fk3es7g$9 z4_iFDY8jCeQgY@K{Pv#iyd?Ym!0P24(O=gOT5D{m%Dg*&2*2EPxg zm`Kb7p9VZ2Vc63wByD6TU`T7A9j~D(XR#ZdA?S}3bX_05glZlrFSv4vz7=us^ZJhDkbo-bK0*?q5C`;ltu+Og8 z5g#^#CnK>*x(B(3TcId=EX1vu-O>4x=tR!f5Elve(KMZrUU71AUuf!r(bf5W8|%1=vD}2i=th&9gd2$HZ${;_KYOj4RmSe4=7cjgK1@No zj_&67dB4SRjV7}xYlaTC z3_Hs*R^nIO0`kfgN_yk)0~T;f#XBD!Y{)eDorf{ntT7>vtgp-qrZ~u_EIA$lB@W{J z3F~`6qQ4R=CTTmW;6>IQsVb6KsDkza2WSi@0sn2&P)p6Vr-4a3zDiRpKGi^+grAQ; z2kLvot|VXYza77A9JtcYJy7Pps+29X1Q4Kod(v_S$S9Cpg6rm_zY-P#Qi4-(>3( zkdYSC0ETv=>cssnVrM*T3r>k!AjwYOPjY={}ZTS3{ zDi1WW{8Ubp<5L)N0K9#4CdChSF~FC!c6D7WQR4)q;sh|4et6R27Y=U(Kv^pa_hX*% zNh2pPzX9=2hm3!!tNWt~s&Whb>7a5{m=%34O#I?Sz+|kQJ35*R>bVA6UPk24_(H?a zu(Nw~U!9RJ!V5)O>KAB#%@0QpQuX?&!T|N{0U_1g^c%yw`)#^g;S_#ce{v4j7?KO0_VGzb#5U?IMmy# z@)5lczMADX;qjHSGi3o-qgh^>j@*|=u$NQ$r0wqAYmU)v*mJ;4lz}-TvkT=TipEC_ z&i!}j+pGydx(nGovlUM4k`&C@>YMs2~STzV*C~Y`(=EnXd8Tmvu}~1eHo%}n*s!oKV)VwrLd^}2W);2Tr9N9D`#u@(^hOAdRC_}!jK02Q~M&_>;(_93U}xakNf zPa2Ey{fv4Mk$R$Vc&o|B`#-`!g=0kXrf=?zZ+_)pjm+lfC+kPAvy_BfHt5?KD_(Y`bQG|9xd&N%!rG`H9L?kECK14;7KpZ2vFU93%F{P(FqD{ z3Ont})ED}JQ!vX`&`0>iCbWf>SWN_g{PB(M6vn42)wlSfqbA(cs5I$Iv-l-%Zpkfg 
z<(%h7WP&G=LvrRyBTmH+(~AB{l2jF94e4)#KWF5)H^5`O;YU8)gBbzA( z=7w*K3Z(1Nb<8pMe9C>MGNuPxe^tCZ3%_Joc@n!sd@>arBuC6Lvjhplf^TDEmPX$i z$8qr`JjP;BVxarS#Pa$ZO?p3WeknE79L~MlDk~z78JTSIe`tH_s3_a6ZQS5iut=rD z0YO5-A(T>d0BJ!&kWfKtq)S>Xh7u`hPz011kQh1)LSX{%TzaNV~5SB1=o%=lZ-p4+UlIp^(bLcL@fa_UeIY`;1nskU55aG2)r zP&!h(JhNPo6)BkzYc&5DGAxTJeocvUQUOZIClUxy*&=(ii?f!I(q_bI=k;G=4x|G7 zps~vUYxa1t?LkVuV)FFpRe}1&HDB5=-^51i*d=#RRX7NmJ`wu0L{I3EnQ~D1gP8v! zv7 zGjeFo&$}(KyH8cG_E;&Fd$@tD)caHvd!QX;tYsZQX?EFuj!dZHRrywOR z&+Ec~9xzfA#l|j~+~J7P=}LK6bx;g&)=1n#4P65^<7~aSKa1nL)s@-mjRWRBD(74xQ;!W;1uyx3 z9cIAKf5ZH^d+&h-h+S$!AZVM7R3EP%4nf+TSqU0bvcPV{C7KX5AmEyP{^^Lk~!QXHf!mn!~r!SWKdCjto{b7Za<7L?j^WhKFR-WD^EsGFW zRjE&B>5kBw`>0ZpRl8!1%bdRs0$O9Eu3w!sns*1s7W>8NnaF6cz}sKchg;#egg3;o zbOH$1^sy$g-Y`g%D6L7lyn=8f3PuxT2XZEfi)&8uH9A7E4%(UhQ;L{~nR_HjUVUo? zK%MUqT)jJzpX(p?idmZAAUD1=Th@9AVwrU_mi6w&O@;kb`6PN8nY8`<2Ld%NfdBzL z58;5=m>xfyWBKB;Ku!#xUF)d)NncE%5cQfoW(H(_-KQiakkE8Ug{g_DwP2Gvoxe%jEZ#_lzX zYdk^k+}f_IWooA&FnZ}^?y`N9{=*!LCMBCfl8~#8Zj5^@ZdEuir8qbwq%@TV6lZ7s z;S>&Q*#qtTYev{7hF-sAuL#KgBOb`|*Xm;YR@cg-7a5~uJlq$leo;S=-osLrV{*~m z%dR(u$KXHDm|t9|m4!Z;Rg;Vudr@?G;E~3N6tiDaopRkLgmojW!GJM$MVL}0TQtRg z|GeT{+yX;v!#lOwVsl!@^k3;r#W<8S{6#7I^Yh;%{FP$0PFB`kDiay4_V@RX8go}tqC=)Ise}@Y-|@gCdimCsOxnL(XL^@js>O@TJR+=n zH2)&->lM2kzHo=b{xFYq6-Rl}w>e9+v~-2<(~F;CE|&Tz9FB%c!4xW>Z!Fi#HtBb3 zdI$`SVvCyTNA6ZvS5e+ZOuR?n2R62DETx0*{L&d!Txbxvn|k?kdEF#6Y)H0R)NbeuO@>N!@w+X8Yw6! 
zK%)()Vipj7R5lvU zuV^et+x>Xz&t{Mq`8g$0GB~z6F8t9C0hqHf<*j@$Fj2$gW^(4ZQMHy|pUxJkn{LW^ zrp8I2hc4zUPuhO(*!UQ&(2)>;zS- zV5aTB!%1L4rDUu=)~FrY_triabTEuMwjjmeNEthp{RL{>``=ok0FX70{viq=IdDvJ z@-Lo8Mkc(8^s_a@7HBPk4jz0^2d7mdQZqV!tl1eh753^5YzaZxf9!x|5UYL>hWyh1 zkP{(d9-!Vnmi)|tDVPjPP>>IDr2p6fce!A9^?$O#&&zz<`zdw_9<|pKBmRqZl5`|5canbu4 zT8Von==LchB#CG-E159@$u3Jf@q)GNvk8pM5FtMV-l0Ru1g{z^$}x?w&}5a^XAD*4 zCjcL~QWU9M?POZY*I+X&2V`+*@kJei9{7VnxiWpupgh{&dmMgJ32eA(T{ppB8Z8Oo z&wtg$Q0f)i)w_rTs3Cgbt*5C&AGnj8lh(!07vSFMT#0o6%C#9#uH8R&7m{*zHD=^F zsT~tf3m(brV7Ufv8auCuf;O}pA@qnp;XV*x7D`5k43)oK`$w5?2NFO3qsTeK@;^Uk zHf*Dn(>R|UH=qq|pR3S;DNT+m9@4Ta%qr)9LOHJm3X|d6@_qN;tSBlfZuDNQ zZXX3eMjulZ|B)ka-}$W3W+sb{Or1ICn7;dS)0)ogd;9m#TeSh%r?Xo4(;mcj8SO8p zr0%+jm$6^o{mW2Bq0YsIzvI06$+}g6j9je=GIQfZu^eU#M@Pq{pFeL^?Ak{dl2myi z`5mD~&Ef(wL;Uw1c5^Q;KUO`@pXyB0QX&iOK6>i1&bR1kO-% zR#ttHiyQD{g(l%$JK`2C|Ix}zPXj(fT;D_XI|DFLd z2Kf0T+#@7B7BZ=70lb|yz><=Bi9lG0o-ffY29|ICz+8p?dm)UozsM;2*3rJ-83oK0 zU1n4Dn`_o(ZWu04uBn5domspiZ?;{9K8Xh_fJ5i=6(wek9d81w`e*U^*=gQ?9Q0Pd z)@OI;p>1?xYhc2G5E!}?5^sJ9=Y+uB{f!~cd(!6_miI_d_gqD_5Z%6KN&&PX4?7<& z9Da5_W{HD(NrmA7c2X(w6f!?N^Y^RW{tB9@MuQaS4IPZQd(z^HV=0T749h?!VN-H_ z$YRu8lg=iOwEhUkeMM~DuufAsan!xv;$--BaooF*q%k9qGbN9n9B>Aokx<&Op6jM~ zbR7!|J9vGFMwh%L=4dsfWX|QB#AThElR_<@K>Jp;{)DmdVRSABYH{qUBAvXg$xKGn zFm-6|m7D_IDHRff!6Z6heED8=Zq7?mT|`{|gSYMw;fRROa<}!HM-bs>U+=#1Z`J1i zGFA3f_WTx4vW0$}t2htj13X zxf$cXQts{uBQ39Cev}2)N>V#SN!iLmkSZbtl^o_T=zNZZQPQ;1(h{42Cr8;hLwEc) zfAE&`FJ!0Z|IgQLcFfcHyN~`y?_@|Ms^GuGBA`(&C~t;F4>S;yftU4IuFNNo)K6uv z!~RCNH|fX;v|W(iy%)BV%^F|?fuQ#sTdJl)k;Ff*1#QXMyT~r2%6>lzyYMy~TM>G+W!o$4x0C+euaI!_ zgEju&)KF3ZU+7?V2sByyP>o&2$;*A3Ac{6q}fEK`KahL2z5i(=zP zNkQv#f?EExdPoM0IVIsCYVx$R#6vl~lM1F16`5c-we>^}yg)iqBv$wD{vVefKZv%T z29qlJG_o=7@>FEZ9_Bja&R5BFvoGhFfephM?v#;(0?Xnk$u_WTIm?a|LhypbYFGY^y)(U~PG3KK z_^=M-w@LSYb?T+Y4<9bNBT73t(bsWEbyA2Vx9KD6JwGAT?BQkpM@fTlNY}eNd@d~5 zQ=VAp%sDV)E#30g_|VLP+kAuk@V%TvRIld?cB~sujrAp#7IhF;y%#m9$f66eT@b?2 z0(F}!^p9%Z52<$QIi|%)`+k4-700F$tq&^ye|tvo2!C|uEL_i5JZ8T5S&Z%WbZ#Sc 
z*6%^h+0S;nvgC(h_CKL}7dAsj3d4Uru|3G->~6{AW%O2R|HF)>Xa*|8*x)}80d}N+ zF)8m>dCwN*Pb;JluIOuY_wp}gEv&)&T|_UdtSm2%uHAaSrB}a?0E8BJbp{qEru-;) zS8x5XN~BQta;13B_W$@}{O{TH9pLd^7295JcxU)^Hv0E$CHengQ~v(a39?&#??I7W zTYrUW{4Zn^6sRD}Xgja=#;b_t4~PnWj|hjS>>9lR{drv)`Qp`{qmy;-wF#je_)D)Z z^a;Eo4$O}Jq=Gl}DEBO#{Lhs$CXPhXo}J(aa7qTz5Sw?@TbmLC@Nq}pv*E*QPHn%; z-)~;{-7ft_fnX6k}~B zK6BN0yVd)!f`s;(Ot!I{5SM3a?K@(0nZFmFaO*>HwV+D&Zwmt9m%Egz7fS}*?{Jg= zc}#zRwb(I7w1MB&?Rv?y^$Nji>C0mBW1U4P*o&vd2e+bd>yP>w(~XasQR3$})OF4e z7|FLU4W7<3u4tgz4!z9*mX|9SB2YB{JSO`extLZ(MVZy87=1U|ND&Ykfs?}6>TOT}?6e{aq&YU4W^sDeTdhegN_UqE%& z5g+4x2+-}sBb{nz_B!IE;_Sa!$nE|2Lhje%@*xlYOx|`#fzShU#jG-QyzhK zpCgOpWRkb(1%amlpO?L$nHwN6%yMedReImNIYo(&qaA|(GL0y~Jq?GRfg9cH(l$in z9!2!>!&Tgl^;O4!+QI8YvdXT1o(DvEBw$)6tE%8RRmsR<{%mMCarXRQCyw7nOMmR` z_d43%8wouT2M^%5c9vz?YJ|&)a2(e(p~IZ@rrzmxk;-rf^hB84j=*0V)I%+779)X8pfI_c{T^YwXY+0d_^%^8=f zGaK7gJF5?o-`+v~LvSZf#>36_kv^8}F_0l|0aF}FX>*h>PJLi{TRJ*YbHLl%yCpEi z@-Q0OSQF}2i4c*=F{Yr@Z+vO$rlb55c~?hmP~Qzri5jmDkez+vMsaqkHMZ366Q^H+ zNP3nbsKNYKXss{Nh81KJOe{O|FT+sgu4|7?as7j9C3kQ|w5_vvf*T>LTEPKyK44Vk zae{gzcIgmmIT;3vqTk;-QSI3I`*U0zUT&Gp?zWyYHEeB-w5?}%kp2EdJZz>!Sm~&D zZ+&O`M*!rU-*G*3!9MZ9i|j}PD?BS#N^9z^EM0tDwM~zB$nLKjPoG74h@*X-EDK=OtSW6k{igMRl%}gJ4ek5}5pGFEt6bZ!VQCiWqN$$?MUizQQVcM7UMt zG}f#VFkb6$5#=}lv%obF$R3n9nCH(DsdXP9bYBv!tQB5Q@Jf-uWiWiWpfv%|Z0}OL zz+0NIXMolcukUB3g6Tf(?N86p#Bx6CaUvDZ0)W4PvA)K+v$laU;P3NYx`k`)Wie?q z;CO=7{V)9YTY5 zb|5Q`e7_;jd1NSHulnhrq?L-rsE@0P;ps2ISEj6^wef|2iud{eW z^Flg7_AbuVNOhXj1a#*;q+AnU6Ot-lv`o2+n`L*I)7TODVMmk}#!PExZWE*0XErjwGABP;;P4HZI`U^eO(=)ofVYq;9r<2&Yl*zVn?*V zjSEYRY0@Qv4~f^g=4?V$94@KVQd+PX=G}8IGTU%;Nq5h>LCmx=&QN)B+0UeZ0^#9d zRQTnb#<_|PJfB(HV`wuJaqn{5l7ilKk-A%4%ulbI2t|dobBTd{$u+$9H6AB9lRppY zg8)Ql+%Zzm0+r{Fg;frzt-$pwRf0!XdU}>Y?9YSVS_>k_jy%QLrU=rq0ZOt+9BuA% z0rWMwUZcEeGa*KFURffe4wVvd&E~0*Wqz@t8PhtLBp4PbS)jk(v@^i+d^{x~KGOz! 
zt1&J>Yr-F-!1y&pY59w514qIzaFBO&_bWvKpO04-wP!-%f)LC*2Va{EDug+ z2xrcB7=Te@bnPIkRdAQff^+t{#^^yEwd))B*w4nLC2L$L4{MpQ1JsEY!44@Lq7bTV zobb9WxXG@Ek~R0yd1})oF-N3Vrx?0ef3d4936nP)Mu^>;!*{FZON+ zm9T3{C=d5(2KnJq)D1Xev-xs`s_ z5P3k2hqtc!#puP(wTw76?u8HLkc^ARSpZ8~2^goKDC%T1P2eCq2uEQ(5?!$r$WiDd zQ49HE7rH0%t8b~uXj@yUC=6r`GYTa|W;2c}KQHJh%Zt>i7nuu<2nG{D1HtHGR{=(6 zvOn8UjRsBzrK>Wwhw>CdHK;!B-g-na5o8<-F%fMK3hH&}rvc?g|m4k;4*-k8!#Wyde^y@34%7PG>b7?^pKdzZ*hbPe9V>Oi8fNX%B zcI&lRTU(<%^!BB}i5H}s(XXDjc55o1xITJiXf4>ty#E_IJH)XT&UU|z%|O!KRl`P+ zP%f}=iyXZnc4N5(BqLgSZKOid0B4Ft>R)tW{MiJdgrY-eGA(79pjcY|7>s72nWg3N z5RC`p!aKJf2QY+@CspYs#mag;RGn7Q_a7TH2Er_a@%65MNfBl8utjadG-f1{TP6(! z>1&cblA=M=xmX56X(6^YubP%ryjUH}rz_9k`4~)|p=~{HDwB*2Uc!tOd^HR;n9RbB zr*^Z&XH10^%_t}+JhtgBv{EVV>~pT7H<-jCb`xEv2N;}AQ!9;`=XlFl;X7$t120n| zRQI{(Ia-Z7&>P2+`F{>@{uJYv2X7mU5g) zNau*%w|u==i_(WrflLp6@M(oK;tT%ZpCa}AS#GvSq}KD0sC^Zv_p;HA(_*Ji{S3{{ zySn#JOeJ!|7JeR3Tzm#6)7?nlDz}|7pMVXYXW_YnvVR`lc0Tw3aE+()11FL(2wdO7 z1cSLax6-~7m=gi-j)}yBjYw9(0{Y4{fYv6MBb^U5FwOa-vIhEsqGJ>cF-yC_e^sRM zNgIh3eTIwt&bJB5K0~**d1Fib97Z(q)v#}{h&o0LYw)rB6_IC;oUrj>;6}(ou*MJV zP6CF+U5T5Y{%*5n?cHZeZH&3a>PAGmT*^QTmf^Kdf ze{`8Rdm8g2gR2KzEoSo&R}1~YS9|`%2S7p) z78mVIlHqxTo>F-w5O%k_Bzm6Ee(?UTWouV z=iJXkH%3iV?pMwzh*@%SU^XRIWMj*#t2b_gr+nmxmiKa0sm7N5HnQXFWTPh`eMzEM zerLmH;4A}1ga>)sHpoqh?A>{yd-m)(0v0!d?5wPbot>SHk>=1%WKEY1F_YQl_yQZQ>~8 zwZ?9dIGrrrrUAf*eW_bDqnKAuAfo|zqoN#F2dMjw_#S!i1%y;=s+bkeq++hXl)dOVHjHyFZneRnJcn8$ zt=$w@eV89+=EgIER!72CV!Cv$mEM>d0BiVlyk>%*pPvkz2m8{rtUm5^9|OCiebnx| zsaet7%kIa~cC=y1BaWqWdmxiU3`%KU5R6`NdsA>px}{twFvVZ$Au9hK?uhW5klReJ z`!v{(7qw?+e7)=jy1jN0ZXVj!0F3kGCy!D@;~5R?kePLbn-en0Ig978Ywm7OU9)o^ z-`7gKB>_{F{+u^~1QJEkNs3?~cNt--Xe{CyM~=XmK4YJ(v3EcC6rMn}1k5{yij2O> zE>*u_^okzRm^&b@LS&otHEc}!s@=(u1q z0j`*(AxunLCDqw@B;RtsG}^a6Pciz{vnyNW*G{SzQ2$d3Q2zPGpN_%^-rWyUFX-dy z$qos=o(mRc_vBp1H3s9;qN2lx zhM$Y$=jAXzn&ReEGm5}0fli)Yg39|=)oitBAc|*CiR+g)_f0Zn(9e-lKKBAuf%XcR zLJnc*BjU6s3Ipv5B0O?LxWBEM$Y1!9R9Rwtk*5K?D36i4XH&as4}Bg`#R#3O%g+4- 
zkge0l=A*iU>~UPGn#!z1Vl~$F193H883-CXiaG-$949{ijpv-z z4J&i}D35bC4^fVs)TMKwyVc*F#{>nu3-CK4b?pG!7kDFmH0*lJ{UJt1fd5`^0b3IZ znpO&ov;DlB{OVA1{CYnH9_T-#pFe+2zt>w8`O-XBq)xihT1xlCNQ?SKy1BmjfjEW9 z_Y~1E3)_gjy66RNDt3EdRnAf(i5MNRD^GFPm|&2MtO90?J5@|>{Bx#s@N`hZ$#&j^ zh`8y=NvZ3AmPRyK5KOHO2h}Rxzczl4$AWA1&c&V)9io?dB_A!M+w_}ov&@$@j58Jb zj3FlBqsMHX+E*=<<37z|eHZ4xlZ#A)4_jJvuD-oE@@zjpnbEImlZa$G zVopZR>dv2IR?nDv7FliZYx2bki7)$ofWlaIW$B5@pP(8iLk>_mM(^LH{Zuy_-7O!7^~>_t#hrifB2N&A|wt5cD+s4-vB8X!npCu8wv#_x%!jY`>q#x5NC zuWHgVU)qAjs9h&pksY0<>dpL<#TxU5z;B)Xp?KQ7FCtYQM)|u6I|b{V)1?M#OF0E*79o5Eh`;%lx~qKgoum1 zA3D3DafI3}Z~h+e-WHfxSXLDuGrWW%Zd@99hHqJruEOYuXUSbNrCkq&m`v+be88@k zOHUo&N|W*8aw1cKjT)~D3)=UTT$Fxpi?;laiH@u>Xm7j;9V&6nMi{|7QdL@#seIIo zkG}i$G?pi{fxWz_8|GE@a}_R7*7s*8f^7E)wYwd0{S((0C5Rj^I{D}tok|_xgTEjw zh|LV#KPTs&6n*)iJVw2M5z4k6)IvLPT0f zh%!y~Q5vUdup&ka+mrb&?M=VV%b3S5(OTZ7(jDD=DE&Idg4oBa>9?A{4S}l+VXn_U zFG|beWI_4GiVSd70B)ACi?XCYIDqT~>V(dm{hGk_(Y@AE8QK1oxZ}i+rCJjL=Ie&N z;s@ULF>5crr(qL^oNj}HIyC-O-J5g2D>qRB54u_$G>S0055(Cv5DnpjcYujKC0*E=LYe~7^W5CrGZ~oyJ!UWoT*4E*u050GyV1~l zqA53v*CBcwN`yPX6cc)5XCx)d?554Z1f}lNtdRN_<6NY=zuZL*y+w7rnPJAG@3E;C zyLa#259gW@AwsEX?T#tFvZVFrHfZS*knOCKx?MVaVOz^%#>`l{ zV(VgrNZkvIi|kYnz9gSqejW>^;7+=$Pf6X0Zym?-T*crz4RB9i6NoyH5P|a|8LZ@Y zGhC8>=bI=}XY@fzFy#gAM#A5md8k0z&%y=eET=QRzsYejBP{6%O0orh`+0=AH0W-1 zbksZ`GHeLJ+qdp|xw6m<%?J^%MLj5uoi2HKHI8H?lh>pYEMn4}aFu8;`TYulU6Ze(EEU;nX4A=0Q;f51ND~yE zC1SA|I)bcj(N{DqQI4i`^0_beshs@lE;XE%o2@fr`(`Qn_><{KLsTR1Yf4XBO~fv} zFEyN@1cA@YUi-J<24W)pHo>J=~Yf zR)%(2p18VfvM(6KmID>M?c6FPd)xCTdK2;bhnwVBVtq{P$gw&-0|=)UD5?rU#1PooFm2pc78n?BhN zhwU%Wk0358C9J>}h{{~{MQ*NL6C~C?B}TAEBBP!L>tP1d1|5Ld=5XW8`idj&{efB; zg?ZN1)*YEk;AmH#h&!Ynq)9F1^%!SZ4eYr0N=3jGGl2J-?VV0o=cSf8Wfz)o@o;v>PHG?@^DMojq?|ID}MY%m}3AhZ5{@)Tl1D|FM?6lyBnq;+G zC@uk#Tv`GDk)Tk3eLuRFr4|G$XVjmZ+sJBa)RBkkgppVw@OX9__nV+6UGI6AAe3zarMgl9m6!hd~-)zgL#kI$1oQoHFSJvZ? 
z2io#SH_kZPBIuRiCeTU?J8{>TZ2*9)Z(5Va)ToYYx6xAb{QYwun!RoKV@V@}(}f6G zm!{_ix-Go!V=c+m&V6MrZ@)b(B&jRS(2ZQO{uc5=YvQW;dSB!n8Y4R}^dJOZvw^0P zfN{QgZq?eoLXW1k9=%Yl5|9(Qd;p!CI2_GonbB>9A5g~3Dt)z6ZWs+Vz0LSLs)+i( z3J(8Irmjsq3cI&jbzvz4+?E=pezsk>ZhsMzh~Pgmb8#(WE`rhV9AqyT`?~XXFY1+^ z-#`rtRvU@wCJy4E>CSx&7nWQ=G0~$_3r$+!j)hVnoVodbu7?8r7)JD|ByPoUv(mO- z&-8=lv(bXV)aRnWkx;l3LGMOH;OpXG(3Np3xtF8uP=kE8S5FF#Utms{UD+G>j(xDj zC>yLU4v#pxM_=ZcPO&sqHCeq{3CBIp%)6J}w*9~pbwGHcZ~$A3apP=Lj(-HHc@fid zt*;|~_ew0l2~EwWig7t9KY-LtA=^wFc0FzRJ>2$C)ZKu-G?QAp1K^~U2iI=d-C1EJ zP6=AMqT9$nEt z0#4Ibhk5zFNvE|l5?4i0-9fQ-p(*3xscH`jIt_9)`1zv?-H)c6!2F;>bzi~kiCxk% z4ZftWZf!7`-uSBa&MEnc5yFjb=%N=G*w|b%XKJ)=H3_R_rBe${>(aLIz6`Y(tu~vo z=&dR{+F`J98>g=N{l@6-yBwF}e*ky>e4Ox$w)mI9Ll3s2g$aq4xC|C{@*1`RtgHyr zRit4Ke&S-5q+rjS|M@J+q92MVCoCzOF`x$y zB#@kP-f!rDNb+9=yB{(pr4RyXfGZ{9I_2z(o9aA~J<2_)?LQ@TCax?mFVA$9I6S*# zsDkFd#7j8+G}2J|9`o!x<_EZ`_CDx4IcI_=^w2Il4mqWf(KfD#MZ_1l-1t1&lDzTZ zAhY;t0jb#UBdKZN>A_`3(_5Y32Ue}U5(EO{^t?Q_q+x!PCl7_=_oY=^Sd{^*HMp+# z9UP?@x~HvL06lR@s6vS*#9aj2<0)SeA1^z|Jr{<+6p=C^cCP5tq6PDhQyUJ zB7>7@_g{6&84{-)6thFKfIOQ8u#3+gYVTY*Hg$n-HP$zR>5?^=O^uD26@T4tAh56q z)g|%dOdQL`cxF5x)X2}=n-OXDIi%jE2=cMH8z-GD^hoId*cnD za#7~U>;*s2LvzxzzC(FsE=l=}q{N~=xI5JqHuyT%Q#pnQVCt5jOKqVSH|-(uSkP$s z2|!L|jkQBfI!sGA!U9$)8&2y_BV@*e>At#{{gO?(*h(#ot(EgbAYPvb4sAIZ3j)`x zz&Sd8;1-}9r`)hk;oo;xv?`Yf9(Zdp=2#jOFuuxVYVPAmMJB-|0~+Alpvm~5idk0(J37XG&ligdO~`W0%z3&?c**ZnTZCi~Wbfm0Zko+T z5=~L$#Y-DfXelvg8Hc*ROh>-;8!;CEI`{e-gL!jS-6EJ|=MolHl&UM*X+2YQZnhdo zr7cB&tebK-20PYcQID|qC|b4)kCU+MoV!yQ&35J0ABYo3-M5`SERghJkx&paBz^crJWg-~%aH1NyJen~XlQ8?93qoQ9&hB|HftcBB5Ssl47@t>J~G@?r}HPq zR@U#UbG33r*E5;+*1j2PT3n;W496L;gGvH!+ss)LNSk;=mZ!wI2Rt z?p#c>*2~!S@kK)&XgDo8>(ps*HT&2bxV-$xHwzArmb6h-#Qf;ULG&O%=J9GNF zVmE4@(EmQJV?2>1FmgHs$8wxT@HYB<6>i>veN5z3*txZm+aSEK6wJP$A|PBA5QO}E z-b)m$C86aqW4-awgDPwYgrB6KFDvVc^20hJ@waY-wmaE1w;0_91;~!*;&c_vMetZ6DIqBRspr- z_`_RcA%*&U+&($>n4?4c+mA&KYxfwvT7inV61`R*IM>|ATqi) z&%#?dqu ztKT-*J(o759_AJsMVG>Peh$DlZm!Om^A(sarPbDU8f*mOk{*dZNOg%z3s>TdvPB19 
z+rPCi!brncfYpITDIcYMwmpuDfqNGgVp!*Jn*R+75!I3W{m>oHQpTptpw`WpBMWZ! zjPk8;E79_Y506}`lq}jX!)m1N#m=K060UnDS|}{K1(6#uCOXGyKGVHE|M7l9L?->u zO)EyO$3rqO(7lleepmX$)zWNmho}+jgcQ-wSIsrBC;FUJG%_p+T;Wc%iHFlu7V5w1 z+y`Hu1z74Gi<}rERsc$*G?>g?C(f@%AgTXMwcZQLK@q6a&e(~5cCBOvLZ%S7AgEI< zml#fr=m;EHqzya6qy7Cfj8keAOk>l>X2P*kh{!&THqVvg5nPMseL2=uk%7MpJymG2$5=mZ z9Img`t22c)Hv?MvZtI@ZAYV|EJjJbQobw44saxf46ae!49h@O;L8UsTf80HZHbj6X zObv9pp)w2Ph)%b_y8L==pp2q|LNgbNM|~*+E-3D>fJ;AeoI2Y#pdLi{V zBxBuNV;MV`r}(O!I$r`R7oNKN7ywi}4^UE?$Ybj3BFskb!g$N{X4z)92K^pL5OdAU zi;HrR@VtVpl_$t~51diL7_!kC$Pa(}nAcd^*F|jKUr!(@(zM3ME|F6^(7!0pefCdw1+DP=!Cox zmVRO=C>8Z1Rs8-*r^Vo?Fwch~qml=LhJRMtMN=DqF8 zvc|A=T)S)3`@j*pRXGXM8mLw@@5zN7yZkN}v=Y5u7Hhp-(K*L}X6l0iM%2QoP zDcJ6#J0|@l1Grda4wMHD2z<&jt-nUB&|`RTCM5Y;{KNU|&2HL=xlu=%;?CzAAPY9<06n z-|SW@BFQah05CZ%SKAemsl*lh1z1kAb>e|a4PP^rCb%U|ROdPRJf~`n`Zi`2b(-c@ z`ab(QbhmaO#2!xrChi+8WA6JH(lSn zi5{7UBcH;Z6u;u?bmy2^=8&D}b1F~ODB;!Zsu3CRP1AUSIn`4r#ZPel>gVEH7}&N$ z^UoK;2xCgbbJPGJSU2H{`_$8tTuCisZj+OfP96M5p4XI~$E?32?yt99cZ+7vqAOQQ zxhILW(Mo%0O_V&WVr&<@bhV>7KoB$~#&%OP2Mnf5dYAA(BGZ|SKqc5mA1iOXv+<~L zB;|PEKGa~1NLK8Um{PWNOF~DvwgCo4A!^cCXg4}zE{{&}@l3ZqTYaIOpM~2bkeBplubu6nx$%kM*`WCm*bY70VbBuV$VZ9Cx|I_%iDe z=%1VLbr-Ikx1!g9d4LVIyLxstv^DC&y2WV$CAe{LF)R!!V!ppgRXG63{M`;KCcykI z-2N1D{ub|D zP0dq%(kS?%~CZqdEP^~GrSR8$?hdBgto2X;99SXej+UET_Ww|7Y^A)mq!lftd zNgu}WQ4M^wY*u(9cWFItA-q_n`-{k7mBYQFZ=cmIP%B007zF<%vEBYN?FzZKQ-4wK zcDEE6Qxh*hj%dAiWLvQH)fuF&oFR%SYdHA}f#*gAPMEqe$V4~xop$Y-IUmo` z0lY|~4ryR>a&Jt=1#4nwYB+h?b?5CV+xRXD&jmpSS2pl9m>i&Pef@0=m0wM8?cC{( zpTB`|_MvsxkYt9cYacqf9}~>gMsfH^Bn%Daxbp7edW#`FzJR+?gSq~K!VyEgB)7_l z_7xz%qo)VHN-|-weWj&Do3iS?ddfy`h<3_KpOsCC`%m(8D!^5{i@0${f)NfebZ39} z(WbmVq+!M34-SA-Zhj3Mx zN8ncN5E#wAEQgCTekiQpI<$-91CsC!EINaEV4D}Fd(MU3IQKErv3*%m?B$p{B`XU{ zZzLVu-{*qz@L7%Rd8dzERQlWD&V!mJR_Z>}VE4<9dxylAynK{^K$;RtTR)f(cZ4&? 
zTvu7$*x-buA*+L2k%N!Ht;OZ~tB8S;Pd zV4W#6;}-#Otv4~38I+=k2gHUtC-|2$P$$O!y}HYkSe}(U_KR~87n7mKU@EJF%_TIW z=lWdu;zjq_IhFRBTDR-vL4sACHVg-7xNWrfks08eT3O zOm_B)GhINp)HjQki4|f6uU8?-!jjv#{N^RWG1&ozxu@CQMMHfrVinyS2<5NJViyoc zj5vo2`D)AhpZi-l$QPXJkSF0yVAhh34;1zZkGYpo=!s|h2 zr(9hygR(HRR67TTKM$Rr}>(Ni8LB3G)_Vc5J z@DcMyreo#Eoj6J6z<|yibJr6)>Q&;nvM@cCYBZV^>9HGKeTFmuR@yFE4#(5>#4HJY zla3JNY&IBq!+ITvKt`HCRjeC=+ppfnEUf^$=)JzL9#B{=u2%FSdc2KElZ|esu~Sh0 zdIQyWw)t&ZQEDKg;9{Ce^o-FJYOrgm#MJ04#%S+6=GTL;Y{gC&mHpDI=V*wOL8I93 zywGX=8&b!g`HkArI+YECbXNgaiV~E~m6#vddrtRFxc4u?X!o(XqelMp?5wXn!vkN9 zTdtLWFfPZoPyDPfRSo`;W8O(9uo!&~H^($-*YEZP@Yfl6Z00U!k?A9||KCh&DVLVM z%st{D5BDK|7tW0*{C{e!H=536r^pULar$ZBXZrbNGq(Zw4|9sA1G zj5I9FZ+LhxGBA9C%Sc%LX*K)jDrzx>Mi|n=;Nad$tG4tL`W_iL@+3u!`YA`tz!t-RAF3UwP0p3 zW2Af&2+fa;nLRB-M9Pe9AhME7QyaV8RoQhhp2paC36$ex&-}|v)IaT94W0Pp*?vMJEs+G7cYLj8jHILekRGb zruD?yyao>FbFR&QKlt|V+aN3+yR4qflS1LnUHjtZkv^e=gz2`rnCMQQiy240#}`hp zHC34ty)aZ{5e=V9Pw~bVe-lhUfAvelGIgulgoBHrVJ+Nqmh-_t>S$92g=xY?%{2Wz zFZXVp6&H9uTwwTc-3qRis)7Qm3dw%L>$=z4ks1E}J)nF0|j?cyZ zxTzB;6~jCgn1kSYs~6497iVQrWq_A9Hjd=S>QD(YAmvER4FvzVK^yqxU+@n;1pmnV zdUNt>uRL5MGtGmVQKh?o%@Vk+e9?`RX4ALkCptE5DolthQOd{MtQ?-9ZS4r1y@&zD@8Y#^k~<_Z6**V-|3jxF?r3xT7$`Zd=EV}MDc{idWt8mV&<|u zl#EJo8g%#tZ>QGkDVLVIz6qf?0aS>#_3}rX<2$EW^$NIA zxMcA#f=GuTo0hY;JNZQ7*K{`zE9c9NH@czf4Do4oh%fiBHi~w;;Zz_S;>z$q1*?O% zI>VA!jjQN3Sjp?YtelZ6JomLtG+y07pDX2sxUoEiw_=bta*Zw5Onl22J1{3^Y(+)| zC8$p~8m!zeJY_K1gBur7YI!Rx06$ArlfPtj5Lzz=M#j_9fhiD&V3{dOU8*+M5-{G0 z`i{j;5AaQX*{3IStRGZRtp+Q8>5>mbwjmSlT_Fo`eV;7Mm01)re&;G_jt@O#ky=7m zN6q<}eL$m~+0Vw9k1%KaO4XoqFO}I~klFeT+Y_s^cC?nxdotq(qMzm~+U2sE?cIeA zIUD}zSu5k)Sn;|e-{&K>7EjLQ$IKd-5;+IQziyZtdJ*ND>UcuBvDaJd(OhU;jZDG?t`+4DqfF3HAnBm{UiYqxg6!$wM zHoAu>Hy>L&clm1;KVPe4l+0!W6vXm*JEdRr|6Rbvyv#tFerV1@gev5oB$91fbc~OW zryp4^fs3_19a;UnYctV*IEoxGJlNArweAuvf!#*DunTOHs9FOE?>8M{ z%!N?#VM7|q2D~Ggb-hf!+^80~J)*e(Y}-BN-$uBnm3V| za&S;>600Bo>3P@GP?D#uy5-G$vD0M&D(86TWx@45a9RU+xLLA2K*Ne|j+dCNqU$jY z5m9%}%K`-gjN(>o9!oQQs%T<};A7HMJ{TMzW1GY2qqKj+)|B{{6{V4TL0A|ykL&4Y 
z0V3HTzbFK^2MbPi0Nv}Q&%B;NfG_dy$unbc-)oOe_@Kiti=ta)E8rEd^$b#eRY)R2 z|Hy+amv;dmtb@vzKo6MWStn}4pxXt2tnH>Pan%n7nxC@Se-F@5GTz+V6bXsjHWDcT^_b0T(Z^0J8z*)(gwuvG%MbT$BDKT01X*0H zof4yBcsZ~uJ%7}NFiS`T8f~JrpsZXXPKobx(0z={;Y`V8FtyFplgus>f_z64mPK<} zR)+jeIwgY`lj5^6rG_toEeISV`n`Tzh37Z=8&K$4J^oo!ejtTbj;k@!E9y$Lv!{rfgvQBsOAiOA9j zMcF2VY&FOdktO?9V<&{{Y0=n{B*t2#vJ8o_WGiJUgX|$ZA(X8eOS1j1o3`Kkeg5zJ z{XNHXJV!@*WIp%jzLxVkuk(~nb44E4m@<6unZX<$f!`1B&o90!z!xp~kZa7cX~Exo zes*>_-*@E}u~-gdG_E6UQlb#U;1GxvM-vS66}&a$7aqfPQT>SZ-ZD$Ji3?V^PK>I* zAU~6$9(z;mBxssjk6a?(f@y)sas8;$XPd;PwAMHvuhvpE>BJ>4Vu>9bOt^!{A-RDb zQ!xJS)#=4NRJ2+wNBqJdToWS>Fe75Yd_!Y=?HWgC1$BlCJ0GE2$_OVV-e};aI2Nsz zw_}hQsUPoR&nUEiAncb1$$V*xLVMl>Jc@C66dH}M@_Ga_c7q_6ydBI>0)Q8;k*zRh z2x#ea<>J(0V9ud;v#=O|XNf4h9h9J*aO(!a970d~#kuQee_zX0)qByR_e>TwDca-f z(UnwieU!5QwCeCm2iWgShsm{p3X|1H0h;*-nZO`U^^>v3Hkpv1kW%IX`V__2Bd((gco*!YkU(9&K|SkJ#U z#|e`=E6M%Jy;Dy;Fg>udsGwD6)N#n?1wmv6M~*+ zR~^RsZ3Rgc0gR!skFL##DxK1khlVQIWUtb_>pNDAk9h=67YbO_&>W>Tm?|CXAvgMpYp0{gs%{I>UR9(Z zSux{jc$u5GZg|>BMljgvZrU(}R1nCwabdFC&j5o%jskJAJO=`Nof+CbO;mk8zZ{Mm z<~&GLXaQ4f^F8|NI0Rrm`Ftw?Q_ zI^4ICYrFIC4h9zF8r0I;krYniMl=2?F~;=+3Q1=QAF@IlxzZFA0usAz&BKN>R-EHK zePCDnSz>;SX{i{-xBy1MscYu!3G;|%DtK-z??7el=djVtHMcKtGJ3J?_bm&B8ezj6 z5$oD_B%#eEXyfHKTv*prjWZGC&Rjir>+=ipkAQV0~Mtgo{ZNiLAR0JCICAN@kVrdlkbz>4?>7*2o;5;eayeH;(M2nG|Nn8kl zdxhQk!4v=Wew-%l515!H5)S1Wht~&y^$vFR8bK0N5{oP5`xghm(?wXE>VR4}fECdQ zYoZK5;wmNjh5*ZR{pKsS(X!LiJZW}fx9{Ry11rr^)1J{!lucJ^fPS5~6L3&WLNpg) z*^C!*N)lmW%BMdWpp$=uXW#rIYpV%*upp&&`-mkcv}4=m6196 zcJH)_xNrRxE)vL%r$XMna0==xiw}1UNz7a__ zjRoQPe*)w4ptW1qfsi$A_g0Tn22Y4Y(gSS-pmX&$N9-9h}3c5J$j_w5($WT#@ zwVTf_W#Og|YHp?-^p2CX`^cR?_T)O4JM&ejNTXZEL4QcC!O(G;4Ih|EICg~)2+KiT zAc3or(Y^r-H*seq*fFhD)^i>L$KkcEAP}3i>OZB3?W~gmBiF@Bw2uGbm)|?Zsn>yb zx3}7tp|H~s)uOiN=qd3`&PO)dxB(5zue^?v*m2U0LjifFrS8bcP^!+ExbPe#5%8fK zK`H$e>$5N5G=wD~Jwin#n^X_T>^--U- z#ZeBE>fiVNp8onN7O5Bc1+HD-RZ8RY=g+yoBS(ZU&F~z`_Z5DY3m*umN!Ke|6J&C+ zqBc1SLlCqQn~kaGwr>Hew3ug4gCBnYfe3*tH{+bsI-XT8spV~OVyO;PUG>8tYCx~M 
zyUX82dDsd1c_RUZGEnkoL0E`xf$Evym{ykAe<|LlH~#EkG#uDxUNme@nc@Qj6TW0L zaeTxv0qJV#Im;H!r)|b%S!^-1LVE^Ukvi`}(d?io6$POpNerLX4N*26XyaGE-hicv z1)yC&Xnxen>c5VR%d7=#tZLMs4E4M2zZDCeqLJE*oxcL|23kBKL?MF>LQvz{edSGu zsmb&4aT$@s)Oh z9`%$_TBR?<9#9O-Lr0^9U-gE7s3|An^KBbO$d+t*9MHo?b+>(RN2XDK#%}oSKMOtZ zHMysPZ!f@={R)&FvY_tK8a8U=q2o~w=9Hy;P$M@>1Uz|-@9r07)64gUQ1hwHIBL7D zvD1K5ZAliV%T7rw_34A7>sw^K*4#rb5M$`BSshi17?PhJAMf=>HnRi+H8VuMudhV? z7XyVWoPS~}#R_ zZ?E0$^?Y|~)8s~~ibs#5HKRZgXwp@?+-qAS;fQurf=tLTgxl#XHz;!5^pEKd1`-M8 zN%_t5du;jvm=Od|2bEM6{3wTwyq<*Mu5CAvD zZ>9!vFv75MeSfJT_`}xq{(LQ`hT1DK|Jz+UiEJV={E2+=pa%t|+A(BldC!e6G6mn? zYIq>x`_-H!)+3L?>Ib*<&x=yyfTbc&Mm94fyU;nmr#Z%aZ^x@I8`Md}E&B#&^?JU| zkCpB+w$Q-U6%BGwyPtq~jw-AidnPWL9-Q5TU-_&z6jpBqvKm0|(ad+q`kJ+(v+M94 zAn7~ubs-f0#N!hp%q@EwzlYa*Y=4}MWo0l3Ht%o~C;ljlOo4%eo zkyspBuYwYzI6-f3g{*^}N`W#UyGQ(fXnlZ9tg(SU5&uc(HUFtgyqGd0u*hR)4O~B! z3Z;8yemn#7pyLM?^B)m{!~B-b=l+xF2E`Pjp3$GgdT#TtLC1zoZ3tZSi9ca4BW#KZ zBXqztG>I>7 zR>(h^lo@;X=8oeUhl79N8O;)muoi;veVqZ{LFVK zTkpZq7i?4^TcjTW9NCqr1*^NHC01K98u^NeWZ5`KJ#0gH4YFSIsrxYs8g3VTDl0I% zy{i*PQPF7A2LR`#=n2NwZWaduvN$#4U@c8;{Hzi4)V0XYAD^`RnWwi`O?zLFU+*dA z&~{y1TS51ynbY@Q3JqjMKfctX9NDay9|&6ANb$GKQ2#Lip`imV%T=tmUj3gS50~>hB9G z)gGi%>v3EoC#4P{xLKNRXi;R)+2I2k3YB}lZo^s}!c3ohy~t%ld70G=m2x_e3V2`N zB`SdANjJGUse-WdR7-vuXz^rmMwh_8W7-Zs8TN&I4SPMSRUDTG^R0y}vL>@Jn4U-I z<)5@7*o*jih)o=6f5u6_FW3&`f|dKsAv-!ga;V54ZYEcZJFu>EiBjU{7=FF4ZxNs;4}~cVO-+10VtVz9^1!P_ma496BLIyzm5tS z;pxr!&n-!}wg6SNUk&T^1yQuEPYjub0Z zXW0c}3xP1eH;w7JCdi{tlhID3EG_Ixg>}@X9Lh`C_=U9nvZ(F><1*o)_#PM7Y_U)e z;OT6kwbJa3QKVac=bU-p=E^a>;&M(D|4Y{9SE`FYf!so!m`3$XLui@$x5ViMbhA-! 
z?~!=AI|gLak*p<-=>chKk^TNv9oFYce!k_;C)OZYq^Y27S`UA}=)`c>*ib?r)yg9xpnf69pZion zn{iMaAtiR^7*KxInSGD;TIXNV+@(BJ9dpTM!~^P^h()--au{Eg@F}^ z|J>&#EZ*z{wsu2RVdr?LopPA=3$`;}&4_+PJ1XRTbi>D;QBlC201+uEzB%Dw!t|mm zDti9mg-hy8v1A-$SogyhT5+W04lem4CU8DC9aOg@S{< zF3QIyBqC3;_E?$2qdV+RI(K$1Xg$<+G&@pTGA)VMT;E+hY^;~dQhD*yQz3zBBcrIV z$iy1FXis^Ey5)^Z*>t|wV@5(h%(?rVNPQ{Hk7TqtKzmEw;tK+k1s{RG?jqh9nHz&N z%YSjFp+{qkOtU+6JCE?bjuHnu)CeP8aso6&GYCm4I9IiC4O|L%oc73px9f?;sV{-X zWdU%KH`YKf^C`e6E3C`amCH-RSYY3hZPoN38L(cyw5w;Vxe;;*U-Ljme!3_FN4JOgJ_NYEK%113oZGcQAyOoPVB&8%oJmp*3xP1 zd+xDfBR;}+*Pu_vN5dB)#YTu7+jt|lbWVW*Z63OX>bQtnQ{!>+9w0ET*F1rnCVOa< zCR$cXPcZ&THor>ff>fxWu{Jl!ovchPHG2x=I@qYQFbw_;U_n0_Af{SMn0(+-)e2WMkuzEjITEMlBlFvbgdA^UVlj{IL@@n>vB9 z%yp9cAc8Q9uRX`(NN9)t$aXik@9$c4LvXjsy{_vU3nIM*YTMQ`BIyZSiUBF`6)^dF z>RaYE-uGD2tV!sYoPEArvk9_$YS&&oz4$2nWO?;TQZLLMPk^hK#VR0Oeh7I!7g6j`PYKe>);o?v>@}0kzyS73<_HI-{Mt{Jw4_vu(863$Z1SUXX6T?)7t_K)!Tjsb^JoE!}w_8~>jHzVdDlSr|saoo*{6bk{i zi)B%}?sr}b|FGU2dv@v+-w0lT$`?<+)_X-Gy;lhIw1BPs23#&bcj84V;_liq^w6hS zY3DAby4~HrHbJIIxy;YC*z$wO>eo7Z`VM_`iC1^<-ea@#^=&HfvkMl%v3xbQMlw8- zVCp~$@aT|KvpgLz;sT9)WzLUj-wr83$x%?ZX*3)nDS4vn94`|dST7<|um(2v?)JL^ z&B!vE=*$(GUtECF_jgs~4>p62m47CF2?63Ao%;^sl$egH3=PGrL$BKUMaWu=e^@m3 z`&u}GiHLBp*JLo9?NoW1_n#KwXYsPW)YqR1Dg-wOW1e3%g*HV+fem+ud#S(nTJtP0~>Gt4RP#qt3Q!$Y<4BE$tdHM@kb zW>37iH&$0llqyEfk6%dQ+3KaAq4s0?f^eLP{89LaQ<`TB($gW@fGtGw8V&>PFSIJZ z*P-DplH61w%n%~xf4&Y* zs#>Q#FC2<9V@;WzpI11_@X~=V-nf8%KyI%3>@6BtUZNlZ0k$Jd|7~g)>}BW>WDmiQ z#DQINxABRL_O;T)wwd}3fd)f@S8d)tB3GsmZaP%k-p(BL5xAqC!QCN$@?hdI7hRKm zFPC9%DGa1bz8ipUt(q<7=;$-Av6du8)5$CQQIXq)9=>gosP)H8533+^PlZ%sJIOg$zDWNcPCX_es z)edF1FJUsEpV#+x?pyvyeLuSTI^w9?VB{HRt(5PB zGhh^Kud086G@H%e?4OYJ&CCv{;sx4%XSzQuf+2h7<5ZZR-){IA*vGIM!xlGEZTz~V zZ`rYyo&1u`tz%MyG5L)84pFL^jsIx`u2fkzq?M1Ph`e^zspZ zRmIdu#j@=*BOKFs%A>xnLLClH+c#d4){KVi$5NZCK=1JT!?Ewi z20<`I?88b*jxg`0Ywsz3y8=>J+RBT$AnjvhYddi7+IXrXXEWKN%*%Nc zJoK6{Y4h!G2VilX^5x5y!)gIAGG=b!%V-|Tx?D1wX4ft%?(%7OV7fUg8S*GWNe-7G$f+dE&TJp2sSv8G`Rww%oz 
zv$C?Hyx_l*#ZJpDH8V4_HytF31zeC>`Er-YzErf|TYWbWKpY~By&0Q$R#wIe z0%A9xEkbD;ymUTVvdx`sQeD+?(p^@#4(nM;^Az^4GIq*)YmTJzw@wtw!ebH%b#--} z)*@=+i~Z%NX_-_+vDl&};(#xI;;JrY4EN$Ry!RjrEzV#9eBkF0zX zjl>5u&4e+d+IVhR6ZK3qaiKt`tTw*MVerZBptrk`@&$RO42lyvO$)X#MVF*g`0J@c zYotxv1Fcc&St&>1$;yY3J!41DRK~vIf`hY~k`3aI=}sj061!vRD@R|@aW4&le%zD> zN85aW_vf}#d;%Rg+}zv>IcM3Ak%C&OPLgHf3@7659o?s*sXO6?#RmC;T|t7wt4%AfBbIz5Oy^T|i)5Y-wZ6GNCgKW0E^~(s2?eGaWHgCYRdGE#J%BS~(5; zgeqJ>wdzE7&e0zTMyu$D`uQZuO<#WRlK%B1kx|xi*QtZEEO>eO!A}q&Z24i$c*UP$ zrPsv)&bSjXw*-8_|Hm5h4Ma+nhogy_BSogd^IawRrYlx}u8HhR6A&wCP&jsk$hc5K z_Uidy)KaOQSwqx(807DN%pJQ6W39J5jWdWI#iR#Hb@4(jViAT#Eku65EKF>e?!wE@!~*62K(tjj=UK%8}@ zzTKA%M{)cdj@UJy;WCx6$LXjQ*j(nr%x^54upSu&Iodwo zh%--KsAqOuXZ$#|D;H%rh$3G(E3Ws`#m8a3>--&ZS66nR)0fv>BOLj zq7y95N8FJ;hYg!NZYZYrL(Lg4E4D%VH?I79aMTkAo!exYq%th9f!qvPM_aCWyz!&^ z{HsPFf)I&*m}V_(ZHdEBZkDPDYDe9S99Yo$k!BY$9i6a1yKuqY4=}HjN3Bj8vH0N4 zIictzEJt+yc<`F$-c75KN6IOE!k8f7$mGEbNC$JJ^FCMW-%T-PN}xDE&pAnVGHkct zN&hPZbN$SyY&HPwv(q=-?G_i#xr)v?`r%DsgMMIf=QGb#H?D_VE7CeUv9Lqf98Wn< zvK`rem8T?)Iq#%0q7iBycZ5;k&H+90f$;lm{I+Ku70%jE9GCDJbv+_^`x!#{3vsaF zXTD0u6ul_N$^5D61>2Cj+m!cQjh%mJy*7z_BW|g3IzFeGT2F{92J_xoc4W}cbJ}pz z;|U$Zq|jQPxdW;+`Klh)jOl-p-ScwZw^P-Zedx;R(GK$x&h097#CwPJ9>2TY&dM=q zd)yNb389J?Gp|%K!5lq$)-mV^w!tIaP%^Pkf#Id6j;r!H0msR?QnBrq=+`>!)1m9n zOY!}G;=PxjPas?2ej$-X_wMvuC9ZN|n_|EPETWX#@1Horge|F}kKARf5K0Kt>FEk( zhsgZWnbKt}n_p9?{sEJgL)PM{zCJB(s~^AE06I@1^3#)(vsfY}PAKUIr-WxEp86+P3@mU01YJVz%?P zdVa<9j*-^0pB2{?2$Z|b$G37+ORP0H1rejt0ky zNrp$IN{01Z2D2ivni+PzeZ19OptU)AJTja76jHK4;Wv;``FopJQ2hw4eRMSn8o9D>cty;LzlEDLVe zgtHECP&wsv819h+IZ$S%6X@j;^LOW_Xc^Dw^Wlb3ojpojU}OuV3ywWhb<_eswvJv? 
zJ+a>@!Y8aZryoWhCi1D?9YO|;*Fd9SE=O-rMpxO7{em(-q86u6>OkT`6*;rcOy;#g2nqj_KJ5EZt z1=^FiwQ(&cC=cCC$POL3;Ci~Mo)~Acy(@(1N2mvOC)3jtin~gazSPodS_?g3iq6*j zCW|t9LviIvlZajsHfKCs!77joB3eG6JsELQ@N+Zp5N*p6;isen4b8;cTh>By&pyEv z>k%7>t}>B~DF_VvToV`?VrU2QeDJ&Uq&=^=$aN^+KS6NA)}>M2k5-fkt&GqL+;C=R z&$~Bt8{VbR_5_^bAh)>jXTu6!BC%O*%#Q@r5aK)K)Sj)c{D+hA_sYurT3}Rf=WH-) zK7Q@oU@|z-2Y5583NK0}nsYRJ@p0cUrM zeV8iA+K64f2av^Eg+r4=f;&rAb5GwN(n9&9lIO;+XVHQ{W=HO(*G@;FMh}8Ts1u>C z`50OZyJ+ouGke%#@`c(5t&T6xzN8_MU~v*C>`W#5^ji%Z_{ zpwZ|PFxhDnG~e$Aw(y>qZ<+C<8j{TfUk{MB$N zCQPY!b{KwH#h^-G8JAVOQ*`Xpz-i<0Xxd@a6cZbJX_AbLV3(Q&E4Nd+;i=9r;!ov{Z!H9{L9)`xzRi zNnT2>YPzIM(<>p;>;i74=~^WeL;jcey;>egLJ9py)Z_N!oQeJSdvcnSI|1j(GHeai zP(zn?uzCFRp2&A>4TZn1vHlVtdLLg+`OgaGGs5Wpk)Kn*XGr?zBgS3l{fK*U{t^`T z6CkM9yK|#TMPBum*$;w~xuBal18ksI`>gUoacP60&iyMpyvN!?q5ka@W$;rTrU69_ z#x`L?D-bTvq@lo+9)~kkj<73}gK3Cu>!q0Lm;nB4-6iRV0|D?PvO)H_h$WR|Uh&M< z$%(>wQeslP_TsK+$MvHv!t2Yz^$l7NA9;8kc2z^%I+WD&01Ix` zWEa~vEpcihx)C3>_2rYCr;D0deR)VPZ)n)n=eB7-s=Ld0JgY?0Og)qJ)V6yt3amOb zpr^D;PjdFk*3UFxTH*@=`DAM~8-wizYY9*Wsa7~2br-?VILWdXY(+R4n}kak?$d)U!OB)@7tStTIT~8Y2_>Ps?phJ(2Esz#-Umg)0#95&qwf{}5 zP@J}=<}3XzMiyg!Ctyn~BKtCm^Yp=41>w8V5^lX%pWdxBoE zc{qHkDDm0<>g(Z@(F$1lcXs;674>lKLAXG2dCc)9ECec%y0*L4QFm@1X$;-{H~!;s z>#{77kKtviMEYmd1W~xgrL2nfjtFz8>&T}W48~pH+pz}y4=H{qQ7O4j+3FP{ zcYOQH3eWv`pJe7~K0h}%B|&TtJ-jdVoDxnRPYTfRv_g%yw>+PXW0z6mKn(amPkV|9ZyTF=}l*T0_>3lpJp}aHn+TG%oPg>vOqq z>GJ6w@zGy`8Mzb0>APU4ZFa2L19!@Ac+MKg_PWGC1*8g+a`A_r$&>-#OB@VIT zLhoYV<@ATB%9r>-x>vCB`f$IzJ+}`>BN;*GJ{N-^>&anex*Er;i}(6IKYfc$ZZ@49 zy|xp$>}l~w4{Vh)mog$_5-9Q6XL8=SuE`1S(U;)=F$JzPtB-U(e#)NN*tq@>vi84y z>5c)LcL@437|(C>&X3NHQVdjtBi!k`Tj z0-B=R{$h+VZi?J(0@Z}V;Dd|1k=w4ikdU_?K_~L3XuLta)p{{CP~WD|EM=Y`lUVA5_i(wl8pnL-?3 zrd2XJA1?T#vmM@)vImXdG2e_GCvAoHM1+$BUvataPqUkifFdd&%%&0dGmd}plh}CR z$nNOvU_5Z~1^n0**iQ|Wn3GM9XFhnGjR&nrWNAtk;8NA(tWX!UY(s46&14Q z#rGJxz5E!mGxJJJ^0AregRrk~!5LPdy&FcUt2t>FMrc&`aoi&#$GnKd?Wmft*szfJ zg?BLAPkH@rY)wp*gou}TjGX{o5pHE~Ucp8@5~$m96tbXc}=IWcJZl_l_} 
z-;N^hRw!hKg0`czhMmXC%o9W{D+3R?ypkyjZ!DB>K7YJg%mI2@uMdn0zhj3Y&rPI9 zJ>5<7b<0}M`fCil-T)QAc%yD&Pv`67H#&XGx0txXge)s$PC|;rGgnf>HY7QEBR0Z; z1Z4P=i3Jg2(;8!OEyk&DW3#!L_@ZQN4WB=ZOlUmrLPU0N z+R?Y+?Bvs0zTJHd8cejKdX6-ce@?>kKAPb7$l<|1m{V+T>+@NE7)v>_}axmKCu8Z4ezyzyqJ+ zFvizPvkU~u15u^Z*SK6#`~1=-i3_tZa}<`%1>zQ7fS*4kZSvcKzt_2Q`w3Ji21i5d zy?L8+S)<$FT@5Gf4zK4&bw6nkE~5o?Fm9nDv!AU*?vGaxI_hfUHCji^8$2@fxUt-7 zxZGz8jZj!>?Qi=A->Wb5fB;4#tqDgRTL-q;9_4|Dw*JsLV32R5Ok}y_gw0fRvw1u& zLl|I~)aC&+8C=oPrknS4T_Yr2{w$2I^Z*1k7uEnx1a&`8PWEM|rw=_34Gm3=t8IGz zTycJE>~Z#S3$If0wkwt@`qo(=o?QIY$``)-uuLpL3y){L(dB*dV!0piRui*7MXiY= zoJRa}8biUq;*E4M>0NpU$Lj?2ZMMJYTc0&}My|TQ?4NJFwvoxN-P<|Q>ag=EljWQJ z0Q)yuhKv36L?0qUjcc%hVTPKa`InHfx`wZ&y>MO;NDBxZQ-mJva2iLy_t8d*EyPEi zbo@PXC9l?{L7)B=;N~;h5Akn36xR1%WhM1!@Mf%S=$Hh|K)p1};uk9Exc#5x6^dWe zbr8cUuwhsHBE-0%{HuA|)YKG*ZW$7&5gY);FINkTR8Tk3!nJ_sd}yKuRAFhN3{gOf z8f;ZZ1JEMt3S#KNg$ssMpn=+h%agr{VKCINU3~fnGizNcdJZ64MckTC<`1HbnkicT zZcfJ&S=ntW&3@!mRRvAM`PYTlry>pyY+ayu)H&4?|3(x_x}>8wV8&5IO;Zk^3ZL_~ zjspIaiJ;h9<2-(hz*%<;hxe&&&()UR-m)|NF`;HdH@JN5UU!mArt?^b9)o+!;$eSz z_tiMS=?hP29&zY@0)6@0y-@!hxdHRAk4YRw3wd|>I@T<_VrT5PS2+-AdVYo`1nZV% zvK#T!U#pG02!tO0lP0i%2Qf9K4fzgDzCHVfEtJqzE!EFozF zk3#{^ws`(skB3Sg-03kDbiYuP{2jA(li}BWsgj*NWJg&3Qw z^YHHU;*Z?Ft@W!9v;|Rxr=5W=MK+Y8RbZ@hcXoG|?qjFxc*hc*YV4S5E_fQ+x=$GQ z6Kbooxr5uV9Qb}B!63Yz7qck(C?UL_rPEm8a*vxmDf#Rx`gHX?6VP&lCHGkUWdNzP zDWlXp+mWGk57F97$`&wf@q<|@vu+UrxOiEQy)396B!@!7CML8>>CAV(Q~w%VZjV%ak6%`Ceyfe%(mu{D zvE6xxiSz)Z3%x`7vbIZQ_l$@a# zJAWABE}pa(!LK+l(VkE2bSy}EFQc^AjV#8W8FqpwmeGEB)Tg*6lt1eWRmO(Wg4?Ci z>p)Ua#7$>aZvkR(1&o=dPOAs-xx@rXNUHa=I~RcN-f_GMUG9Y-`H+yyXJBdYsrtd!#2FS+d< znD+*E7~Jx%=rcKF?$qn6nP%k9CR-NgF`xbfWPqYaW4HFPShn7Ks_IcqgX9mm>jHCs zS3qF64fWiMW3F_;?`-;o5A**{qM|ntc=rY?WA49j-~8a3QzH4m?(P&0+JN&TXFzxH zBt)A#&!UN`yZ2Bb4fsF1D~1eWs1j}*?}Xl!`hw7lfKzYLWMzpYjFOm9b^9lu^|2MC zIzAvzogb-cJbh_U^Bus6iWONxP@o`F-}vXSJaeLrzbj|dWrsj^a>=&6a$ua~)BmB= zv?=uPLo@KDY}WtwxK>3o5lNat6;+@$c;3v+%)8n8RQSkFkT(!KQj=}t9#ESu|BC)B 
z{%G7Q7qy@mz?laDY`O7Sj%xz2?L&uju>-?;YuavO^g)!s0{qCgwq^Ey<}fCGchhlW>YqG#@8nPDwD!;M z{ee!sXj{JtHu(y92r>CB*@E`Pz`itI-t4|FZx|uc>0rL~GP~94gLVmp|Ff^3pEqER zAX$0<^O-j-nP4Z2rK2pB!3$HsF4<LWFM=8S(g55^)TKK}bKVzp)9Fn$e-n<23a`Imcx z7jP>wbYy}>Cr>8qfbO>mXDEm8uk)l+DK4=lJ4RC6mOagK0agOtqV^O5VZ2pFsxPyLv?C_HF~osMk&|DNrH+{pv`zE zX}&p5+WMCu-xDT!Bw|`eU1vKLkNxx9ekCNQx6m!>>>6G^20eseHf7=jn>bkc>imLWr%s^|3|b>rL>0v&SS zs~U~3w7^OFrFes~({_I4{Re&^T~GB7WgTKmlv|hbB4GhVkTvi{Z*$w+8jph zU}~Ykw7#a40zJVJYO$AOaqw~Q#>g1pZrY9nq*pT<5>Ma)N3t@{i4$E%+)Mph*=FbF zeh9C;6kTcneLPP4Nn3WwI*=3*hlv+zVyAcq-0T0Yo?4xz|hpYwk%xs`H$6D&`aofjwO zLb;>~WD^DObW`p1mj?kDW?n#+zkRavH!n0w?YPk{uO@Tl&mVG@8eOX5NB1yH%mPhdm;JcmE8vZXxg zWJ7jIX_0vGrhK|+uy+cp_fB&Yz#x;;qp5=^kB!0CH5J!h1ZJv z+5?jO-#nm_Ti0**C}7eA^gpa3h|q;eQ?<#(=_-rON>BD>4gf3Li<6r>C|(TzU|X+q z8xGl;OHro~pxEg#Jkt9D9QLLF{sB&Fv6#J+;WilWPeCR*^(C*H)`6pWewh*WPEAD& z(~^jc)%LD9^~}namX;vbzGt&5`_qiaJLKf#UVyY|tz-I4q;k~R6zL0FH8lCZGw-ir z1io0nizJIq6QT*EXH}QXI6}yfK?1Sr4J8iTNfao;%x@tA-0J|H8&+}Z9+blAj`WW0 zSHCJ*rXZTu(%S*i_^*RP?%@Y$G$JLYfosleo!@9f=`$WT3@0qU{q%;_^~L0a@XGph+OEdITJz6~`HR82fa5uwH#pMs z=KZ!;T&tmAo!1AXse^O4bK9}2ch{b)a{FciLO9BSaVF14z7oMVt_Kzoe;n%kUP1|mt2vJ-T@XXe{&{U z+hWxyzAG0=%U#g%C(2-9a;My~ex1rpx@uLr{Y-T&N>)jDb z2N2S%(5cBEc9J}e^uwT_UfhdPYIKH{ph#{^C1THzw8+8p7cHse%H+|;X;)^P-{?M;r>{j5w`YJ8kp8^06tM(XA2 z(b}maqK=c_ye!{Otw`8JI&ktT@tmM6eTS9i4hL8HLPPbwdWyDC@^&mYJgJTvKQLN0 z5j^BE4|(bhyw3L*o^SBj@ZZ;Ra4}X@z}u$$wf_shwn1&&&En2L2@)qVu`FZ_1_?ed zm+q$@^KmnbHWAq)SGOYv{aFjb*E8NoiLLOK?*1=RxA5U}>#W78BMD=k&febhZm>X> z?nZ4fR!Ak}h94_ygUJ6C@&M9ow2rvjM4*EUXW@-3#W0g@T7jsX(M7G7WNj$bNnhNo>P#K zF4l!ShfIptV5F;p>iN@lC?+EBU?c0}$Ucqr1ocG1A>D>UM2+Z(U_U-}4?nkR;J9G7 zSGCo&uoKqB^Hfuy%bYk<{)oK7jH(cC9nrO+*iSZrO*|(?MtpD-VWLSk%+uDk(Mpic zy{BE3fu&aiQI9+ud7mHMeLxNha^1bE5)spgpNFEuICig;7F$RZ@ISrMZfy3pC-vE% z&BmaE5X5vUMc3gnaXRb@*f}&HW4OadE#pPEw*O?$JZ{_cg5lRkT|XRTjD3XIlM4g8^0Vj4~k;^X|{KYP|@KzWZNQH-i&rUQa^OJXYHSC zDnYG4r8EWBVA=5tA6-&|G?V;&pd=VZYSXuaY;O)^80@}wKr5nE2s4jr0rjjZjc<*O zA~CO*4*vk+@U)z`xVvijX#4FC0RvTrxVv|DHm&g@xsQU>GXsbV 
zkjfq$5yssy;Cv`psULBs^HuU&6`Y98NDT9<1G&x6maK-F3^_^q;n+fCaSRz&$oNTO2(XYAQ-_%3w+B0O_p6~NZTqI7Yci`pWIa8UzQXTPy{DOy(kWMWbe>xy#! zXEwy6p~N~Gw0_&VQNdq-mtP}Tq%W24)fV>CLT^GPKtW3b=&>A0H9pifpC6_=j)xXBL z$0pDs)B_sWmqZ^%$H`h_scW_c-ODdUOirDYs!jV^{Bh!`JQtPF3+!=*w! zUjyu8MwS(=AQ$nVfa4Du8Ntqo)X9hd3YmUj@J5UXf zARDSwu;<|=cCdbx0{7mprG&kB(z4Ijs=kp~hzwOR#OBBzrs%_+Js(>Z!J4SY6L@qN zF}_@=NfZC6R~s?zvZ&=y;wmEczjMb9XLVx%(yR{dqBDgmZ$mjr9yfWM;4XpUywy6Y zg_oBB8py8Rj;mrQAMobA^Yp;iArJ_+(36d`E0b9m4ZPaCKW*IcE3XzZ^WtU+`S62) zY~cyib$1%ew$zSlq+YjreB;Iq=fVoKw5Fz}=}U@s_KRkr?HzItJzI?H=vf65JgMhL z5bq_6I*FuHNj2|0IuyU*_VeXmV#cQG>%Kl0z!rkkg#*l^+Usoc+EAC9DLL*>YZnv_ z-f_?ON+HjAX8^s@BB?#9T+n(H8CtVW+mogbpww7gQ}Gd{g(0(wXP}_my59Z;pZHbNT4SbfUF+|jmLrcaA|&s8GaXBZm}U&A!3SreN_ z3jjb?`dXU3b05Y+r2AG%3B&cqQP)DKb8?|)b45i*z54R_2ghaI&(d0Yy3XWnhGy2F zsYaJ$Q58sulr^bs3ZVuQgKljY+yXU;F=Z_4Mi3~ke0l~-bfzD7=%=Gyv9(8oTiY|p zfq7^&ZXyRVINH_(3_)>2;}W%O0`dnck}PXO*;{F4+hoM3bXR$f^2!u-{)nO zy0Q=`SE5_6m^p+UkNaR=H#~6r>)7{{(aF??k4d-a0mgdH@Dl~uBfRz5pA_>unJWe> z8k^ThI3l$U<2AuQxvM=2D(hJrqu06}9C8UmS{^E)TMWg`XK<2s%#+h&P$_CdwYBL z8<)x98K-Xx5j1L&|Mi%x)pIXS%>_XTFiifJ)tXhd%Wrw zanxe_sC92iX*+LI*xdFGrHAxVN%dEW&6?i~al%hKwdA}d!HF(amsYC{C*+ov2x5Hm z_4S18kq?2r;lza(@t(=dcN?z>{D30#$P@rpjJP%U@qg5$fJWX#9+c5~QQA!s@%^jH zJh;XPW&HO%c#w8bUQp^YuX_)j6Uqm4Hf&E3k8a7_9=r|&j~{~`b6>_C-)-2q<)V0> zRA@BnzUrw5L=mGdG{0Aw0hlHLN<+dxWyYA%`}^_d}YZp3@D(uH8hp~V$r)ItE+k;{41r=JRv5%av51J5Z9%#yFx=s@%@jl^Ip8=2oEp)pTujLVKW%g3g4?c4I+^pXD9 z0X;Kmpf5i%_HQ$mb$ias8{mSi&57&@%~sbX3?YomXN48hi`?kwGuHm4kp%O&=FzUG zhzLzI@BRf&fm@kYH>Zg?h16i{Mc-PZ53bL7;}@FPmkw?VNh&#?b~@wZD5Qpj$kwdA zI{cKS`#f8kw$en_;e{mw_6XuR4ek346D6TWqA`;LxSbsD^!FlYua-0gGXmbiappWX|87uER{` zB13FoB3O!wX~S4c9ofGOXcj!>ia z!2EP1>MhP^N;F1PI243;j0MidKZE)nlV+=2Tcw0EXEL;Ua61+L=i|6a)BBBwAr9Cq zox-_O%43Per^3{y4jSYioFRE9l~&ON^2fZ+aJwllvP}vU_wEtO(-T%GYv2Sc%(?tR z$6lw5ZiXR1G44)O56J5>wI~7a$byMB_LW{o$z@~~zQZ5EwLbhMr1QII`5(BJ*LZmU z@2~KjR^Jc{Aol<7YnX6w?qu&?1Ch;sXP#T#;-qa|pBk+0e)=vQnUaVy3?BlL0A& zp21$Xq55j^KN;$Gfc!5*-C)0K~MF8 
zgZ)gx@$d4%1;oX?t{qf1BX_t@H9GvhSHYMPWsmZU;aORh%v!4mUg7?Y? z^x4aKj{=(52m{J>R#{<`CQ&pMPaok2#SUTXeZLl;b+-U)Gr0 zMaS#;Mpq630QX(v77yXc>ceS)xpo-T>ZyEqNnvoCql2r9AB)R?z z3571F(x`gtOF>kd$EYC(ssmdd!>q1S*90_ChNndmFHEjbo<=4A ztp(WJ5lSJ>=;38RbPPL)z3OuSMBlcE+?el(@2kE1&ujPP48nl^u~JEY=_*@C%hRV% z5IM?1>FeAGS;?e*S3Q#;Rquo^Or*6paK$A;_6&ezqxoZb8!Uj98XqV<6$`SM!0e~C z+1ba9aJxEsQn)qNfSI*$vn8R1%92OQmb)BK(|0qS^H3ypX|uv@Z8kAKKmT(FWcam# zsDsnvuaCs;Agip$E3=|lrP(UP+Xtm|s#3!(zmUC;K0vrFs!L1Hgtx+Crx$-r&eBte z0vDLm9|t?=T{kYHT^Ig-=ziwxn5Bg%9xaeFxGHoz`_^dku2B14EM?0@1*p#6H1*Q4 zFhr!E>jg!d)Ja0@ZSdw&7}0ofeLMwW^&2h5ykV@U4WUbG?0W`91RH3rBfNAzhY{7$ z*Jg1#4XpY~{2}rjJMHu2`+tvv{PD<;A@bz_c|LjSchZATUY+8WuyIbGiv&d#G#Dm4 z2)4i(%A*bQ-dg2{AT{U`c`qc*L6V|^pn_BAKmIqVaU2;^d|gM#A`V%n>o$lYPEX}< ztUGbH+)36AQ=58?jRyD z_{z2vn{q|UV4igVwWr$I_g|oF>h<+s_U|WDq^AE&34ad~RHO*0cC_Q*6FI?hW=^yN zwtLR+i;E*5mY;8jgzu`F+ycZ3mjnZa{HNFfT?yt|`!LaNZlqw>stu)2+Z;;y_p;0i0%uyv$sgg$Of z9qkA}URpzJ7XswfFeM+AAH+6U;)>;7eajp;e)dicx{kzh1O%7TdIH#UM-wshASjK! za&&=kQOAkdf`#j{M9Agb2Y*rwCpH_C(bCeoHPtJ?>rTP9aThBB+P_fuYSYgzbGH{O zmS&pmd)gW;^26`h+yF}erTjBBJ2@4U+bf=_hzBv*ckEz<0bfnP%9FSaNFYmkRukt! 
z-oeDN$$^5<)I29EIHfn*rdPZ}-Wy`oA7<%eijMl2{z9!j>s$~1R|EaO4oYsr19rI* z_%H3R_fKiaudrlY>U{88jRe)L^HvR#@`C@%5;+px%19R74i0l!}EgptMD`zA$ z+mmFM6IxiaBR>DqRO(5a3;@V+fQbdpy#jptOt=`@SZ-pI1XkqteRgbV)s1*r^uT4J22m9^Ov2S_qG6~q+Nb)KxeNe~;hgzOAu)?}a#m(NZ)LKG8o{WMR& z<1r=$wD;vpYV4@f;k!A-CFypKa%WAQB_)%viE3|l!L6yr4EHiJSVq4$n`-sF_{d=( zwthM!u<*F^fe4e=D_$$B+tVemtL1(&U#4#uwz5x0_vKJF)$apvUjAPr$8?yv$}VjV^-Ss-0D% zbcQQi{I4AIskUbEK7*5|_~GvIsIb}>09duL`^ z^2>rEx)Zy(y0NkJ0>WSJp1_uYwrQujvlJSh7lz;vAH_@+Fp}1$14>n)xTmK1CIUN{ zA!$`uW%&Ug25nMY^$ie4cN49|zQBb_f}|nNX4cUSDF{JFCnnY7rOr|qqIvQ0l;I~M zofp|~V{eKLuA*3ZSH|^FO*(Vv-`C1T!CwQtk8x+-4Gn-$&>Aw{iwqjhTbOPBm^?K% zJ1eS+mZZfsQAk|PP&qfz*8h_U<*#k&Z4eX!nV9}yl#Hox3i`gm1_W3};rL5&^*Mxy z>~!W78Fw`Z%a&CATnbOn)0WF^X#nL&>hb!oU}vh%dW2}OLw6HR*PaJb`WEL<1P_5D zJ4FH>pBvb|2D9B#7V~w%NV^VVqN($T;2O^;>W`Iw@-_a(*USZxYhWckx-9ySE9+P> zyjd1IFNG^W*{IP18=eO7H~l=_*5mb_-c;e!I+osj*42bAq!PrUGPC>bp{V6SV}MwE z4p;?yVR3gqAviX+h;Xqzp**jgN&NMCb}CfltwN`KVqKlmYkC3Nbg zA3A(G8bAGR>=M0&$4|qC|Gq*~*s*moQE#vC3bD@o^p88>=Hg%{F-Md_inni9#>QQc zK?j}K->^mLED*`^485eXTqrOFYY8o4>Ak6PwFJ@rQ2sZ8DE&I2jsfohbo(^F$rw#4 z=dW$4lmslT82zsTZMWXS3j7*m&pfCfXH30JB7Q<%@(sWk^3Hz?%r1cV+Pj(GI>SN$ zBM-lOq^g@59+}IiCG1KVG@MFtuN;Mir=w*s*2rmHAoo9YPz-%2IXjukvpc4?nNRbL?#ibT6R#yF?klLn5$!LT@Q zJyDwt9)PeQ)5rTkUAf?=w}h<)^y-_81<=0Aq=`kDY z=k>*AJHz#jO1>e#YlcWEv0`Y8T%|%hn06-7{7x66EH9!i6cVb_Udpl5(^IUS!#Gd$ z>YQ_N+R?9Lpp8CU8_QB?H5`gVz`+e6&TsFyS#dWA+tr6ESbFkOc#3yUAyj$FinG7T zLQ$8JjbpU*SS+4p2`0O;W{a{0GE^t5y!9MUmtkJL8kQ?s!l&jdS zgGDdk6LEosItNwq>kQimc#{r!s!(MdD{0T^CPd>IUV+)tgTVB1THRdKpA;uGx&$*% z@?D^XBGk0@VuE$o&$XUDlc`hi8o0f=>!hcCjFmyy24Xz`=^ATmQ#$MSMq#m(XXx$P z%p?(NTb<8Y=T&Lv%rmFq0*_7mV#Ei=e?G*`K$w2!7-|3vx}!wb-$|P0kfn8R$8O>u z2$!Wc7=N1aBf6uU888^o6mU88YL&>jutr_W8l6hbpW9Avv5r(@-z=5-)E?h-(EN{eQ(V8cU4 zT{;uEy8r^<3)FLf)b9S_qo8Q?;?nH7^XCVlefjgLZe!Ar>-T;KA{+BNm*m@nIE;U` zFPM0~5DsLFo3Ig5CHK{;KUx>J(dD=Ou~{j_%X@!l_mCnS4JwLzZ&86en&;D2Pk*cq zZv)9@DJSXPPrjwB4)GqMp~RiOFlVVc1XPj)JmG@HqKmU@6}Lp`I8{HUPR{ioBZjha z*5o4tk)O!!L;1F!!qYA!+jVVPp|D-W=z2zIe_H 
z0?C#V+NbwvF+W2fI7&n8_0o1Dwn@F`eOQs$XBr2KA{+1)SOZX;q&`pykzY~=S6!uR zAx_#{r6UI$4}0dlXbh{M!mB5snzb*}qucK%(^vOC^!G+Ilu&dmL48)4;RJ*^dKjzr zoHMP4vR5Hl;x3&HrQ_VWjLAB@LXYlYksr?b$C98fircf$88U%eL?fbVxRDJ+z> zTt*cL=+rY=Upa(*ncIiH--Wnmr6|>&dVD)fx~%mLA%Sp+#9(KlDbl?xg6+m4IW6>+eAEt7Q@zAblOnoQE^Ax*hr6Kgf{I{F5D7@P z9tgKj3X=46r|%BmY8uspoUf}Ei)Zbh!gSq&zSzSJ6-bY)& zA}bd~Y<7QZr>|Wl5+B{TK<)R9*K|Z5J(PJ=VR&Bw?GAY--Z2y%ZRpi)Zc;8tcCJo) z@%v$t(zSbg@MK~3a;^AGUmWbV$$wEee$-sIrHhbkf;r^LMXg?}mR&SG@Br%>R^XTi zV{E1g{%Kgh1Bl4=5`F_@}!P#qN9n z3rR!70MAMn0NgyEl~NKpka0P;K=!ldb=!QG{M)YG*Nhgo1DY^#uHF>Q0PQ$Hj1u%E zt7~PqCf`3*G6Dj$vkRbRq07ze|G4j1+Cnp~r1a8RF;F5Dm28<^%!Yz|s&w^Eb96im zlGo5(Piay1?~B(fV@33&(Or@w6#~oRdFe&RZ{;UZMmjk)=UtC^wy71~0`&qNgX-dXZ?Sz)w zNPl~|;_9Z(HVXvS><41W0^CPt*^k-rXx3JmdHhhZBSr4vRllZK$oMTiPF?@gV{~Gm zI{nX@Kt&r!brz|=WLO054o7Go`J8(={RnDJ-^Q5m{$tVRAk||!e7AR zG_OMq_A;u4yh92y*0gHgyvSla(BvFP4G7IfX1gKIL%mY3-K~Pza}YbS4fKH9?fYge zx8I1?!?gJXAgXpM1?zUx#g6KO@Ux|cOOzuJB6L@xF{|f=Y8DK^Y}Y!_vdn5ymy)|! zvy*np2+Q~TqSUR8rZkDJe2o>?=>ASFM5fmbiOn;S9S)ybOAqwXZrN zZuG8r0!OYQ(->qA-vGf&8@@&^D}KyF`~AcIwjgRQNeJ!S<0kbML6lUqOfkRB&&=44 zU8V|u#nG=^0J6T7<2>|MKfIuey36AOH0K@M&__G|`;qzW$bua5&{SHRSey6+N=PcB z^8(O#Bz6T+Dgb*BkN&Q1cVqn_%J%b10sW0(m!$0xH?419SX=Sl)Zs8u*#JBaJHc?z zamr2)?O#bXNmo&gvShESQbB`*PkmAgz3q<~OXNHTU@(mVkN9~CiBBt)X+|`Q4fqQ3 z4hhtc6P642UMkCH9*yf)@ADGn%X9R1321uJTpOTw_JyJH-iAM}c>h)*A2;{AR^!CC zTrsV?0=baynhm{L=+z>*j&`OKn+ZuELGA)J-*0TIbV!#a()Hq0zg^qci7LmyjOi7AN*D^Z8t9{bUO~XldGe>e}hYPnmC_aw%N4M9S}(+{`(GuhptqDtZ93-%%<@c z%->Ash_?=J2D}yA!T5zW%i@hgXlZQkTP2(ez-4lxqyieij;CMi(GPi=ScvK6FUt^oK0q(Ps>l7!x$6>_C`{VK;LiGtGrr?jmrCq?;jIaotfAUo&lFcge${ zdn@dvB6F16PtBjZ9z3R3)4dG;_*iR+o(RD2Ky>+Ll2KnTK^@xU#Mr0+Ll)-ffXMZnpd~e2yuN~-xCv?dEzeZdoeKF+rnj)c&S$n z@~Q7{aX9%+yT*tbsT_$spHrNdhr#!4?1HXuCAZX9(YCt4(8*1O34jR+7&GjKL}!XGj$?uMx|tBnNMubE&3 z(UTY<0U{&SYGI&w@oC}DuuWoG48tuLlA_l8Fkzn|Ss@H64vnq^T^t`yKc|``<}=^% zlT8y{gr_O&)uLiEJtpNyNtAW$cDLAbC^!?@$fhmT9V@}yCjKwMCs=;ek7&=ab1pUO z>BjloKSj{-U*1HawzVu1+)H#VMISb;eH76?WjN{dBE8;QmNP8R;o&W(Dys0|9H0+A 
z=<6=pCUfx%d&l@2{q!sHytcd(w^;#|SVy0q-hR>T93_k7&qONGblt1gt*zybABJQP z?~VfbO#Qfi`T_n=;S#tI#{A|_7mwX6Ws2G^;h*}jYk|RQPr4_el5zX`ZH!9 z%2&OJtKkXs(!~UeU3;iEl|T?3shOdZ;D<;!+jE}% zlT#TxewRoBs&qt*Xf&3&_RI3sn2B*~f1p>xwf4QugYBdf>J%(@UomzNuIInPILph~ zF^cdUJ2LGY273K_q(oomdH#HK^31b7%%z+cTJN6prTAzEf4M^elE`S3>e{EVhf6x& znQvNpc0PahHH3uT@qDZXZ($*&BNaA`B78dM`@q1!;!7+@-F$Q(b zSGh`JHJn5Se?lOy)9e11-~?)WV_FyBEG&{G(0D?-Skfl|@fW46H8Pw768K+q|KVR0 z3HszXosHQw&c)%c<;!}R*REZ2Bw(9pEf+YWkD9B}b!QK8wpgD@qNlA^D>MZiNSltB z^yTO8n#HrTvY_wJI_6UE^3Nravu^@bXU`>055IbEA<-~_vRv>VCjn#l+(*Wf%sOOL z;Z?X|@7yS`tV1N=nt)qbt^4p^=1+n94+Cbl!JBD`8&yDG$V+F$1k0k;(+k2hCQge4 zTj@>d2W~eItY!4kv)|3GnCks-aC(4~#Y&(jqQO~o88uA^AEbuhj$1*{5K!^dXl96Y z>2kTY67X>n=EMTv@u6IZNjXg_zmmYT0d@|f>=!1++gVh<*-plqO*x<5c# zyxrkb@HYi?){u;1(rHXZ&ZP8bN9yB9~BTYsb?kuYnk=U_0!^JO#C9b=~Yg5An!h4JfJ&Jdv?R&jQb+@O;o9i@wpcJhc zJ08+ina%uOq@KpXbC{&zNF!5?y%anzw4Vdw3~7iywvSCxaq0=X04C?HWJ}uqV2l%n za_ityRRa4WdAOQH`?l^?yQuTyfbRbh{iHEUwS;GdXHrUYs7fLQMVS`mAh54}Y2P0* zzL%cINRmo zY}d`(GNj>2K?2z0+|5lQNG_*I*5U2cw@N~I)Cu;Hai*Oo-eQM94q(r_B0YF`NT>w? zf23wvsdIHOM<$?I@40Ob3jp}pAvbUy<>lp3)2;#OUFeppW>aFodP)P>R;!lj-Q;9S z4kwcbMcEZx$z)foY1*($`VDQ%IngvrQ*x^+g9o^Md2|p*Tp5qaRBcfG*b`jLRo){$ z1eWKkUQ=G<$9=yh=bMt{H)@e2i~r}r0WplM&x>=%e%=O$-R(2(!c#!RY;&*h!LZa> z@=C-#9rzOr%1})SpRsx+RDp#m9HO8p942G(qVvZPVzC>GNCzf9u zgKa8y7p7od)c+xFvk!$W{^k-ob7BEA&+^L?j(rVTUXT9*0c9}L>&=g7$OdawEj~^& z`luki*cvdifu2BLk-%39lGYaED>+-*qe0ZD>3fQ$=Z>@dPeYeHmCN&hMqx*TBNNXd z@}r$W^920PB+!QnaY``HVt)n*^BTp7WBV$>1Kt)VzDFlhn|mc%KBj?gDgm?m z4Wi46akzzEWNM(dX_wds?SoWA+@PS#wqF##B4MYB55y*u9A)4)sM9+x6I3HqcVu%7tDwyxtAm)teXV z?U!z1@RqvF>bPPxcTMeU4Od5Xm^CL31A@MkBY6LCe%$dmldJN#r+lA|6C-VscRDi_ zJC<5XOBmkd-`*#uc5`qLhhoae!Y_$Z?A@FRv)G)5?5HL2C{s1#%rm$CO6J6+St9? 
zg8)9kIl+V)XxPen5(02Gb29F}U({UU`ue)2O^8yKx zjJL6V3}(TbciyN7vyj#rnMK-vz}DJD{@}cTpP;+8C&W2(Yc76i+dZ(!aBO3olVq{l z!M+bzc2{NTN+a{u?q=BI9> z&>LqRVzr)^vYG!3vgG={J)~fZ8dhGU3_p?F#)q*qdYBfC)TO(ac8}Gmwn@~kF@FE> z5(;VdDPQ$A1~!F7%$iIEK^bWduybmiNfwj5n&&WW!7;r$tRkOM-*Q1nyQ8BA&z47M zh}J@9p;>!k@xuE-Q3~~;XH1!2p8#e)2~W-y)5+B0y^|F5XM)6PX;wCK3Xq{z<<)QT z5-<5~TwJGVM(iMXi6Y)+qPM=!r7ag8{t*_GY(F~;61rE$9_9>(x7Bg*xMGf<)Hw0> z<>75bMh$A`IQ~hsH;b2(*6%v7)GJ3l_-NrDWqR@VwC=*V{)CtOFXtX%f(ug`dp;4p zxT4+p9%LktPy`N5Ssh{Z)IX10MM_X#TtHp=Z*d&=6B1)wwlIHW*?VxjVItKmJw+)z zc?<&MYR(h+3_g|8s-C{8U2v%c(wKZ6<$y++D*RMh(atWUIM9|@NO(aWJO@hj%9g7~ z`xofiIROUAD($Mk@=sgUNksvBmJPA&tmJ9k&3KO+Ig7->JjABO)Kz)K&S3JXYXuAo zS6oYW!X&(&`m`QuQJ_J%H#$p!&FLgxO_~O$iCqiBStI#~J`6Z`7Qrq~(3oXAb9@D$ z8;ZP=yX;n@qoYZej#v;i76tX_81k;td0fO{@H>7Nk5wC#B{@D$I5^!4PJ^yn310K~ zuV2QbIRzT@zV2b5q)C~chUU?gw1vrr*x|@Uwlt0OZu*mkuzt5f*L3`~j7&{%e72>j zHsiM6mj5nuy1b4ia(%khRx9xG)I$tDT5*An_Nb2qG=GLokwPjQPg_S!#T4mvdyZw| zd*)rL3Ro4ENRj~t!y|xE?@0*ifO$$BptQ!4w2s~vKa$r##Vjjp$RU3?E8>ybF?
    iwuK_S3=Uu#|;y-EVsbc&C+)Um3wbgYZm=nCkf&1!)c-Q{tP*sb@3$-2|+ z*}|CAs4^(BeQ(aJGq(Xf$lje`rfTf%TDleL?KJ(OzGa9BCHu^%v})S*M4T&3T(Jee zOLcf&lq;oZS3n!w$5mBna9UYei9w^u*l2WYZf@=q7!x^@=DJ32Z7V>fpoNY8vd{<`}`?qMW0DxTt5A5#Oo zu3Ibynwl}RsuoM2l4MS{0=eXQKO`NoTW3(y@M?$*{D$*j6M)B28Z7|_K?^v^{UaYB zQa2bBZ^%qQ_U&zyJr|^UY0S77pOE^ZSow2pQ(12o=aptSpy?!@Gp3)=D8yv$tA#bc7g=K9#f zvZUBD1o$t*NHIvX>qN!QUl3$mrx9s+CjHUV%vNegeb;u;E7o{wJg7cdR~T^et8!pG ze~gWXKQbGNH~VHpZ;X6eE5sD;k3`U4S4AE(ZXe`JUMtO@{U5MU!!5XQObSj_JnKt& z^LrbZ+w-1|W-0SD=oy+RCK3h~db7%hKI#(&x)oZK&<)Rd!ENK`&CI*yCgOsLrJp&`6G}oM9P*kp7^;R9+bnhH z(|(c9<2Pi=Wfn(lW zIyjdcbST8_jruW67&sf3d0q1;YyYO(TL?HwaFHeq-90L*c5&ii~%GyCk)1g ziYHJj~`;t$+Ttn=u;c2#&E(tz}jrZ1No7=7)yb;UXWa}Hrbb6{?2r>lOF!l*N zQ{ByUd5q!x73kE&C$BHS!pmWZTPwlN)S2sL&r`Ow&~IFS-Gf8{$5Z&ifmWuz zN3Q&+Lvg3IM5Jp%3fE1ppZfM&R5}3eba$dP>L1`vQHHO?YKzGRK<-Kq(pU2SbZW6P z0V9+q0BzXLS;EGS&d%15Fan0CbfKn6X9`z`2&RB+Xff4|-K0U4`(r^qxpR|k?a2?b z<+lv{4RkavgEXq0)#kS@FDIP&z2yf?4VCwY~>nWBBJw9x}W4X}y;EOHUv9|v5tf4PnIj3kbenN0=c*~*} z$-mWc$o%ye%0<{F=R-_lvBDY*-`}QS_k)6joi5_pU{9e%oR$+#+Oco0xysI zOM!iE{SEUfBdr@`ADU`+#ffQ)aRAdyG2Q$jOPV$2<-FFV(AlBb{{VeC{+567EKKG`&I5O7?IHP;J=Q zrC9eS;ZnE&@8Zx;Xwq_s+YJ13i-(RKg(ToAvV)9@1T|(bL<^pCcZ+OkV`s z+{*lFiV4ODy@klqH4-J3IE_%ugEP+#CF*bKffau{{9Lg3hIY_t7<-(AztH@%X_s~_ zkeN*xqVELv+LTu>8D2tB6F!K4C;oBo__ zH^AyJa&{P#7Bvu!%{85xjv2QRXxItchR7MK<)co+Z|&PxoE5}qG%a@xP+N~G7k(y! 
zLw}amf7%Q5Dxr_S5m$2!y%^}ZxE@kg>*>!mx?u=;>5R)wI{|D=QDL5Z8TO@GBgC$ez&CCYiu{FFx zl|b`)R{nJ3Q=><=5CO0oUM?1p!S&|8%e5bAYpd?!m=<@cFy`b&`LxcekkXc5GX@K% z&FUI7I&11EpCNEhm~w2~24nKwbkYsoRA@k#-<8kI<8z2Uf09D~L9ku!#k*8)!qm@W zWE||^I7i2{(jHKm>@LSqk_!s;n8s%uYf~?Z^1A8o zTjL~ZMnuUWLGt-+WTwWGVkPd!H(DCm=G68+Xk*YOJMXmGTc#;qG4|Au7 zyCx7vr-GXNRcA{_XvX_OzRMTb2On2zxGQq}7d1$OOz^R95}${{1P>@BW8WRNf1a^a zKq8Mu9;B52hyU{5P9eEC3!JR*^{8Y&bp-My~ zysm;*6j+^v6#L0&RkL)u;JMj&W%QHiuc@9g(e4(${)Z#B)=7ixu(`!CQUzAh(MW zJR~$`ScwQ$gO`+O?PFhaD(=#&IcpWuySn~_V1&~g?p12{)7kP`tqd4ZNd*M!oR#|K zDv!~-8qs3sd8fxHyuWLY=MbfEC6Tl-#0@)cLb_QOM2l1kSQ3VGTRFMck1WI49dFt6 zSG0*xY#b>Ch#zj{YgKa5!cpdeo;WKdWJs_Q4fG=H|K!~Hyo3|` z1JQqrvw!m;Fax3p8~Izm3AK0y)x^4tcTBs_jq@plLedhb7C+ zC$@Te`Hf#6=Bb)&+5&ATGkp9W1*fOc)M=^~7+iJZ@R``nI`|0auSTNcwUp3iKsAF* zkjBp_1}kibKW*xzc{*D)lRq7dqY2p%G(e}2E8n7sHh^9?>rN;`WWI4f?o{1kKRh!M z7+~#cIXQLabg@kfc?uOTL?Q#4KHIs};uzmEmROAHY)9Y0x7uA*wWOOhH-Cok(z=t$ z$x&ORd^LOqlAp^`=t?T@vM=-kcx^-DC%3)!F!Sy}RROVz)hIk=oDpF?6D~U5jv)+) zR9;9_Z7$uf;(KfSN`(diA-tu^aa zArnh8=`pOpxeMi_qN*K1BV(7K&zKd4vD?G&2k$GUL&zhGtW}0xGhq<~<`^Y2BH4aq0PsC`&MF zrlZ^^vSxpH?rTxl<&vq#4C(@d|L(&zwl#56Yx9)FM|pAVX`E(LE2^c#Ak$i)i3=?& zg*Nmv;p}$G3DF}mXdC{9)kgR&+TPSIjAn124)@F{K2^r|rkJpQG5HS?jQIROc)^8b zFJLir6$b>o_jX(X?#e!$*czi{X>&p0z8vc1&&mr5yTDfQ#UVbFbDqG6S66xnAM*`* z`9!KJnrL!4l|8)k%Di~{+!d;zu$%6k;O}tfjB()dV=`EknJ6R-xTudstFrQ~oOJrD zdUU0-_e@ZI^fKI_5n@dQZVHihUcI6z#;L zbkXq=I_icp6a1!L-k(~#0s*dl0uk3^56Q-@FEe!~vysvDf_pGva^fNB=u(}$Tp46_ znys#{i>8O-G&^JHDMTPuw8kk|Mok)6kHkgS5e*r))%#Os@O=FI_N!9CryTH-JJc*h zY2-~hQ8XZcIZu!Mx#6clktZ~8s_yVoM4pLjRd5hx#GD!Xk9+Un8QUZ(AgIG+xfdH; zER)u4fS-^5ZEQQfGpo9r#^L%7*b%DGtIKyL3dKolr!R?+T~70A!?%YKzLQZRcQa?U zW}X)ccNMobjhzX8x_yDaq15x9ZXQV5rD9soo^L|Zx43SO7ZZSlusvxOnKNs}86yUT zVlIy;J>IB1Qi^zTlrQN5(Z{nEXQ_@q)BnUx2X5x&hFHDT$6tdfd&U*7@HNcG^@QJC z1bBdsmLW*1-#vuKUr==vU56wFTET8Sib{^@(XMfD6ltX4leSf3gPhK)ameW$jdQh@ zaB4`<>wKz?vVU9boPGXwrx%){?2FDE*+L?|RZU)kcFEnN=DYdnRnPsx(Caz#)FC`J z7Zg1&gDCGWLwE!E%><##c6^BaRf>+{Jbcdg&Elw&UKr8LR#!`&-}gY4{eII|R(mi0 
zO125%X>+!oi(db6&inc+F_%ROxhyEbbU7XF%_7dJ30IGPzLw+ck2;R#h~PM77_wi= z|312r2ScS=7SuGXWL+@}*Xlp&7d2I*oL^BfOfya=>c@p!D@L!r)uPXwH3q8`IfYr8 zrqf9txAbCy=N-EkwdEJ58-9ER(q{YBimb9ThkT|Ce!jawT@#L7hzi(W>KC~9cl+!f z_}5S2pXMXa41nEN9Pb>|yBnP%mhPO$21JrLuakN^4x9dSa5)nHpVDdz;1TcfF;j31JC8@e6GZw*F9 z6@9BRh&jQ^dybAy@5|_@)Yv^;mnL4m{dXfLcfzX;p!8X?XK-*^t7so|f!5wT%ZG&>~;l6sN3q1_X<%5C&S=gclQY0@Z&L4tCIShlK>7(xa+ z6?d@Us6j0bA?&>;ZVkZccVG zP2m!ZeWBz~kF~~Tr_ne&l~G@Si)0bzWW|;%idPCOy54GoFTk^sfPsapWq=an9RMG2 zwnM&zy{T8#F}yR4Wca*HggeCnbY6s~@|GmA-Ehf7#3~ z=XSK>J>S@-d`bRezV34~vTD*%s_mvz6l0N>gyP;_dV2HlqkoG;%e|fdQj8uu*xQQg zxEV?Q-t2wOA*r0rcZj5^<$^%TODifTcRPiFU)I&u0lDmesI z@bKQfuiwCZio>P5m{5Go#x{}2t}C;xShcU@Dh)I-ge=X^4{^q{irC4SBy}I74lar> zB3Mhk7>p6{fB;8imkzc4aoJ{S|K3Sre|LYu(Hcl`CE^O`rMUE9;BPGe0`nqylzOIK z3Vwa{JpaVP(W zi|}$&q_X7ZGk%J@u2NG=?Ea38GAE&Yjc3IMYhGeMe2NA`#*$3oFB`_PksoEJBz(hL zaJ7DpJiJ%ivW91%yqJK@_9>Hbn-15>D;9E;64&KsWAl92H+u3}D9`?Nx z`L^f~!n1KH8Z<_}aW#}Rn(Wc)hCT>GV?%9IRfQK z@MUfD5=#nzNeJ-nB|0Y|6NSyxU}pWjTVja_Y3+5CTk4A@CWEBGCmVH?%Jb*^-msut ze*PFo6@Zh@8)kvmA9uv!T;%}YABOfaO|ws1Hov(zIf+zsdrXQ5*S=DVPVo?$gCxLZgG)t^ytwi zuo+r=-`DpIF5u376_>=_Ab#3^GO-Fo8ntL+PuC^0x8t`8A6w#V7?UaQFR0Ty4x4jA zk$g>2OWI9zTQR|_vZI_FAwLxlmiS}H63@7PE9nu`8QY18m`XAp^QG`5(}Y2E>M)vFAa~y_^!$f+x zov&dh>xZjq(y)Z7mkT+Ol0?PY4^MATzu7Hl*JKO-i4XrfNR3AV><_M}rJs(Sl2 z!3`qn;ARoKWGAOqa{7+?wp<$naxbrEP3?3<4hJr6D>Zv5@^@-~(YBm6iMe?|rooYN zkmwP662tf$c-j6}yJJgEI4Ewvg8%p1>j7+(M_2xTHvFao&8&{kW7r`Ug{Ko{oIp!o zobs8=fb zcs*&dUG3Rr)Q^XjxI(e)9iPdZ&tzHQb3P#3JYS0ZU<0r_l8k$ofoMLNT}O{XDTUM3 zFc?o+#%^-K5PTi%|E2j96lQj%7a8~HFnCtfCa2;%X~!22i3NEZO_3#N(5ghDVcBKf zx*^sCQ~V}qbr(fZ#{HuGU5Y%DE9!6vxMzyk_DsX}wcL}6FaM4RVp0wSF}Ed-ES@O@ zC}TXfU&N=)nKiZyf|@uj7n0Hq!k+USyL}ZwfN{&xNHoBI{Nf8bd|&x2C0~~y|L)5% zpC1NwQY`b{l|a^y-JA`qDd!6D3+p(`8sT zeLz5a`NbK=kQ+{V-BG~uLL6V{A#C@icn?L9kl49I9KxfJyb*tP015*z%a0H*pt~2` zG%M585IOJOxYg)`ou@1w$_`&rcbIOIaR1(!raP<=uZG3WPYL3*EZvh`q0QqK+Aw5t zT?NJfBM3mBYkcXT8hVJsVQ}Xs58+T#D)_UR5*~%Q*c2jwIgQO&#(`i?C+RU^iv#WW 
zK>qC_`O^(Z3+>*LxbAn6TL7_!2sED5{%5TJTjGc(J)~Rk!LE-D5HcI){TAYfS6Pxl z%BF|1cRxGcn-0Rk%n~J}dS1-msI+3*74|EK6PW|mgRam9rTRVNgz*_u%c)fh5 zL_*#Ha%0Xe|J{!EXSM_utBxV?gOBp%VZs@jcN*K!E`Q zslQ7%-z4%s3)Wa%v24Rw{Fho1$p}tX;Z2X7n}HI2$1!*m>NItYadX6bz-mZ?|U5_q>!jR4{K^-9ivlU6$0nFlscF-ogm_Um{H-Pa}8HrPHvmj12iLgbCcGSz+ zQwZUq4)#0RayWb7^$;9zDE~rOqS*ls$Pjf$wva^l&c<`Kubd$4U6Ok8+4{nQq67lC z$xhgdxV)=PL`9fQ4T2_41IRS&4bry;OSJ7;NEV7oRAt&-_Jq#S(S76btFPfix`sUL z_q={hI)8r*$lqR@JAH^uJ2jHUA#pj^3XeHDIW3I4j_6`%{$Lvm&AJPEPQt(v%Vc`7 zftKV4k5d$`qI@o_!cT%P4|H_oD=I6uPnoWrTX_7z@gN5CCeO%G*6 zgAqsKT%n;;jQ=_2eg+)FrLYUNEDKwPH==2vvj<4T|X}&pl&T8k86gB)$Qq!Sw zXn*F3RVxfWAmwxM*_IGzai~Z^CA)2T%?*_~7~ZjKfA~z2iI_Z=48G4;vJ|Z3e}9Iz zHIcXJ>c#awx1SGIPlq$*6xYP(Vtl<0KF%PDdmw_u_^b5F+r@zsHTCFx=X#>)nkJf{ z1s7KRH%a)+1>xl`I2R%dlMPQGxqGTI2ZEIBMBijh9K6OqQMnv(@t9YF3Uc9My!fLI=5ToGPxj#wDFYQuOnMU2)^~%o6kNF9XS~HZ?_dKd+y_aq|~G0;@S{jN`)J0BK|bq|uFsBQo?nOVA4~E7ZDszcT3rP%RfxE5A>%6wZvg4gbqBeCluOknCo#_I2skb3g~Ri61V)0L0U zP-U)L4U#c@O1soc3{1?h^pSPc=d9f2GdH+b8fQOuSnWZz7%%QBcsfw!LSv7YpYxd3 zr2dqC^UfPQswq}{E1!a^y&q#a1EREzg8L=Fer-?9m`sdq5(${Uih{1+_h-Xp{jW4@ zobI4U+J<0Ys?w%AD7ui*wzIzdiF^J%!P?2_`}}-73}uA3Q4S9VBnoHozc&b+G*DFX>p2G3!$I-7-z%z9t#nNdY zO8(3stSt#InQz5jiO{i}PAt_e+Vzket+JnNxi29i(ijY)hMW-VMe2i-T%)C8DGa|8 z*5~pm2%N?55(OJh^NqAa$1;i0UWtYhXiWibuV0UYCNa>_ z4V3{N(ac7CoaD*1AOymcA^y|i*YWZ1C+Tm{#|R&%kHujD+UsIx_bVIi7ni}VpGGk9 z`XvOgd}{7`Vd4!zr)1+a5?)J#+>$pRJ5l-fG5vY`e%NYUB+EKzDF*pgMu35X%I$!r z8kkA5SQ8(OwS%A5A(ue_aLmFu*S=<7EO@K!LTOXVfe?|IFh;q#qf>eGVzy7{qN`!T zsO}ydJU{xIVDe`utuM(ltl7KOzVZWhya_JLwM@uKndT-vb+Yyy#@6<)Hdt)aNKk%* z-)CnBSYuijX#I>z;bg5CTT2(#)4La3=6Lt<{bwLX&_GMxLKCNUyX0XjujW2im#-RH zshmrBl|VNLghX-%U$eQxZ99I~T%M`K4nlza;it?O2_OQW!%$;(@4<^2O#OXp4qjCi zicB}wU|q;(7`=|xtbb5NpmEUIy{cH#R1|3+yQzJh{4Pmvy6#djA&k7k*SrSk54ena zx_VPx!RVh;^G?-^cl!TO_T}+V@9q0j6k0H3%{q!KVW{ks23aCIV=KEPAzQW-y#) z%K(@BKviYsx9u8}I{x~fLs~g^wfy|5a{|qB>q1vO+EQe^dfWyaR^2AIo^}D_P033i zvPxhk8zVWWO-#G)6d293d##-;rj<5U3w1;uUQkzse3_afoFSWI7VFcrfKsh=a zB54z@lPmIWc5J9Yr=f1@H4B4A)ZjwwS6P+ 
zZg-YfX4g0vj0#K~1sAJ>357(RV>n!NW=@E?BWR)_iTVYnhOgw`P8KE-LnLQPf;E`OH9L} zKS`ajzgql1K{;|CFkUa;zy8Dz0(CI}HjC2MDU&;?5Y_c$6SNi{ygGu~FLzO3+hJ*Dgw%zY!}vKdG6J zq!+CP4J z^I)91NPQ^vgHS8=bOG`fqSZsoSclT^K4Aa%je|}8i+_-P*toytvNx`oCUU3SdG$14 zQ50&Ldn;V!>(q{_MGMk`X|^yH2B3$L5)yhE-4x{(4kY8b6yn`>f^lBr51NaI&!iD9 zIOSfj_O+)bHTxj_*OcdhA`y*~pRKK@*h5ntTN3Hf_e4b-hAG%ZAo1f9tY7gUD}|pW z@P3`CL_NfASP0sAwseWp*{=6u6N0h{f_3Tp8!gQO+w8sJu_^AOF{yilms2HC z+FwJj~eZ)KC6zPz8cK188JWVvuN) z{UbTdqJsa!Gtt<;8@Fh!T%h3vVE?2gk-9_`tkL9gu6U+qh`E8AQ4 zR3`~yw4W{Z2dss}Ga>Nj+J(LMXrdjN*>%2UE7rCd77l7No@74Tn|J)^-nh zqZU^;H$kG}6r>jHtv7S7onfI3_(AUUG0(6_l9;G^I@kNr>eA)5Cy$QD+MSZF_EC4c`-eE9xAB|bboT=wsatwjgU(>a?lP#ii1eC6^S;mvfjni!{W2cLxs zyhNw#1R;%36^k1>F~6=RPS>@%J&$YTPxE1jJl|3gihIkM#)5Rvz(@%%X)9RbamElr z(A1L1JB8jWv41rrp^q=_(Po z(id135M&c^vP?6rk5JBMAQRd9D9)A#b&pxJBNb6vgq zA&n0(#YPgok8xXJ368|M2mwqfSsO=)a~g16ouB{t(=e5KhBs6F#sn>H>gDh4K+G6F z+y$B4zrd06vd@;`maOQzLT3SY+Q`0fU&lr1`E00C)zHTHz0tCko=DcZi4=V|?_NL~ z>DImk@p%i0J^iWHPM^x<*w)J~JY}J8KtnF88>GF%;NZ5che2 z5p3`$HFXGG*E0#?B>&g&^)FcU?+Pjm16ktGEI#_irSyM(UYL{ER#4}0@qvUnfYdw! zL|O#daGWo7b#u!+IMa!?Cgh72(BYR;VHe1woo8z7TkOHSXy?S z$B{ta!7nGgO+01W^~DJ?O~hGEdP4doogsnU2!A~)5c&=I%n_wn&Z^S&jGeKk-dgI!@u#RFX1nGp1;v+BId z=NQCJJ@JUe8AF1A<~cCplEO{BVwR``Vu$EhXz_vJe~SmltM1RbdRV0_5C6kw zg%JU(rg?e@z{~pM#OFJBp$(%S( z*vgb#GQKc&%_SHzMPk+#M`9Mdw?Xd`?Bkn&<`zWch`Rbi3C%#pUnvX(ubuP$kBzjS z&92I|9Q)4<>R;|rwpRPFrug?1r`SI~ZyO45G;ERC;T*K%&eUF~G%`=vL?BOQDM}$V z1!FeJZ6W}-!)n_*zr(`KyqOztSCRO*W@pU}U6dx0VsQ6c6YoIl8V-%?48YfMdli{& zg}CH;rzX7Q1b{EuxFlWj;@%of$X5~q&wdNAvv$wBELwG-2u?36 z*>CHj?v0+=B#JQ|j<7rR!_<+uE|D=bf6~GzWrVwhw5YxCwWNRM)s7*8>CXCKO z6aM3#h{k~bFm3^bXGMA@6IMhD#$$MuA*9jFKHBVki;Gs0xU54c(~qn)!VU{E8HtZdNF7f)4$D3FoarL@^9Ykl z7R`dR8eQ75^hKy+AA9xuS1gj<_f@DWP3f^OUk+6?JWI822zvfOc+0cWL2(p<$BA`V zEV4bIXM+`Ux7+qNe;kt=H!GFtqI7Xjo6Ok#wMi@e1V^mBSU9Kq31~=gfTBn`|LZ7? 
zsK0KqocB}t8Zk~8A^|=V%{`gHdj!h<^L$Puo;#A99_-7G`-#+u0O}qV&LcY@p~&;C z30b1=VqoC$>oxSZmoZ~zFf5wG4176;8d1OPgP-*D3c)F1wt>T4x&RejIIg*@{Q_`; ze_+2sD3uo#&vb5Qe}Ei%6yCD>L!+ViQ+u^gIe{6R0H~-wF66*+QnhKB^Z2v}Jy8(= zIXl~tErV}~9W|s!yzB0e=OvsKm5QsVoN3v@LI56UUYF%j)1S`lav&uNAG^$nKfoo0 zD0PotSPQ%%4)Kz^pBlp|dhT)1mp{a9;2O&WF$LrB+)K4`#s-B=>EF2rpeL3(+P#PU zDskR;#i3QvrBsEn8%SP}C9Jin5+(vHZ3?bPXxRT6WKWypiWw;JN?8pzFD!l2vZ-dh zb1SbE6WBO4v8f_0Y9@eU51wpknWvBJQ}?H-ob>1H=`T@~gFXIs@{gY>KGF&y%Mufk zYu-Bn$D$8EXv332cwr-H45UX z*JH+hrc9QdPs@I{>$dCW#nim(7U;K4v|5ddx%?elAl3+Q3PGC1u-6u(?6(CSNkM^n z|5S_ThVahX(!-j$`W}SUwjU(yNzXDYeYiy5h0PspKKRHB$U}R84Myio?SGLTj>}k1 zN{|zSf$OS(NqI4<|IWei+@Ps04<^t}M3kSlrl92;_tsi3LLfL0%9u+!&FUeW;+l`O zrk$;2h4fYj!q8@UT=QfKVo!>+!#KAHbo$?^}@4hQEBnnF> zB98|C;p@FURIxq`CYwson|Eid1E^W-0$=WBTJ-1tY{KrO;M{W4cgSEFy%{#B4fD9@ zIRV>*;j19cWA*A!(E!~ zm^vZceMobEFTv{~U`HapXM9>2Gew+(W+bnr6LwsKa~~Jo z)3|GUO+^OWONTrLKwD94Ah1*VCEtZ$G2v*bX*I!c_|j|LnkT61d!`1(YyduOYY*|n zfvi8Z+cYnwn#aMqeRAnrN7xMDL#a5G)06R<NE}M;RH7!mtt|(8 zYk9b>GqIa7_3=gNsnA(iPnmviXvO z5ob1TJZ)_FwkqxDWL%@GF+3f{+wR4~oZBEXMPt!IKKbn4fIQ3p2R(odgp{zl7Vph7 zA~!v@?N2y7C*Nz?YW!CCublA5v4FMbxWNxwydV0{kcWqg3~|bg?zxbq1#Uj(>4yp% zbe>f;(*1M2zJZrCvXvb9b6*o0FIl8S)MxMVrl9u{ULBVIfFWWsW-OJWa8Ga6mC3~h z+xg)XN=kAz6GSt}tlwbls*Sm;rzogy$nsEGc)*}bCFozf4te&)>Jo(D~ z^S94f_Q||bi%VR-75e+;AR~KJd*yZtuM>8Lf!Lp5gg{Skcnr?_87P8OPy_>C2_n}E zH;*@fkW9-L>=JRdmu82+-BPM(L$rR=GqdYw+c-~p3K0aeTNs_8JSh5<$bmY4?Xdd? 
z?EO`<9`OF9faMi=a)p)@d;k7kyps#_)yzmjwc?E8{z1OD=l?6p#j)3wr%TXM;s1GM zU~~sJpTL*SqU5~;p4CmIcXQ(^s597Nq9`s#L|N``NFzVktAxFc(Q!Av_4NER z*I<#Gx=Y|UN;Y~xC5NhPeuQwOUIC?CCf*T5c>9WNc(t7&vf11fRAUG!J73Wy*tB+Q ze0Gv{{T8UdQgFs>%|BqN`2kb=pd)VTxj_qgZ(?56d9eCgc1CTWMB7`>NPP&(>UazK z5K$sYE9yGd@vq#y|9p8l7x%lL9O2+bBq{b0oZt)AvRUtM4PH{6bKiLV-kE&=hG0BI zED;ABPAI@Pcmp`l)W9U8In|=quyC&uVJlMeVH}h4s9ME>B8m490wKC zJWZrVujcM$u;Bq>NxugfLJdB4jPTioPyh32|Hi-na}@Qwo7@&02XkyqZ-X(*E(c1% z`Yq~B(^AD%Off5R`!xB(&7hZEH+?S2?)Cg8dvGiAuN~+KHR2}5C}`jINb&$LDR;PYGOq3^d79|35Wqm0DCW51eq0t}4>qYguAfsR+L7J!%NrzL zJ&0e{2%36nn>26{q`Fcwth7*@pH<`xd7$p$5*#6MXv-M*`6#>b;vQwnV;_ICR0xwl zr0e+2YaW4Ar@+n>;m!!!7S_ifRCj&F7Fq21_pBQ`Dhp6nYnsrq74~)T+cmzJiPh!h zZ8I%XklIa@arqX&CPByW@VvkNsBb2L$*ytSB^Nhnf!{tlFwLkH)7s>=f=b)wu+_CWpD?frER^3tF>LQmW=iqo>xj#?NuLeC0Dw%s1D z!IdVYC@WRNxU{*a1nPTZz&JX#!N_<+KqZu#_eT`3&_3(lsHf=y6+uf5NIu8ySS6$@bj?TLsaj!D!Mdv$q5J<@jjHX9<2Z{4QWge|itI z5WVMmTU!Dly>+HVJ;L!KtX_T$vTf%aSR?}ruUPA#Gxa6w{e)jIJaK^^NII%Y6ok#5 z(^NQp2bCHIYfe4^#=<-C!U4lzX6o%@d}YiZi@EFQtzOnyl}9jfdf(-NR(GD~?e%e$ zO|AAO5(mX^VRB6!@WT_Bn^UN{j5mqkF(f;rvSd*q{(_B~kqW>|D49ane`987FYni; zpdaDbOV`VhL|dpr9lmY;7iORl85I4t5wb(9;Waw@>Iuqy^~5vL#m`6n8AqKX+6=X# zWMl?D8AQkb={!h$jqb9vp5Vc$=oc^`dBpGBrV6Y-`yy!SltoLcAS{9&Lftbum_9Ex zNewd@;79GCS!!H|=z6=^}!#6pEnTRjvv1wb{e-O;yC3Bh!%9&3RHPHk(@#2eX@$3ON zVrJg)qa!JY7-{!jJ8;-g>q?;$_tJ@gn5e?CKE9Bj6p1F>K3s;Q5sS642E?)jOKZk0 zEnes)Z})q%u!`bn`p)gwDD8mA&A%I&copLBLnXcYUYY!C$2|*?{?I|i6^Pd4dmqD9 z)U;0k4e-?hcDabD&ed#8<{o2_luTIvT)gr@c(4Sjro;8&dv^F#1q@Y2m(+)xbQ(~V z9mS00d%!+>BX)D3X%cv3GC=By#TCg59Gt!RFFh2XJ5)`gM)`hI!7xxAJ9a{*ec&w~ ze-&bSgQgTf((muVL*2*o#a;Q6{rZzeYbq48li$j0U(wrdZUf5{<4NBqmLl7e6YJ$V z>t75yPRhSA$_&LB+u|$5UChr>Ki`{B2;4~OX-O2*IBQaV_xXcqZoYFJG$xpp!XDY^ z?kSAS$$S&gR`8;@k{sSR2Z!d+KAr`Q)fe9D%X(4F+0p%p34y7TDjcG6qL4n^bMyH= z4^xchtZ&`@){w-)huAk+u@iqeMf1-q^cPU7DJDiiHB)A~oOdT%RnR-PE_}Ey&{np991&Ob)udalTSMe1%<6dlxK#CXZ86<_E{oMG}{y z3UqDye_C2)KVwylqG|$38O$gf?y}$b^RKz(U$5hJzC8_0dI@n#?%AZB@}?|1o9E}X 
zj31|(!rSIus!MFP-ugk@IXfhmsA3(j{L)!}ezt_yZ_;_Y=-UDH7UL6;o!>-Pm~1Tj6UP}Og!5{@bfIT%lIu7Itx5eiC}{h8N9y z>GwRj_a9Pe>gWs_=+qpyLelkFI$cV6cP98G-%H#>oWO|oiMr>CtZ$XSn5?cmxvKNF zrN?#0XTr6;IxFkdyL+)IDG4re>~it1T-s$;y&QRTzfHQg3wHZ&spRZls&#J&b&5q# zbjHkVrR&?<&y-8PD);`xxYVZH;-sce|A{DM7XHMJVuN{;iT3#+%dOfV8N_#lg_>4( z!qlD4-+kXN>E#>uZ9s2ms@X8f;K9y05(I`kJ)iCV5ht5QVrBuoY&FByQb|UFQqpnu zrW2&^kK(4}4m=OZeHP7U<^2t7^s<46eNcCL6O4<`B1&-{kQtmxu{+TTvdzSqavKtO zg2OnEMevmt!gFt&VW(X2L5QOqZejkMwf~j)sxPjN$4tI9>dVIXG%Uq^z1-F0^qbS3 z=U~ka<}N>Kr-g9|!U2D|hDw;fik3WrY+%i@3{8kz&tLvc>qY!xE~dlRk&e!o9GCN@ z_;i%3R$;ayK1_f9j8&!t528|BVOTT_ooXYce%Nz5Cp&t6b0%uztXY+J;ENY8eubia zc)i8KlmS)TY;f{rIMNu)_PWSP*`kx>T(t-Obf;R&#Lj1zWWOW$YTc$9kmTgO6V_A~~C0UF0(pW18P=%=wQOFAlngFhj8eKS| z_;mjFRzdL2%XEVE;&C%}2izJ3w8}a3JaXw0;Y-(bY9HX{Ax&}mj+x-9$;e{vxrbB@ zPI{p1cFKHjkIpSrtUhp`Wb_tG!_P+WCB9m}tFs)oS!H=pI%}&4+4oV23#H#^vS5Q>lLv!UlndLS;VnnIpjWd4^5T7ha z(F6^`0C;SzJy@EG-Yz%Psj-3`(%|2pp3Sf}ctW?ZtNm4JjB`u2*EJ%4GLyqfy3;(1 zuyecYmD1PUp-gS(-3w<)>c!8-wT#9^`>Z`+Mzn?M>Heq{1vzsCt#iOLM)hI^w&_i#PG9qV=IUf*H{x+wEpac@yoRNwR_cD@maT zpE4d_jH0pSE+j(?T7*huC~GPkph+>a=#r8{yHa!FZMPAhnMe=lpPO+pS$%(6HTF*y z-JLhzqi{pyi0p=#i<6TisEt=M<-J|6YV~(${ED&csShpjlS*Fvw2AuW!j zK0Kda(DTg4`HlA%JJVh+l-$6ss2v$N{mjjXQ-+{v{QP;_eH?N`YTLp7zv}7TX+21! 
zd}vh0=s+)wMz%?Ydj0K2ce%=9>M!_ zO*Hs8fy7k9!uPpnHhMx;Tk>mdvXgH33)|H%R2L!1#gldBruKRt?xc8P>l~8w{R>P6 z(#d9M22<;0ZNeQ@cG}_7Mme``UFp`G@Z1S;l;kJu=C6$W8}EI-2H?U=db09sMRk{` z<>aqlpr$TtxMgrUNb~>HbPm!9et0c1%+}9+`VEZA$3PifS_kS5VAM+V71bp<_#7;^ zWXk6^SZV)g39HEk!?Ky*NERm6q-mqTB^JY73U%yA-3!fVHQ}4aG-KE7mG~EL`|m33K$~lr`%nJzfIuhWv z*X#N#;P^@L^~A@gaQiAtJ0<>)sPC0a$(6XX|vo^yO1bz zANP6Hhd!|PM5+JKFE{zxIFz~5z*~nB-Q4;F_tKyy_Z%&y+j=yewNBzMFe${aj#~|*D z5cEWi4CA(llY$(>yqDq~*i=o~Lb=&hn49u0*NV};g+^A2A*8M$%|c9TR#RPvIs54( zgZl7lugk6RU^2g;+CJ(S*P;e7+RdzUa%e8BWzRn&A64SloMX)CY^gE(T+E zyh71|EnO996hSOxNLDbo%G@Ma2r->SeS-~6UjTe4Z@ceN}0F?sl?w6OB~=FKp)z5-(f0x?%W zr{A;*u2X9Z9UWPOiKa1j)y`^TLmy4g-P>*~lO*tB^9R(ORP{6-&Ch1yx6Cc{IV8hAKz)dP{_`PTRNELGR8;AvxaC5lB zh640OFqZe_dI(F^-r=L-Mi%A^kotGbtlhdui0UB+cIZy%ly$ke!^&Eg{ZQ2602n&X z!mdaA%fFx!8TY8y+m&u?k%66BkQ(Y9fv^%Noqft0!I8bt)}s(tI(do+c0O4-e9gej zb_jQgJb_br^&-bypn8K-ESR4ize;VY9rzm7y%dfz2yVHk#e~BIR_V02J95+F_8Rr$ ztdwF>9#U)EeURFaR~+O2FArCq;}G%gxUX{ob|D6Pomh6%owSZ4BVdTB4jOdWz}0cG zKJeWHQs0I@XH{DJqR z;m>cp2ScHfKU5nT4lE5GZDeV!q^ZIhf#Xu{{-KCzFYNjK(f(E^wwe-Ab+W9_BDAjv z?g1ZGM1_R;lAVRx^qYI43ZOlbMKGpwOd5ou&pJ^pbRun(lmgvM+>>6{IwggB%gSlF zV(hnLpH7U8#V%Bo_m3*x&c zIz3$K#o0fVr25weg$9*g-Ba5w|3qGtN<2Ed%uUDs)kFSO7UZwb(M)Ye{rHa-fQY>b zCS}f6Gq9!4D8T0)KG$!ygV(_PVZ81~CkNxm4~wWs`KqT+LUv zPi*qamb<*M&Aa@~U*H9F2oY?R*z$Y4xx@%WDYPBZ%$Z%hTQasu1;&n7IBu3s;O&fh^9)6!$CP+o`A~|g%VE5tF9|ye%8Oo@uVhFQJG_jLgSV(vcJu}} zD&+)IH7A`Yp9QzB2}RtT>a|g^jEteXwY|_2!-=9rE=Kz^m29Te@4)=I`? 
zsHQ%m?5VmVj9a?`G9sZUSt-ysyerG!ymoG3zq1(rC@T4ai5Vzop0sQhj*-Hu`3Rjn zFL%b?cwCdj1zn~@NyhQyC-6yvjSkZS(g~ob)v+ON7Jp%)%*_c9 zN`33jj9^KbAZxf|RkMK6%QNVZh5m-qwjQ~cv>=y7h|L~u) z1I)yOiPP{)ZS*MG%gkMjKf%^R(^1!hrN17E<6pb}?9fsAQ^AOb9OBUmobXxNn=C~j zb)z|rY*(lLbeer0!I7%rhv$$d=B}slEvpChjzLrHxHSgU#^%FAN7>XRF;{q(RWMtc zV0j5(VACl-V=K=_X2uiq=K?~!k~!**ddQ?_FLg)GdB&CKwv{bD)Ew|3_Q*o5$EW&# zovde+9dm;#Yc4p~pGHhi;mQl>gb#;^r#TGXs0DPaijh+;+CeNBA=WNK|BY83<^ju0 zGDI$I&n#+qwd|>qA}@gk*?#DLFHfjL7+O-nSo@-nNo~?P^H>u3Y&y)ljFCk1BqvxiOlE12cz{z^zsF045H#~F zMq3*niioexk3`d-#dh^{c8ZEe#=rWLaDh^rj#%s_%Wun0Qz6Y}H+Qc5?98C$VK%g~ zc{RcB>tAEc4Uf32qP$a14C$pNXTfxd_}o#S`}#b|QB^09+p)2+GYYlR);?fl)?j-+ zoR7aTyXjZrLU=OB(HEbTZoLJjMa*%qG*Y8FiW>+UI)IT+esTV{o9wwK9Jcf-LQu1# z%I|CRx=A-f%Q&;J_3v@wT0gjQkR20z9|AgVU!vZ!d2y{&XtrUgf+WcHltx;(n+(bq z7PC_WHqT_5KE$n_T3dgD4tRVf{cK%|Bsg;el@P5J(H%eP0HE5-AyZNjTE-&C%4Aoc zqLppw>A>p0itUoWlLZ2|TVtN!ymj|dF$v0d5J0&>=ml90TX1G6urdl~fG!p;oG0 zf|g7-vC5`3BT<@p2W&z*~&pPBK>qGY?(^y%~^wOMvT zkEwCCkhLo!u4`0V*^tl{#68X+t@ozr_l|Z?e?z)2p{MsCf`97cFE@)8&6uND`;X1Y zLMFPd*TAG29BTg%xg9j6huJb*KOH)qtoqfLZ^Xh6Yo8zNq!e1pjh2Qhp>f>Vg3mJA z%-eI<=~K(Vf$u>1>~c$Js+jx`Z~KX<%{&fU9e2uVoo3j~TX~p5D+ID^hehj!m(b9d z1$*e#2M&;6?ouP98IW-#HQ|WYw5Ye#nruivAXw#c!#<@(;f+eFqH&-y6!6c99x&Fx zP@zW7f)h}$UT%-Jz53R{whKUQn-OmDuY#>6!|KY4Y)ozUQbiPQDpAufe@3un+?{;tkF}f74lKGPRi2u<$?f=6A zZtf>2w4b6Me+c&}5fvhPmv~AxUaueMz(8ESI=b=K<;$0OPP5Xd6J*&DvaugD&eFDN zc$y_eYE4^W##n;NnR%wjE-i$s_(u2K6GGs&hoW)$#XTtutQYOKm$g(HZDYa~wpH@Y zB*^THeYN{~4%X0t)tCa6V>fm;lrO!^w|T_R zQCZD)>`X3#V*`cWxX8MkyVv(y)DH{;7j@^2^Rry`*I#_pa+B)WwH4<{>f_PM3s`&l zE8ZfCEj3gf{NBS6!OF(?<(y3C$2_&s%cC?Qeoict?*W|`K`#+^%2aB;V(AOirq+&4 z+#2)ns-s$SS>A}X)5Y25Yg3|&meizv9njUvPy>ksj%BK6+SyTCvpdaLVLQmQb5*dOwyX9W5rA?6SotH1JISWG9T58W|ZC z#OF*8W9{3LM<2y085NDWst4r$p(9?=E$890*tP@*f93j6X2^DAgp_ybYS4*$3o@}d zUz+VJI{r)@Gh60B@Kr_z%BbI-3?}8GT881{ftt+7DzTFm;rzqwt2I|JP1xp-v!FqTY`OzRVg~b*C6{#AD<&y ze<$ZtN4}Gg3h#FQ`TE(Dq7mt=4eP$FJd}ARpbyw_n#m_jh$IVq`QGUyRMcad_(!W{ 
zv&AXN2OLrb-%tvWj=qvA+#GUBBKepG%UME*@3_Va#b$@t9m-G9JAzElT5Qm#&Co<_ z`IK1ll2|d>QxLKwxeN++AL4O&`HXyQ4M_Aa0M4*x(73<2&b9 zKaw`&^n<{c$NYDg`h|LL2g!Qc)W2E}H1VroEV<_u^p#6qt(I;1_KTk-`YOL3;YRsP z;CGu$v@+vmzVx3FRTbOSXrYkvvdlj>+nmc}=^t&0@U@(gYxGSEz*da>zG88VCzvEe z6gQP3Ni)PR%jRUV`j~hv{tzeh2pvNB9$@Ok8X3jGWqRAVF5+eKq$e2-vU;q0pvopx zqVP8d`&DP2wAP+}6F!QwfRYt4?uv(jv-?P=X>cV{=YdtmQPkYeO<#@5OX|AbTVKuz z3op$3FZu=JN@zMB;qiDX^ePn1LmD#6I;w+5-M%y%ji2V11m))>;*RCm23XMZ1Qu8o z^_e{6V3a{}2UqkO=9th#57B^-&bP4-5bf}Tho8jx zFGRR%$NBk6q0*oz#!^cZ`|KVT(e9KD-}>lLfzCbru67@xf9x*HxMUv`m&&FNE_m37&`(YDXE`OY$Vil1B z6N!pZak53u0?Zf!!LumWv`grbt@o6Lc1RTcef`V;U*--OayU85 z?Ci;X$T(n!7vgPQ{iy6Cu0NJwlkzik;+P%2{ml5Zw_v^*cht}R*EcR#IXmvr*J6$< z(A#c<)ljIEM|5?Zn@OC)`RZ3Rn?d#Q^QT!JN`Gl}O0sy!VW5U-SHSEDH3+}qft}&l zNwDGEn^O%T5RNwrm480(@-pf5vH0`38^lvSCcQGJ$R7q8VnlVj^GAO>iMwH>2UoY> z-NYvk?thC9>WPy+3xg1-$A++q*5|Pt`c#MR#WO|-FFcB>s@QtII<~N|kOB-{mWW_# zEd|0opsUXw#ygZ#ppiHuw`Er2#Yr={jQK+XO=8#@xIe#sQ{Jeu0vEQfcx3nJ=p9t77-17PTiD%u)-SsT{ zk0TPA-20>4AaAyY=pzcXy}$um;GM>*0D6TH`|UoGM8v=+m9b=(3FI%AUv7fA75i4%8b4sIl5pY>R1r0KE4tHZ+ z&QsHSyq>aDyIjk#R#jcURACR&<%O%^VYr@y2=;6Jn)H;#`XL=qH&DU1v=x%H*e@dP zn7H=_lV7s0WI)_8?XnzTg%&;3J;M%f_(U{Z)AosKD4GF7wk$n=2UX4`7{BnoP9T~y zZ9d?)?QO2KMCwIO)P@*pP7T90nGx;$ApE6BS>(qcd!5iS5DB~xXHS|3COP-O_t5xY zp6rtrimwu*m)h4jCVWtvBUh{D^>aTwy=elX%Ho-F->!IEiVfcC#Zw}~0e6rs%^p0E z$NhaA?UW#^=4-3#x74~IT56|kzMxrHp`oOFcs&qM<#4So;s@yh(J@6BbL;2)e6?3| z?AdhPUi-bh9J*amikAF zb^Oy?UA_WvS@{ulP%&{|G2k7xDo;(i3EJW?ub(;ed6S-ws5D2N!ZIpknaJVlD-%ek zqo~`5?l+)PF9S_lO)-ASkw4kPOHnp;xMuWB=8*0NiyybhL~DKg=!ahTZlM!Y(h$9J zXnR+k>#I;|`|hWuiU`_{KD!Ohkvib3^tL;&?uZ&ciSW_MB~RJH8)G4XjgogsYK{Up zDVpEx#t7QUQxnIBSr3RP_Q15XGpMCKi@%EK?msE$q^|1`JF`3kZVIixkM}>_)&wt* zs9hxspwoDQ6I7jAK5fw=)#=fNE&9gr>Z|W@L=Q>LLG|A2zL!($0r00Y9 z%Czi0j2h>HF`_U`4*WRD9SWp7 zCn^l*)g|8Ip>rxNT9OvIey6MSLcli+cl17Bq|#^W+}PY?c)Ya9Jz})6f8*425gG5U z-)i30wk^V~=T+{yJWi(t@mC(5el7XK@0FNy&+f8@hq6oJ2@Puc#p9&&@+!MB;iE}W z?URNqG&&o$_*Js|aypCeV~(f;D%jy5HyCe=LDp9`Vpc6bgbcx?tEzkCEQQ9(sk9D; 
z$l$X_83O_XOjqF@8Dl}Kv1oP;um({PEWKSo)0tZx+qwp^nzFxqhun%#&^0kSg z2`vrNbUJDZ(l^+_TIdQUqG8P>1Q!=gN>@QvlF9U!j>Y^Pv3u$_vv0LxZ@@`5UEn!s zu{Q2i3>()Yi_}o*EDUZX0GyYs%+9tEHvdScdYkmV zzIv_PzIg0Ml}b(oYUDA+&Z}VOo8%IT&N^9FW}OPq0aRn-0V+9R=WC^1e{iP78QLpR zs`p@vs4c3+$;LkhRBOd8;hes3wt{zRN~qb-zh*<)#2K!3`?Ro98mWqzMs%J)LzMF4le+b#`33?h9e`fdy0QW`cHDYh%^*=}hi)!z zVL_IA-Ppc0Mxs={!8U~N`{)QvyV5pVrYf}z>qhYeG2MLrO!`>16~kYMq^4yR0aHRUjJHj3kMXkkU_5Bf;Bb28b?99s zeV-b|LB4j%e;{Z|pU9?kPu;{DK6i4iop82jSzpcz?%BA3@Gvdb*herQZC%3wXC{Z# zp{iL5jt$grvl`FT9*|8=JEg*;SnIA-Uzu+l#3d>8T_iJn^iD!YiNeMoMzFrIX)}i} z>f#bDz>lQ@O;M#Y<`LGWtU+)>4(vbu_z2=n$*ylQA@EVt^e;Y+##^zOjnjT$F>%_8`+BTm&xJ`K^m ztoHQgk^GstNH8mV#mJwju25^Q<(lts*AGSp55T>X$y|GX*lb*q(^x4m&X#P@BvURs zV)onfW&p2wX0u@ruP&l|uwwGapsWk2c@|XWvt7{8+)EtxKA~ zxlJ%M$3yEp{ah|#$Lm}n8^ZT0MhTV}lSjq*sUw@$3YjZqthH>IEzC7&ciZJ8>Z-rHbc|Zixv}f_2u#xfd#EhJ|JVpteq}WCiHDuT3PlS5lt650jUv(PE zC}k+rvWf!XWK@%2_(_o*)G?$Fx{LXo7SMHOt-NuvrFcCmV6%_bxhsxptok&{nr&o{>j4d1Y}wQYG&HC|;wxo{O1 zMCDrv0d*{u>`i7rlHxWyTX@?JBQ;<=Ocv=Ct~DKgN>j1F5NBI7Q=a=VPz1IqkVum! z-!0VW;)+KxD`W)>M<0H07-H3uM!J331C*a_iD>EAq69zN%K#zuE1HEu$@l$mgq4)~ z?;31YRY5Y4dISPN`j?;teEXKI5VreT!wlH*%Vw{FG#+w%hCoN|+L36!J15dtp$`%U zpm2fL7r(4pMss;hwtCCI`7?AeLCVq9M77ANYOvyJcVe-bIaRtyvZUIiOT+!cdd0OH z%T9(!kI~R-xu1K(8@J3vXO!7Rhx53dG0%vkDnO!EPEmx8@Oizi2IDybD0bHq5jFBJ zo`soxU}^A7nBO$kaw@LVjRCbUr0$JBQ*S=pt)6t{=cty&G5vg|&S`b{vHlxip2%lEIGeEQ>C0Cl08KN}*ovL8mK>UZeczD5tRf+P1SOJ}=001uB zCX5uS|5TO|Vr6lRGvyZi`zZaV>VQb&KC!ZxW$ZC&8=hA(ZXg%046k}kFZXnGybrW; z&U|v}#^p5CHjRV#i%MWky+BEUYCI|SD0~?EBj`^+pPRsN-xWH0tJF~#-xFtJPOIYX z5x9FMz2A~?vn-PH6++p$5VHpnTV^b5Y(paA;$P!VSlmsDC$Z?Y z5lQ%ejJ3aze*z^ze<=IbNTrH*~xH6kFeP=&-!kUVV7g8|HMKT(8sf&X}P6!11)hvLBL?IAajH$X{hjZ zVs}x`CpDMiB8ZXqqO0i2r4yWzd>p&rDM&~{@hY+KTf)}~ZrCc%L*#Fk>}}685V+?Y-drpH4;bP^4tne? 
z8Vzzk=9hzAo&i6D-%(=V`KV4{VG7}4m8a~J-WZp!HeGCE4(vnL;PsI=0EF-hlJz%Z z^}IPbw;V+g-Ibtx3B)0E~KNf*$O@_gofOctW(ZF8BNa&61sVi5gD@G|pJeypRU*}-R>aNEc z-^8dmP;a1yT)swRYGk~0gd{3?5| zSk-~|)=b#6;QHgzgV*Ub8OL+(#GXRZa7 z-k26g95%LW%8eB{z1+Tw^T^U^N^DgRzL5`e4#is|(e{Vs z()+^+_x=Yf=r$FE=KI9|WxAS0=1q$^NsW|aMU8RE=649HKv(qJHHKr5F>GA&ZN}Q! zwBffD)ID^wZ@z||q-*zfwFllsbyRD`#ZtLyznfsEYl&J8QIq>SgsOFbTU3&Sk#X)+ zL2FTY1H0Y7Fvu)T-pcJXPGdR z`Yd9_&Y5reC?N+Mfl3# zIsb!RCPrRHn@;T_hB08yRwx04q5XvADZ){{{K@oqm|WZ=_#G+pm%~E!0qo>g_X$#T zt*hY5wf5~$;7xTUv&6U^wIbo7h8+ps3QJ&mx8!QN1hgGiV5(K)V()qRG~GX*a?uIH zX-0wMq={l$Z=s;>q!3*BS2i(!+CNiHl;6ZuK08x5Gqo3##N||!cX&2{# z*EZQjMGNaOnF8$!$u3aBupdQJ0vMiuVe_ywzO)y zZ16j9S<;+CC2uJn(GHB*iGBIwD?AO^9)X8(K+S2rP1rLyTsJj7KHhv)%$vSd12g+c zeF!*p2b#v_YLapZMPypa+PT!tqYd36THR$-w@=sgFCb2Qdxbtuc;?>29Ix5E{&clT zyu}3yi-ixk20@W}u50^%ddmuHL6YZ|D89~_P-GMoT#$`?7keg^D4r|C&OF;_fB`O@ z@~;oCbT0$#cnw|arf_K*<)+*;jms(fN$)A+524zUwTn-V?6PQxI~qV3)@EQ&y-O@I z$+`w)l4fPnF+OZ#X1FNZi;}JFS-j)Gx)$PLT9_wVpGEyVMsG%i)IwUW;fnt71TXxF z(Cx6OM6eNQaGnM$4%(opSUsVirTjyduvv9QJ=Up|E@W6H^XzKG`C)tFC6g9c&~6U*kL)^cDgvj{A>ruPL3W z+gG$2G{ZII2?!CE`qvKNG%g#r%q0`B&GI`ClaOzYxs$sfWQ8am z40)-dCLB`2e^NW&$fT#Uv!p25lhh5HmDETryvyCaOmih4B1JOR#V;yQPmVGkkwE?6s%FVrbxv11*|6TUw9h}L``4wOfj2ppACH6~F@6f(%5=p25 z8E-EcZ&e>FDB~wQ(zPFP5ZC6XKrE>kp)$=WlednI`!POX(I#raL0s^ssC`k(^F1u* zqlr9FG2B_KuV`q5?YR@Mba*ej5m;6VVY*9hp8rGAWB0v$`mIw3mbhSs)Q)fu_h zmL^?D7HVQTqzb*R;9ir7o-@#6ox6FpuuVTCePYj;0Ubo+{xv}TjO()yCDi5zDga_z_&J}Ia(UYr2?1d5ey+|=% z>XhuE5P~0UOTGkEzErmhHw*f2)jmOaMD;>>|JDMG?7<8Rs$*IO$UWmUs|%&Mc8Tnf zrL9c#?kAmIie%`Q49n1R8}Maee|{SI?xH=w)>cUNC*)Ak6>*^>s%t)`0t;Gd^0p5H zZh*(Sq&gc{KfRz}cpq0PIF6V<4t3@oQWbDz;2bzO(I~RRox>tIZ>RDraZc7|l`yKU zC?-p{umQ_iuyEe8#yp^(9y&4;TOt!#U>r{PjuN4dCB^7%-6hOKI=`;QnWT-X&7ZfQ zs(ZF6gju(dChN(_Nt5G4@ks=_kf#mU8bku&K9_FxnP73z4Y;#SwjH)(CW`I^@p-p7g zHyQIPsOuJA`m9vl4vKSD=^R&UmZ@VVQSp`_<`La+#^);gJg|y+3v9Hgbarm+J+?S9FgF&!uur$?EsfNtEDzK01T8t z6)58lEr%<+2P33@}g~%TVjh+vFypV#N%uF~Cg)m=h 
zS+otmzTIkcMn&Euh(I>~6?%RP`0IynvWlb%H;Mit%2r7pC1{pu+sYRF-}1lb-9%BcYBwjqxrskp_k)0;fvl$Ux%@~kSd!Z z4Xi}p)Yc+O4J=7wLpLm&P7)W4L&ig28@W~u%jAJ`xBs3sfUm)2|EbRustRi!xw(at zZPrY<7NZyR3RALf^1AeaGDz?j*{Owv8)gZ^GJKQ8Pb1aQ1Qv5+F@UjXeq%8O< zO@Hn4>LU>U$WQIckeys>69Ti~Z2W?U5z;TAWvbBPO^Y=>>eix;1Djmm<^)Omd(Ul+ zw2y3^KuqWsXJnc`Le+V>$`ZC$DO5lQ%7E3$|{B@QK>abUdXRi@Lf2yAXx+o=|$ z29v!fqOJL5ASf(CepL~o9GNUj_tc$BeDUJW(oXDAkf>XlgoCdrCVwvC0I|KXZ{1l@Uhn+GM)qEs0DX=1t z?9x7IgY&r`|Bk->ZAu3#tKP$h}!0=7%j_{AsI!9uOd4=nb? zmHx%APDT(=rqJ!jdesLF-#9iY+PxM~mw{KSDGr6Drub#&KX3E@9T+atp(mKeV=mLa z&%K&!x9u+F&)0~=iqv=O%F_N;pyNG^t--g1hm0GLd)PYK&N}?5?6`^kJy}0nZ_AEsE`>on5II$3w#Js_0sMqGB(79f|I zIaf2Z>jLisyDBO{t1kIpggKNLY++Dlyjr~ARn8BCaIGPfA@;T8cKh@XC=|26c zP7a6s6~+xynEjTfuS$qsEVxwc(Cr5Qv&roD_pgp(eH-zOsrU3*c$v?t_@A}+)i}zh zTEMKBvHa4GQQ=E{f^OM}eSLhis-g*CpVUcWrX31343xFkAUv&^6sDdnv@E;I%hx#- z_jTS&O}?PVwh^j^wUd$WGR12Y1zx@W6}Z&Btz@LbjfPGE4N6%VBcw_iH0X_nliu(5 z?QKxbibeqJN9hNU-s$+Fi86omhsyhVoiSLx$c2sx`l=a}J2l@H?+3O{ih6lU`7q@D zODNNPRtLb!qdB<%#~S@S9Gb9`)kJdW(Rj_wx&u7U$R?*MQLED<3FL&+l_t*YeLxp_ z^5y5%7`;3d5}VqvMEz9@Qc;^&l1#lx*&BSt%fn34-ac)w`L#w1O=2^Zj$E{znI2hF z^v`{^eq(lw5FuGV&YKt+2(9|5yJ+37{PY^rzrw;l%>+F=cc%S)rcdrmCO22*kwt`9Lh;Q*f0zv7x#_O7s)>2)z#2RX3hER`F1HRUP z6!R#%=$_kUIEaUi$G&_Z|>@uD-VDKr68(!13cg5^1;=Zy&1f884;&j-v`jH zTCg)l&yann44bHKcr&rdr)^Q|MSACFsuikwODak652fA18WrS%?nqiuCl+h_Ch_^X zmqlC#uBe*(F8gIsztV{AvjvZDlK{g!wLvA{QgNS9uivdg3IhoJm7k-fH_3oGbXB6h zsd1*AukqsQe?jEy=&N7JZ@wU^p?IPsBO{3RH;bL4V~r#1tyR77&CsXmD72C{5D!XD zVa`-OTkmFuY5r=#b@=(kJDedsL5^5A8&%@d6MCRf@~qF=p+ zTqp>bu&pCkyf5Bbt8RI_%NB+7e>L)OP7i~MSg+WfWo!`nJSo#$ZX#^`T$LfMX!PNo zyl~Cwjr#bUY)z&jas`l!lpCCrVbM&Yn~d10_<*#Sp}3EW)Q@0;&w#i|)|rbq>Lzb= z7O-S|Sr38eCE&CF$IF*9p5{l-aT`9@PszxTylTVft%fb>}hu)vkg z;fPKov4iOaQk#!Ggt}&&q$zQA)w(QyMpryKJW-*#{Bnz)rQ+o`uBM6(mo;y(SbL&_ z_d_57UU^Wr@U6b3J1wQ-(n(($W2hKu&}>ZgMjaIm{m;&QKl;RXoqELaua7=+L%?l~ z4}!-R2qN8W)e9&bxRFXFJD?xni>`elF_~w>)bb}t~M`de>}`RorZ$r#9cJ1-~BaK`x#!$=IJI7 z*MU9LfK57Zw}~CvxAkyIp@dGgRU*(7jmtpHz7t`=g1JBV0FFt@9fppPiNj7c=b?n$ 
zuvJuE_fnTWDNg@!@UeXd_w3BQNWIMwHB_tju4O8)6STmvL5yfpMBj<8Jumh@4MdEt7?3faZcu9=LQVti)7jE3wD9`fR`NYV`{%~ z6!GvSjY?TIH9qIuH4C6(?N1bW?JlvR%l{Kg_;HM%J7|VJouDuKj~h3O5YZ>*1yX368r04k5MBhtCMXiqZA_ zBmKZ-8)F~RCim>O4P?Ax-tFL4!%j_B+khFO4d61@6mrV#I*bQ4B zN=r(XLJCD^SK;RQXI|=kePW&3j8aWrYPA1bCh|)bAmY@l?n1K2@esLcYNRoTX%IM1 z-atk$w>$aL;*~{RC_15$j#&aBlTR9vI8}k#iu8fz@i_8p^p6|hb-I+eLUn{Gr7gPP$oeA`7%s3H|_3=$7{+8LhH|psT zqI0;d6M4Leg=Nnz)3g@TSPbOyYgUS7RVNwa)XJaK9Bi#fF>&uw_Z z9g)N9H&PWt#>cbHStX7yf60@gfG&#t(>wg}UzWX}hYiqpsnxPt#y1}jp(c@2^<`s>ORdg`2skyu=S~QGR zDmZ7+_FRiijTDwTjA^y9KW?bU)K+To?(^r*vWU|@YMB?hZl5N6Q%sf-V>HA_7&Do$8 zQmkKs!4)PXXVeq?);ICa?|tDdjq*Z~-t@^e_7X&4_Wvg()b^qIpe#R9Z12B5OXpyZ zRrsQtP#FQ8*S<`gfkbx(HoswuuN2mvk@-@5elOYy5<`oIWg6#JvVY9wd=44Y%D0zTD-< zB0yS~MaNs!slw_kiou00uMZ$S5T@KHyrXR5A1O(Frw#DN$O@>6y$RzJOsrWol)AX1 zpHd&5Ouo`747Q$I;25pU>i#;sdv`+sK}qAX2e?}t)7N~Vb=qgd_wCwrmo5g7$I=}* zhcNmfCj))bBBj`=IzfGE=taazp~Z{I?RwBw`XUZM^hLcn~8U3ncP znwtFE+BH+c;QYkn98~Rm!W)HO7GK7y{g<+T4#i$a2NN1mOz;m-=JpE8SBki_)*iwd zL6a$(MxqfAT;2{G{K=% zvd`Rp<22%NcKHal-;Sv8oGb%NXSq{r4D*en2paT)e!s+jt_k|`HFnKaz6&7)Q4Sx$ zUN0sK=SZg5E%7k}VB&%-`?&1ubanp#$m4K5884oa!?`3l6#Sr3z;7SXrG!wb>Un$` ze1k|P!f|1diTW$g9o&^T{dOa(gz?MBSJ?>LZ#d?Wsuu6CC8JV>T1G7tB)_Z_0&$fR zjjN5-^#pUwEY;wWKc+`@FLqY?$}9s`p%dRL_qZr-JH_2VQ(XTfU+aISINtk1YoE4F zG8lqNDeolm7&<(Ue_#5qw;a!p;vO+3z8h#g(;-Cr%UR@5eY|IxnX_?%&R(8W-=ht) znAuhz#g>qZ?-s86=O5f5eg-M%b}&>goo7M1XpnpJ7wf^1+8~RQ0y( zIrLCDooANeftnDrNEJNsH~rZ5U5B_upEV3KWvK5Q#pCgQ^oA5y!8DerBR@;JLg<1u zy#7l-%TLZ;ZpEALLfFEWeW{S-I0;71a%L=SCGTsIGI?|V*lj1jEvn3xesV=_g|%=T zGf@J-23m`3PBT=a72*8kHDRQdsiGGsIb3t$zqtS60;IG!eudSZu#Lu#*TTI(bhPwy zk-Df^k)1&!IE!wv81md9pj*~`==w@0(}&vdw_L;MIYsq}Pl4acj7@X)N1gLR*)rO9 z50*YzD73}X`-~$@`}lu zuWk~3sNb2kE)&LaYVsu?6P)T2fs|uO(n5h0&LOkhNkzbX*up0v;wvMs#IYMvS#}?} zm+rWrILJX%oGpYGqA~c_mHg|ij-oTtrV5@ont#s396I34i;U&jwnWe?ipkqd!@Jg=0XFq{&DcBlbAr zQ^)j9DAaaCGuZUWi5*xa(0DSc4};l@9-WstOBoxZw+WUj!Q*Oc>S^z#`T2ch@wn2! 
z@ta$dU_DM){emwz!7XTXQq08F=}!Vr4bF-aYl?>B_qXTx@e=5LIPy&9 zpK$=BjK6+!;yT*-UAL+Gcdw8Tx&3Up{~i|pFVEFwazZe}i1Z6kGXa5C8GYb z_{dBA83#_020MRism;KeSw*3k7xN+X*8h2=R>dHKppRGs&Jw|}wXLBq#sMMbXNX!{)85H^>HX?YC7T+-B*#B5ju`pDSP z2Xoc@A9(Dq6N;8iXl-sYP^3y>=wN52rmk1|9Cd+u>z`39IWB!`!|D`7XHiWcRZAz_}(8b50iFJB&XQ!e3T$A`zw4A1|uN<&?;t)=9!L+BfTP zT6~phZdNNif*m}}I1{tB(atm%$}~8f-9Y-81iWd-#1F0y^PxbBPef0obf#={WTR$+Bg3hp0A+8^N36Sf1{5&XRtf|ep!F` zQJzm98Z-1EkzDRuo_!?1zRpkuh_d37dH7Hhb^U^Vil zs?{6CM9`uP_d6%1Jm8p(+O$8Ic0BqB)yxcX*|lpIcJSNUW?b|->>w4wZd|2%Sx>Fa z`^}o{Vt~7@LRxgHJ^$^@RWXpNutr_9q6k2}&rUit##*v8l!@0%J)-3_1Cs}yZySKK zI3mllYPTO93;5aH+1b#c48Da6))Zfx1gn(*ow850)c8X=j7n$kZ~QNmBF|1<4l#XP z5-9zzNmfTy0}e{QBway6)z{r!)&ngZW$l5;VUxs`F~_6;W>izR9V9NB0g>sfU5`AHUN2@j? zf-ihQL@D=KP=4$}WH@g6Yg)jwJnj<_DpKiAP^3ynL zJ{pck>6cyCpkr;#G0Yp#^zH3$`zSw{PfiBXxK>{!cnPp!MR31>CJsXHz5)K7#?rBW;l6d?}bWnsA``I!3qhU8)o6PV^fw=CqC;_^ek;S-n$MvKJC=fm$12*x9meX;i56= z*)xe_p1^!A#H|7wQDpw(%I}K6Lwh&~vjx#0Isl1|jzM-5v;CBFoyF{c*>cy+=E5E2 zW4A!8%qJVEI&&=UbbS&Gn|zKH9y5$J@|16 zGzL!|V}(uRAGVM61`;%l#B)CQURDeZ0`S|B=ZwXKh92Z(Ax!LMR`GJ zkx+Xj|E%Ky)|R*b9O2t$*vt86^UgUY&cQRuTkM#Xj}l}t8Ay-u==foPpX?9BbVV5K zqotjJFk3_CFheZb^iA^=2y0ngnB2k^JSrmh+)ZU<6pU4Sk==kjU(x#2t7R_t45=h& zjbo^zo=H%SwO77nZ6D{EV|q_^Bklm65)L2?80YKV&8pu7S#enz1P?JpKF&;-$VYQ`l|GGPW$A{aj=%=(-bS$$aXo}C-=xX1Xr&n7~ z5FM3D5Ekxkhfov=p~$ar=m3vM>R{5Lz&?%Wd7C3SKl*e#U2A9%$9=|B0k?byGY84u zUt;v;BQr)#G04xKYkmt!`YglVMdY=mw{QHG@(%68KUNi3Q05>7iCU6Do>acT8XdSt z59bw3cjJsb5kKqye9-`REf7=pYKyRJ3Ds{~?g!WY0*a@j6(Sk(FLn8KZjf#V~=uZ1TNvFbj+{%Z~?vwGqFO`<{ z7z&(zoWeh8td&*(rj@}JWzsYvz2%CI(w(H@#|XVm1QMozAFiu{U;is({P~;thw!PF zdc{WBLplZnD%KPr%Nb|r*&4`Te6L=;Y6DL0mZAk#Mdaui4U^6cI|l#o`NyIZqa&e* zY4>m>V;5`UETXvUN0ulwdnQJMN%TA%={fQa;)#>h|3^I8T8i~ELfP`Mt+<2VWv}Up*85a2 z=RBR*K`7;|U(~`+TDCQ*Gl~Xca@yv#CMc2JtJVJ7w%m0pgxXRsneatc3kf zs}!8)DC70!Szs5QeP^lAQh{Jw_WHWXac=HH&S0INVJrj?-H~u>zNVFnpfekXfsx2r zwVoOqIiMLXj0Xe#j1jAYQF$T|Xhc`GX-1SZ;Ct+lJ87mVQkp7NF7PU~>_xUZhAPp5 ztQPCzcKsHc2NS$%@o;#xR$ceM*Enf&#KT&iLYtE#uD^c_m 
zNZ}F!BlpJl0DYGSOsh0g#%`8D74FaQLzQvgxbHc0>1?+tnPz%X?PT4}jg@jUnG})? z^$yr+q;=7@D zO;<%$U}WpeMkk*3xq8>x;TSz`ld$S2IWw4Cl`BI31yl5nP`Phe#e3_;7aU#?6ubu}GUGw-`O&YlLjU^N#u*{a%!kew%hJLRUSG|-fkY8NoLc?mkK}QX>W#Uj zLkgPQdSqcM!vSobPOs9J8MF*&&EM~55}r7U2eW}VD0gH|p0uOO4o-KY#kPpnn>_M5 zrP5$!e_u}+He5QdXUe%Y4o7F3NQkP5d|EtF-%fq#$i{d|eg3b#DV}g;lEkf>(`bgL z52<{P>N{7Cp?g50mVbV4N9|sp!7YOYN?@9tCKbFZHqNq(wmR!RWv<(+c@W zsPrCh=YIQ|F5~}E=OOf6)o9(Ywd6fJRJWIpmzTHh@Pk2*sNcRyg6;7W4?kRu8J2PF zc+t?%a7cdr*SpEAG`)%A*A(7fRY}z~&MF$H8vMYYk<1rZDl%bR63r4crePtJ(FAUv z!NpR;5X9M9jPBy5#$t0cb%PdjB|2SGb|SQD5so6U3b0gXj#wo5;Wr_OeA2bDP%rf1 zW(W6K%79_u4m@xGJ;@HlKS~Tl_I_FV+4S|U*V4NA>BlLzgEw#b=5PnIyO+4eyi<_% zHMp~j0oG6l1<*8c?}#}JAUde|Vkgtt6hg^8=3v!Z(I!}J{l%->koOf4J*&mVX;r51 zV!awG`nw4Pn-wwn99i`_o*9;M5UEvpm0;8K24G{P>4jLo zwkN`#9$y-ZZx)xyMTiEwUbm2HqKZFdj+Qv)*LTak`={>tr;dMm5523T+f2H+4uj^% z9Bx)KE#Wxt=S6PUT!N5SpskPYo`NI08!HkrUuJG!Rf)bvZ6T3wIih}xLK^;bmiANN zVZR(j{8T!)F}Zj+ZO;$ngizpnc8i19u{|7QBm0@$X`Lsuz_htXXxPi)c2H&gEVc4X zo8M3tlOSgm%uwMo1#_an*rm<26#p~Rog9RFx-Wjr?Fx9_ol{Z*yM zwZ+;{*Y>U+aCi5Gza%`UeF=s?{eL~JknkU)L>qZaLB zempUmeDsF^q5B+qe7cb;zP5=6V%6gc_n)%=k#rr_sD_ezxpFpx$gB5>r#Xd{ooYy- z3lBr|a2G6A-#^I&ve5nS!Fr*0I`SgS*p3`YsPbBv_%vUpH_@v?py%|x0j9Wxag+Ae z!Wj%z!yY;w4!d)3wCU~^-WuyFG6sbunYhaY|5CtVo$ z#;=X+Qk?#l%6RRHVu#;+)8NwB>-w?I2y!D@&rceV z){3plsj9QMERnclD@pBFM|UG%W2 z{m|UdGFv`3ZhguT5YJYm(~0-@JK2unUzfq(LnNc=6ycl*o`r){g9PBx23v6*7>IBC zkxpuk@drYIHp_3blFCj>-t(JhV`fg&Wkn+}oFotmKOchhuG$txg^NE%O$*^hLAHf@iQ>^w=YoJD|Qm2g38)Q_e zyq6pORSj2WEa`3BqmG}7(_cyVIT!_t;o7qmrd!aX#*H4eGRL2i|8>;ZsEmG!iZ6D% z_B-yVW#$?ffLWaL-NC@Da!G@-10`sgo8@<(ol{oB-nR^+BAD@rYe*d6rW&|ok~f62 z*A-5l#C1G3N050x%Y9gw5Bua{O*>u3WFp1o6&20+V{y#J{WK6a@q}G-gTx}gD>=pB zj-I)07C`>Ri5O|lyb=sad?EHRuW@UO#}vlhOLx6jM=*I-i1YrY=9?JKNDmLdr|>81Pte~jCGBXqXP;ZMjEi(jzAa;?*Z9}_Ck`0ihOrC zrX-`AdHNmI7qkt->$OcbK=q<@&|7onl&hV@&iCO_QPz8c-j;JG>L1a<$;QNBi!M|* zam#(HJ+!A7QU(>)M|Yu?~x{{{V9`W<}x}VVJgN>vbw}c5)YS;p$hi=8dE-os9M@ z6@r9v;u-4vJaV~2^yB;)&CABX+bZK^R5`Z25OE8x^ao@?5Xl;{*8-lm%hP9t7GE@ 
zILgB)2?+Hi7wbv$8C|am(qq^(xx^mmvFh?O zym;%aH77rFM$N|{tor2)h*w6Zpt*YV*9?sL>XGfdr>t_V0dCa?kKXv01O=B~GNc3=fu6U;xyaDu!(h$o3vESb+OyF@SQhEP}~@Mcj;&A*6!N5%3Ijkp;wtlg>w40 zkSBJKFWqjR$S854f0XGjV;jKo;I7@PSx z(HVQU=U=a9FoYbp6MS$X=$7R^8nvf4-7F|ej!A7pgheSIu2$Y0%5$XVIH0Y*!Kt?X zLe%Pl$yR*)F*?f1Mkdw#lUlIaOJ{cg^KdqiZ+|4U(eqF7qvvtNr(Kz)vJ_uW4K8JQ zDa#SLHiP&s;;MEa)g7nWotmC&B~JIfxUiM zND_BYfmM<}qb~hmQwqw5Ted1>AWAUM*yzrx(AWVl6kPFFOzP+Q$MGi( z4SJp5rpJ4#6Mj*1PE#YVO=7=;O9YK%&kbCb%xz_HZ?+D+KYyRVcJ42G)?FJo$#=T* zd{%91>qhpvmPZukj-b1yBqD>7V{6b}TjZgd2DU~A_F-_WUpmeITQ_GikDEUI76X6A zxu3VLc7ZufMJ$dddsKO_Ko-%x#rkg8I4N;W{Ptr_bi&ZBa1ey-1zl;eblxTy zrD38jLN8XOO6_bws@FdZ`juv~|` z_+rz1-}brTyj|*WRopbax%U&CB5(8}uu8Mm{b!qb2>o>L$F>wp*noIztm&~TV~RbV zOBAG_bdKkD_9<}vwQDC}Gb~}(`^~iPa*@vp@;|C=?!}VvS(nyyI zwZ+eu?f4n94WIJx50QGIi(dn%Qgqt!{#E_-?KE409n$AcUAQok@mPo8Q7t;krM^Me zY-r+6iO5}4k1u~nk(}{;mTkjEY>6lc2jfs=@ReJ zvkrbwaw)a%+VVcyAgh?tGR8Hwf)N-=vfbFbP|=THTeZlSF<8hnUNex*;htxI)1a?P z+7R^Tv!mmbURLZ(+MCfH7KAA&DfJ7@kOK}C<&ii*cBAz+pWAWe-ofdo??UoseYfr% z92-H?9sPE?yZ?8((?|&V=u5+KBhth?r^pPy(d62%fqZ>(E~Ukms802|zxd<3rkYeF zJ*E*Tz#F|{@iGQp$E0=DL#-N|-M~?bmcF9&i?a$Lp`iPCz53=(<*x=i58G~NEN|+Y z*6c)t=X`6D94PzoevNM4MJZrpjnocWvQ*;ilLXgJHHHmSK! 
z{ye1*Y0dy~s)+dw!Wu<9&#SFNA6Uh1Pd%xHO)rPQ7wVeCohu`Nb|1I|qf4${SynSe z`mSkcx-2VtZuSmqc{l!0VK5Ia05z2tL(^b%>qb4#kwv52L)lb)^R=0L_FkR#r3WD8z=f;(f(6`LpcuMuEjpO*A zf!>ab)#s;mQAMfTaZi2$UWuD$BC|5>!U7AZSuG* zXkiJ4T3oH_vYtJ!o!I^5_GJ#6^LQQ$vBuMR>2JV}_dZ*+^a+_W7Ff{WAnmot?T3X* zXGpGi`|QQfy6M>czw>&I89tOPEsrBQfmvAVIt(?v-&s8UShlR!rA?Xtky7vD))`XG zYV}NTkbt^hY3`a7LLh!s87zDKbciqZYTqs_8>k)0ssx#YmN9!e9`{={g4RX3Ub{Gx zDG_HHH<4WdnI!$YWh(Z*R`Ef46D}MiYx?EBV8Xm~Wb>6)ys=;`j{8QI+#y_6IA$nF z4~Mp9#lNLr)C<)`(r_%s>TTxDtohrHI5?RDR<;exxIL_6U67UvX0m*-ul?fnRuM#& zzN)M@`+%G6??7g>x--qWAeeFP%K9i_uj`hR`-i$}$;8GHw`a`vU)&}4i~(uZ=5%Pa z=~=nRA|w3k8=;!KX)F!Tn1>Ri6cww#ehX$ZGcSANqp{LGY5zc2a?hHZkB&I+2PJ`P z&vp4VA0=JDNiIaara{LrEKB$IAtLcVIHLp`&8DqYe$-=k=_%C4XI2=uzy~cUF_Cs+VKmfmxJLBo|2d+Wt=mw34=AIKL`vq^e z)y23O;sjcR8TN1=$nOxvWmO$h?oqe8NW-BGULuP2*LPu+cmvsIRkhMSQjCAYu z%g-QcFEzsVAaZNf=5Zd<5;xq{)^D*ihJ=Lhzz9Wk0k5LR6}p>+u$NQkgFvw{&nDQP zHW_4xt{=fF=@MUy9qny9Jn1=bjo@+d;r{G$Ku;M}d=ztO5f)gRuthQr%Q+B90P5(# zKxhk0w#{TjMMWPo$@KAi=3<5(o*CnKBSLrd{_^E)j-jOb_;+tpc(`XwCEcg{tcfON z4+Y<)-=Ogi_UFiu2j^GaoqOrDC9n7Q<%K#;Dz!2dhcaEeK`Q$YDgh0Y;Xxr&wz7kK zi%(}Z{LilTizzDl(bX8*l}|x!Rfy)u<6QRkkcK$BN?)Hf9(!SVmo6p)old^gPK{a3 z!VLX-uk|^jKK_>6==*&VF(0nFxw2#mYdCM`-HhGVUmt!c7mC$$m44QG^Fc&7$kVOy z_J(KH0@-?i82LA_D0vS{&>GiTKOB+EjwbY@ZS86{nu7LD|g@GUP4)eH)Zr1`@1N{Q1f=EUOS z&A#@@-z)1i?>eFG8j@ib8-*_1K0Bo>*BgVh^90}ECF$Z!1p;NbCTKZX?Ze9iuKH^~ z-WUtS#Jrd6n})E*6LY;3@Tt3}^AtUZAIk#*&)~8ED-V& zDvyo!7*c#iIalr(M26g$FZ$uUZ+5RnuHSDCn_06@yl(Q4X!FGpiuRoWGj&}R`w_p( zL|7s*VKvw43i_o+&pMqwOECEYdt$$YSw?2levy{BM=`@WM=pNiP~yMX!Zhc%IzyG; z0L+UcSAr+Y2dM(2?{$U}#+WT%?4gy0$;CGU0eVK*mS_>wUX~qKk}Sr`sIl&8>l+;ZC)x z`7_^C-+r=x+RYze5SJ9^Osl7-oH$5v2{F@VMPNs!0E?GI2eLf75B!}JKO65bHvZWv zQp2|N=wwU%f1M)m^*IQ>KJ7VXxU9T7>U|T>&yu8>Ecu0>=YXeSEAO3XW0)(PyUD3s zdIdAv2%F_9`KLE@GCtP1ViSQ}6k}rZ+B4amiIZ#)PT2MR?1mn0<1}@?(C?LgAUYN; ze$e}vfxLRM;}bpE_Z~SxP|2a~I*7{drlqCvK1*%7Ry~uVitv~9rbQ`cGe3DCS4ieo zd0`L4BNnWq?T&|nHS0hTtcO`bWBQ&$@W;$prC5s#?*~BX<0F^S(rG~XY_^r%W_2GH 
zggZj}dchf|!pL4DVl>?uIW;yP44~6WE7$|kJ8#75DPdJuu+_Vet&z3O%nL8o=Phbr z8x9Ym%VM17mPcOQ*}+s7ICC=z;TL`&6Y{z^cm|)aF`}sXv<K|u)-hENcs z8&Qx_X&698si9jz0ZA3<6cL72LTQvzVnD>9ODRPV>5xW3Qba=XJEOmi>-u*8*}ay_ zU1r|rJy%D+&agJ2-{*L^HlYzR5~2DRen$V+-Ld>kBlU0YQmJY)Q;O3D>p zWQgi4{bAIGEx(YPvM)qze5xY+MnBG$)1Yw|<{623XIrRkAW zg~R3D=3|gLc@U)k#}H+9xZF2v8nX^=?_iifrTnONe6Gv=V52tv*ianFeU`TU z``Bn;mlWWsAebTxxx}>!n-0xPa=~VX*4k9`wH|)+$c>1`%a~SI5vqjre&hZ)in`H4Nm!`-ViO!Ag&Sp;p-nVpCIO&vB96FjIOwT$`rUg3TlB5ZVfC|JxBA? zd~ei?{FT+WU85HQo3Rjqi(?rllr)dPQOJo6KcFr zq`K466hO|-^4WNWaS1F&WTA!2ZKjY&uEO9R2&DEj>NJI(MvO0k`f#L;$84p^k3aim zYjen{UtkIcZbeyjjIlz6uT?J;iv9H6C)|Fo91$0KH}}rv7|CrC$Xx`2f^DQhRgKXi zk{tkFx9F~)E&p4P7QcZwl})t++wNK8g}3GMx3hIsNEH+x&cR!uFdSbU+t=gy>`~ECC!M+3K>il*mF{7T$pS{K(~2yZ6x6s ziK`CVWOmT1bD>T^_bU%Nepo>per3Hft~gvR}-y3@L)@!hVJS8ba6 z;~>W{mxzvN^6$%gnXpUU9B`s~VzemY;-0uX@p33Rt}AmkLh-pgqZcMr)n++w(B-e~ z`4?FzxDGqsZGt!cKLC6`@d_OKy%Kj&A>=cER1!JSK7>Yil&$Qz#tS7;I;=xHyStjm zWvoXS1WZRdG?RRKLG7(|1_MTwXx#d8Z^aPO8;sov6p0>z!}FSf!Gk3>7AaiDwk_6j z(c0+8q?7GP*4#>RZl`3r@47LZMQX)SQ~16ywfWh~<&x|;ULECR>s7z_nyptNGl3Vp z3l}hVZ(fo+h<|r({ye?C{TN{-X6TA0w zzBgYoLRYO0dI-Ee%o@8A6O+!iN&z|qo zd>3O|N9RyM;9T;VBe2pB^&FxkU*8GqOZo)%G!FNqNsgSd;$iiQ$LP{T z1s;B3tURqbnHs7bexjX~nc0WyLr8_o?D3{MYTwr0>}7iX!Dz>e_-6}@Uf?$pCq!{^ zm()pDD^57#Uia04l3?_UZ3^_B$|0>bmFACN6=v=C#JN{&j3TzoFnMjK+yoOWMY}PU%8sOA4h!N;-p@s zM7&~q8`o)^KaWRW9i|oWt$T*g8OT>ruPm%OieTmo@_0-=8LyU#T)k#>EOqTePF%*< zZdK*&7l|d8WD5^zs&3{#{vMt}8pviK5WoBV6of2Lu=`OP^(!0Ohw-M`ku4ll_9daj zh6%%*Fubbw7;FP8mh=`^U4UzQ=PL98$q!CSUI9xtP4uti@A$cP*LOH@^P5Dur1fvzHLLGRH{7kaT>~s3!x`} zDAjQKUKwIS(+gf?xy#SNT94gQ?7`c03xR6%nu*s?ZGj3=onu_8#jiX@u?^cT*wS+so>F}{rH1%EtxtKlzRYLL z-|D8Rs)zWR0{^-rdn4InKX-fFH}=O&gh5yZ%9U>^yJ*{Km~viGTDkwt>$~qrSR<7F z5}F}7|IY;c7e&1O7!Tn>9;$BjE<6X+y~oX>sC%(A!#Rv&1<@G0ht!ru34Wbnq{ZNZ zIHpPJC3o50EVfY(Zl-9er?KUJ!gj|H8tcC4ZVp8y}*8GpAcI0ff4AJDl8Lgqa}JqxM4xlAQx7 z(m-!?HcjY_Qe#+3b=tnZo;NQHwuiu6n50H*T~$19Nz>HB9S(b2CM*52*KD9d%<(;` z=yT@M*|R5#?mH#|CifVFQJlR*6pGi^Ym)xWmpfpz%6}}+?=~OiG0lgnmK`EO+ULdc 
zWf#&ZrXZJ9SMSr((uSW3HCD8#TG*McT(&*j*`|Bd+=G&AtfG=@0?ofITf^IW3%s~n zob5P=suEZZsOSR@!pkCA$h-W#r>8R}SSnMcAtq8{P+H3ot zD%qICad`xrvcr&DAawDpSMe8~Up2P+sg;ROt9-mp;2-Q~zum$kgykl9c65vvDwDav z);_#q&?ki2l52TJ;QI0ozVBm0|0%v_$wK}({Kk5(04+Lq6GNXc&*nA0>g%7^V>xa5 z*zo53ebL?V&sQC~KPjnr5OuFdJHUjlgv0A9WKZi0c@8?cK6)S28zvym3Z~N#Iym;) zC7uY=`t&3@3FcSqtDD9;5*vD!8l0rX(Cfl97gHu5wqz7yn~35*TNl(JqU z!^L9U@|mLWzcmaAxzOi}=(tDiS^vk~ymX%ceY-s6->Rdzf~P`gDTj04Ny9LMoa?EZ zJw2K~tTT@`?K1x+IM4grN;oUNa(5lT)9I)jqCmVSV;>a=dKPwCJkk;`;WV5u^6Gkn zRWC(+`7MNwBTIl=NV97)K!WxcW_N5*5&IGwVqFJ< zwXQrSIynhp;oA&2C|volj$L^f&dJHCWMyRqRuvaTY%B%HoQATYxkNV_WbPsFz*go) zd_ve+XA?CDWyHj{7-TDDZ>{b!)x0v z#vJ|fH1S)Pht&CzB2>C9?%s8_X|}gLzE;=PZ|>P*ZcTbpBX&84%VIt!?$@dZ?P{WL z_-ulQ(w&7q98-2};#KRksS9)zhF!)w1veb%(@%UN-fJgY5v0t{n%L1Pj(Ou&&Ptnb zi!hZXpSJpY&33BoJOC+g`-V;*J$lq%Z?8psnBW>-!s7e0vjK7Kwu8gsCm?@n>b=oI z`)ZJO7PJ}lGRJGIy*tnn%zn~Ed6kxYlY;%SgF?Ji6s@8beRPtFBu(E?6H?s4)DtDp zX%L~kU3iyyb`I?C!nCt=lp{KVc>#G-$8JR)@f)K+_$xNnv0D-ma$%0JH!9Vx90-bO z2xQOkB%(Q0zBpo9-GSTadJi_&?lG?~b>(UZnN%p`lhqbQ{IX5)N^|l;1jH?CbKh0Z zm3mERcW&X-3lbD0bEnUHaouM7#EtT?h0ieY{jf>z0c?DE5sM8bb>e8xK*@_ zn*}F3eX2L2JgHePkGF=$sdi4uOZkHa6{V})uGqf*c7ZFi%_fAGs~pIgnYYiVv%jju znW{Wyk@f?typ>575MR9Qhsr<4Ot>(krZ;Rufmb(GxR&(Kb>K%R_WNF;VMIwl@*;pv9!iYRb{5PCIQ3kf*MRP6d%C4A+(C9p*XE`_KBbcV@t!m8vX~yt z_=T}pVj(m?`R8c*5ESjCz94aYhL{WDwhO5_B!c;_N3$FbkV)VD%6pR4e|MFV zj4ae57r##w39LPzPr3z69Oj(arlDBf=kH=MQIAMD`OlIe)4@AOicAqK0k4w9QXv(0 zi>N272t)WU;2SpIN<;h5Hl3)wXnZlVGV0ZBfR+~LFKCz@GrsWLv1u8E%;s2Yyp+d|x|DejP5+oE5&Nrw&=3|QFWWMEFX!t zL!qL1l`#EHTsN1r)<)MTlHPF%T#}+*4N=6ZV_F5NSLLReGP>XOG6@{aVZPN;#O@vr z5S;BuKNy`?PoWMZX8uVf6W^DAt21E3`d!fcH81RQCNUbl@Wd!uui4ONx}$x zTG>IaK7e{t7e*`Dh%5dfeA3;@Kcp|oNhQbW4InXkM$C7t8Z{#CR(h%PuP^;*Xd~m_ z*dxcSd&_iu;m?T}F~*6H2y2SN+C~W_;3?4yB7I_2&PnI)xU?%1I(cWaYSR-EE zICc#&K8Cox21TW%uh48KU00=Y0OTEQ?06lQ>iJZ!b^Sb`)y7iA7 z+2qH&YjmAYb+{L9p;!_V*@!j zw)&)Zbn%yhVdKhOC)fPq!-dyjh^h4M?}?2h9nTXk{DsM$N4&BM6Ll}^OPb8*F*&^2 zK5A@FBcxu+^;~;M9Cm^6=S~o=URMUbF#N#by7Us~l7d;r 
z)SLKl+)XRX59%Bn+lFKs=b-dk22(>p!)69F`Eb%Ynpv=ty?Yv?6I<7}AN?4p+yrFVC-h zC~}UH03+*Cu|H=@9k?3`{?QF^D+%iEQ2t?r2w3+FOB2fZKwpX#`X&{EeZgQge_r$60-IJd zwzE?^4;4G}c5f7IRO?jH@O$GR59iEBI)`xFx}UdL{aty9cs_f=Nt%<^DFtH3f9w%- z2-hfo%t%>goG~YeI@0)=zmHH-0d~)bU_O<&raQJ-H_++>VL>`%twj_CPS<9G!S6gZ zE>0M1_PMc_uO{JhTMwgik0Ex29+I+s$bo#76=*PSm#B4{8lZijrIc@FlKGa|NylU6 z#n>BXlM1HtU~@?o96LXAx5A8i^=(iXUj-)jD=!(MOIWE<2D%0<3$Tw>#D1`2UlOxl zasd)5Yra*iz*W>kvb0&VV5|*ZlowGwvbmR^X_Z_(6_q?sj9Yi@rGT9h`9h_k2s+0e zXJ#8Q7}xmDbwu-UeN{&PN43O*d{g$h?%ocU-|B(QH}yX`aN@rHLr$Fmn|aTRi;F83 z`iuOl9UG({k^Q^~x7knY2L#Nw4GtZ1_1U&b&^Q*&MV?0ZKn7(=cU!;wH}CkkANim` zrBZW?!?@aMAzpeiXrp~Gq^oH>wWcreX#yXg2+w#xqSHIKNtWQ&c#+YY5f$g#hjKgq z-f<7HBITsLHx{ozYgjIiu7EgL-4i#ozWbVm>tyhdO4>DdwV&cx&cUDUe0Y^d6#Sjn zACoP28?VqBN$Pc#i*>D>006*q;snRDIIm*r$qiDlAk4u$R!{yjOI=pYBfslu6z|6y zdT%lU>hg(DDUSZae&f0)vdKVn*Rk$M+cRu4%#-*5t~GiObM4Xmc}^~NU*DqUz4Hvl!k@| zDF`W&MWpng9Im&LsdNCh1~P>oEraSb zEV>=`im%75&lbjDYR@uLmEZUfa~eb}Uq+&1!K;8$bG~w!nSa{$rw_+u?7P4tz}w~q zwmy9)wVwR*lA$?Mz(6r)g}Z(ZQ?R#znTi|J>C?U(i~eOB;WJuj?H~5&z|&6ZBq!A- zSy4X8BEc~?33pnQQsYAMY2wN#EZ{o@8r_nGn$s(tb(V$dp$$X$_NfHf_A)ObC>Y282h zNix*IXKakUiRVhTt{HdA++l}zhfk87znp+{(z!_yTk8&eP6D1FFXI}6_AOfA1FFzp z$zGjGK*m14PUhz|I{J4g-^*so**2fnP*<->5i0nSZAsNDU1%1G1d{)SgVu`?m>s9S zd+_~zj{c3}pCx?$r$p}Wt1dx!~y&wnkDN*^-lq$kVVt0Ol}Kg^IczU ze{Nr#!IVeC8MIYNJ^7u-C5B-EtPBgo*W@lq{MK}2uld{#9Fb~$_qh87=RIukmwR;p z7TO$DkdK$lTh-4mz;~QG5q@CXoFeclETj<}CJ&-KjE(=T; z;Bj$YTBNHH?X**1E|J%L>z8EUfx;n*vR+h}?RjYp)ltm1pB5 zkOp>zy9ns0W_Z<|tmVKk9|WFWhOpFwj=k^Z!vrb!<4{PB?z|Qp!*M zIbl6Cx6LFe5JmEhd)aZj@>3p0<^+J?R z?A!D+9Ft5DB6MfQdY^#yE$)YB)YR9hAX>9j>FibV%$tZ;6de1!q=E6bf@nYklDytX ztGVce%g&?!$+S9!y&#ai>-}7dO+?--qc`p>z6Ux5ZtRu=g5gbHp=9%}0Q1w{m}1#) zDZd#FvmtMIL7a9eYVCQ#8r;4kMAszkmvpk@bZ=cR!I!dS_tdVm=$XGXdH$PVdvHRr z`ZDQ+zLahw2c7pi8=|1JZxk2{-l8boNDk6Kv+Ow7uWL>QHmbaP4y2Kq2W;;V^_hsD zUT;r;LUtF%KeR=!^;(meJE8MTEa^LY2yrQdNpSY{bl#;~Gny3^=YhT~{e`wUK^#}$ zNDwPII5^0)+-U|vnRBvl*@<)JDL|L8IFqf5onxEK#Iy#k?9i3;o#+-m3c#^q4#S6} 
z=2gpE8Wfv}-~NpD|1fAT$y`w!#G#6hhLRQFi+JRkTUv^;&)@hdJaIuIwYAT<%D>o< zOV8N>WEU);`7>*4dhn*NE~`UTMa6BP+H=kSR6&)-cX{7<3@feT=XR^|)R%Z3?iF1f zw08X-a-st`~T$E+Nb8YNFz z)za4{Xblw@m`PM8|*l*Xcekg?CBIV4OBs`_Dh zeA39?K%Cm^bHhj^;=2p~^h)>3Z(r6QwunGumjd%!lISGM2l%?)zIgCwiWp*l7Po!t z!74lNOWRWBFBzpD-AY_Cc@GoK;HkU9LCPFqSHYvw^%3krT`LOZVz z)*)n4S`bY(nsff)xvMl(KxWG<$5>#u#6Y)mYdLJvU9YP$RyPIO!2s;6aUL1EM3UOt0FSP7OW(5!j`R(=WW}{`W=aEKDp{z^w~8sjjOG@N9ombho&!5_sM@O3^JFfWR@HNu9dd@wKNY!sfLFS7B*iEj_Y@XxtknIX@Ts`|=XWuKZUFOW6M~86*~32$0T8 zSe#SIBL8DwR8PG96#fs}?#_8>u-~KO*cYC^6&94fS9f9K5#c0$wmJke0|ml0v~h~& z+U9_GS`^vOAi^;SG=lZs|2$TpfB8eLW`DaZge|8*%uukWaRvSq2^EObOSj2*@rBCFE71+K{OsIvR7A#&$+hz;mObFOJx&WFDKXMl(9@F?44NTf*TMUr@NpT**q`8;*D$+TvMyaeIF zKU+Muv-tr;0aiX^77BYWV<-^P7No_wkaON!aL*)vwggkVDyfLI328Z0pA0;J96j%m zxpduN9dt#O7GI-YPVOBrtT1M8bhq>|3B~e4@bAe=3HU6D2Q1J#QR8dk-*(Z(e zEaa?3WBs4S`1TEr@a;H8&?E(q5_lR_i!iiV|E#*o%?J@G7e3`+!VZ-kfHM21hUEPYyIQx2fvRcxb?rAg6{k!EC#7$>ATtCZv6InVcC$zJ{pu@r$#=0 z_I(z2mrpHa$c$>IyK>f3XVd&A*naLfQ6*MFrLeRY(vy{zCtdvlkmzNdyE2vGKMQ_r zO+{CMMpAuf0ulokFUZR7f5qK#?hjpghzU!}%t=c|-WhujT055I9};Ycg!HC|~|+brGRe|2<(oBE_?cP@j^AAv6~ zPTs!8>>F!oev`grO(*+Yk3#aY0au~jgLRG$LRq+k@KT3VV{e7Q1neO z6LMAWP855FeMG2lfG)bzYl9*>v*0ygLzjLILY&sY)uE8q%3SgfqIp)-t3KdXYAnMW zxm{K0eFUME9a6q=2DhFrb zB$DB6E13!sE|_ytiGmq%gRoZib(6k}dE+gN_^fopbmzCy_(QIV5l!h#QdirO=jaz4 zo;4RMZ=mg32*e(L4~UN^7)Ql-ERzI0-O!oFi!Cgwq$lhoGTAL6m=zYgfkky=1 z#NM0`H?rS^?%lOlwlv#y%{T!;=rmV8z14 zFKKL)@p$bw>UE&ARO0jN$6Er1gjKkI#j(s*9|QhG3XPJD4AuJfd)rx|0tyl;w6 z!1k>**sDHsZHqco+xYg@3EaA%@!Z%Od$Bb~4+KY=C7AQ-aLUY4Z@Op4Ctt~?Bk>rK zr8LOhZvHr>`w^(TA=v)1TRQ|x3&$w>_%j`M5)%#dbo*R$;xSag%BUWQmVtzjFKtx7 z@_b0m+mx~d&P6dOrS`Vp*y$;N7ml(fQ#|-k=BwWJ5T?4Np{yj^-v`I)2ICb62 z+u{qlph&9lfjh>yC0%ES<$~0E;v5A6f3(WTMQ7ih$1$*YdZqlA5pN-R>Jkd$=gG}3FpoU(cKtP{# zFd^nvIl{ZV5=L#AuJ0rqk6EEr^r@z_Ffg)Dw>WcRs_{zf%;cdLm^K|+pZE-av!_|D zQX=s;a+-Ot3Ho-pO|}u$*p;7D52;m^cTgmTO9c;Gpp&51Bw(B8|A%e%rxvHpYCn|8 zE0^recsmDXvZU;VVIJSL$m6EQ&C1psjaTMCy5 zS1p8v#(A#2KjQL;Z}J$Ay*x&+IWGXqe%7od0ZI9cUfb5^>p6S 
z*8@{qcS-KsXP%z?l($f6bGSac-6zWW8RaI(sDPPML8E+gFFoSO3ohcWu_ClM$KCtAElSkoLk^Ca}FHEC9Qf|*LixcnZq_rL2#q8S5jlK^EefVW;n8| zWhqx$jSEL0y)GAwygdpAhyHX#0RmIiuhqu9v*T7Axftvu4A#BCNhA@!q1w3*nP706 z1fhFC4NkDI5Y!999%Zsxw)>HqBi~w+;ah-TI~97pt1x5-W*x2HXF%YeGXPg+mzztY zm-^?Lh03R}!G0_&uJC1KwXhC%{D0dgQNgsTn)syzO_Mk-E&R*GB$@4p5&|4huf`S$>onA48WliR{ zJB{^urHxdwpJ9Y%Ryk?*w{y%N@56uklh2pnN_g|hK``nOnjdXGr1hN$@!OV)e(!7U z-=*TdEJ58q>PY_2(_TshUyHlUA&2rr6|sZZa&z>re!$yh%3*eB>252EGY-Q#Ka-D0 z$YTyXpTaTu6g+`1!3r4nxc!fLl@7S7w?JRY(k13%6QLCp81|$oQV;#OeQh%EgW$dy z1;S>l*5c$>y>&T2O!>2=O-S!3b|nee`#&>foa}@4g!%f0Uq;L z+5iSvrU)lTj4Ri!^pgw%^8lQC zWmCX%`UeKVaMr)YlsyPukduflMXVITS0`U0b{nGQ89CDEUG_+E>zcI9_c8Mwilo~t z!6pq66JUpa3)4y|B4K|>R|+pt3apR%1Grz0rU(6fm4LLzRS~jJ_Ei_(u!UCQq2zM{GWU7b zsjb@1d6ki@iZ zx}^6JuRzJQd;QYJrHgI^axeIcWk?WRi~dH=JC|Z;@2-HN(GO)KeS%7Y&Kg3je5`S?+}4h6Z@0!27>quPkHKlaBtGfenC6WtiNb*H)yz{kj^L zTA*=*3ZeR|*1}*y=u?%SoO|kza~j`Y)%dsrfh~$h8bR=F(+s74c@dpkIqQ$|$rYI? zee+_-b!3ew;Pv#w0FI_dxCnv?BVf zBjtCdmVTGr!#RV*f8Ru}D1Toh67fI<>)YdaNw`&NiKS_22UtkCLS;%H)Yk@okpkJQ z2{p0nJHbDS*~lXXd2_pL!yy8rTW}|Km6s#VLwW*XBQeVIiKFN-nFA$Z96QxhmU7VJYEjIixD6vP-<<#ao!{; z(s^}TUhv)42}hkxLh{E4F+YPmS_Bpk$%iMmzsr+M(;*eKO?Sg9**-067|a}7WETiuG&NgYN$8j9`>N~OZz>}r z`Z&E~M>@APB4XEi!@<>Hmm@;t>FPuBvD08Ix_|bg<;6q=XXkTrn^kMrEh!55Z=A$v zFokAURrn8ccnCNnq?_(7M|%7(x18Yfd_RYoX+lncr?>G(`p|*QCvQsHv*V0IuoP1@$kl4$V?|%>q0*_* zDchUcqv9^INhR|hEHw=lX7CL?#hOzfU}~;=lGo~r?4(6}cei((MTN2kvKgXL5c{_9 zvEu~l&FES+Rmk-%oY(=r&jvu2l$~gr&f4s&W~NIH_Jy#Ui$uk#HcB-J@7i$ln193jMN^ZZ-~ zTNL(5ZNzP@Z~InFHf2SI_C@L?z%$!#5+7X{ofU95oruIjZ1Asa*G)jAY#@yH+*JHL z&*U(F_7HLvdZjZ+arl8doicbFj~Yc@!SB#cuy#Dh@5>+-k?GOc<7uRIjQ3Q@t?j}i#JQZb3>=|7j8)wK&{i1 zO+TQt&_$nqfaR36$V6Vbh0`PoSQz#MM5t?U@Pj`mA;s*`eeec9FCTOEZ=>&jeD2wZ zbFlE!9MbBmkc#@(zrTNq5KFfd5I)dwUC1(e=(KF28l0UWn#<(Fc1})d-B7npOf=_h zByj6!#DL7Qwy_@7aZ7?j#?eD}2fN%6_3UJVo^1>z_Fd%N=WiXBuM)9FT{GI#qR|Yh z|H%tS@LOGoPQ@W`;OE|Z?D8~l`XFt*m!&yD?)y3$X%t6>$*^p5LNiw}$6U0aL0a`HPa_=vJbYJ0Vjr0}tGr_QlL#*E;uiKuO@xt|=%it)|rzrzpM{&f;Q5VDp&JQI-k 
zJEMn)UCSS;j&_v{vN)un(%ig{`{sDj$uT>3>6<+4Y{Wb<@__^6kcG3rb2sYS4SSe~ zDIkHU@1m@W^<2N9efq=#LC*VmV8FwR-{t44V=Zd-%8m97!e93}llrz#Aucpj+<|!M z@b1v$Edk;jPC`yYi&;Kvy7Hp7ocG%_(3{j`@_F$s(^DLNC;Zipj-(b`Eg29#?^btt zn-e5kirUlV{6|6B1C(4UJ;Uk#2$9f9?2{)?U}EghJ1;AYM!QpY?sVm0xwriDL}nsk5Gf`R&~x!zy)K8mhJjz=|o+6R*rO%vw+{ft-ypVxR2L z@nPt9`Gwjt*bpQQw4uFFMyoAJNQyaLR4<}uOUDWcD+V#YJhS&ujiO=c3mtl2L~-I2 zDuzB7CsxVkW4`;4izkQlny`Xt2!(M@~Px_)lnQ8-cD;z8Ex0spBO(l@20 zwMUQ^A=v9yR#WZJ)&1H$!_y1aIHJ>RqMt@75i0@|G6`3xi4ABvFB&nz(#I4smlAp` z?Ar&ujvf}jNJ@+B5O2PHJ4uXipc3UHn^kq{mEm~!L8c%Nzi5*$7pv+p=7 zfH0-Jc4i4_)n<;Mu6Tg&qdHVCTf9(q$|?%twjN-|TF=96xC6_kok?FLP?j#%8JvFND6cLk2^McXCz$uhW ze7SriDt10KC)C|A(ljyo5(SEH6h0<#-(8A=33>$;Lbg^WH_zU`9`64hJlqUDgbWY6 z7GrCgLx0~B^}2)>Vrhm2+;&)6+3^hgxe z`9{Zu%YI;z@nU(|NoPZsA1{g+W&(b5K}CgUM^sdl`Eqwx*Xsu>JG;(zdY_yKk~=k_ zJj8{%SAgI6vh17P^J?66gFP(67x_5zocqUo<2u5vnWkR@M%BiwY>^V-36TypVO$Hp5UB=$#)y}zxKKjGs-Yy8u zm`jBZNVT@UnpVX|dYmo8J7)JKmq6?MrpI3Rk|X9ORkF5&sBI4XsvqH3Mbe)MWV;ZJ zJ=;;4b5{;E44?lxy>IL0n?AvjH3ih{q55<|T?`|$25J*9+1Z2kF^UX6FOCiO2P8(` zCp-4#Jqv?6_STydbY2&}sHyP{9~~X#Dz1sln1kQKtS)AS)aVwvZcqarX2~A$`|baD zn$ObUEzeAISM{O&*TZZiWC=AE7g(z`vK(hedh1G^ziUD093iIdM#19h4Uw%M--VAE zw6mOljbRtAA*6wMEg95!3R*^ceR-_lUw*0IPn*thaA06-dg)KP+4;i7{N@DONncQ6Af9EmZmALlG@UT5nBqf>3$Qet zI=ZE*{U)obYUkHLwATdyEmuUMKBid={qSOB2E?g$o2Li4r5rrOe{irue#?vhOi+7r$g-Qn| zoAvVb*W;JY8onC$pJ_{6Ge$)apbr<>~S~%SUGl-t@^G4R{c{ z{Uq;T%`AOnN%Cap^juK&$olwDuTsahQll%VLB2XubfRi@MN)g`r{@d(Zg=~{}g1*)s zMzWioM<^~hAp)jGMABxLpzAp>tTk6p&{}TkV8@mVh6@nlo;eK8W6NSAMJ(FJH#1T9 z{NQ(ZV3ckH%{k|!0ZpI3rpVSF+t~!I+81$NKzFJC<{ct3L%F=gMj9i4jz|c`j#}dO zw5f@mIWoSa90#ufjW7#}z-#^-m6Cpuzu@j^z|Og=5$#P$Tjw?aPqS2;uJ}SozK<9? 
zhe6vGG$J1nS_uW7s#cJm3l2`TtqJysaU^0qkAd@(I3nd%#Dwg0bNc-;L_G_co6&{6 zD0qeJa26y8(-BM5nsgDtM?C(vQxpfsiOaf<+#*eo>*`-tQd06_K0s}*K7K!g&r-y& zR|PdD5Qfuu)8|w;WORx2zL5&y1$St;S@Cx{{;NHQHx@^DIgK5kUif#{E&^S9OG0O) z*E5*yZoq?GXN(~t{OS9N8i)sM1IeP!hvh{%_75W*L^#E5tXcGU#uAUC`*?}xyHU4`XZ)Fk+i*y3SxuS2ozyMYB~aCnnL(TKE$g zJn_&+jx+_P`m-030mG2-wjO(Do&Zye z@=O~xb^YnzxAro95B^`$~pYUW+(e5EZKre68oUq(E;C$;=CnP}!V!aR7uK}&` z;7EWCcFrPgcD2fC-#HySJrEYc=JkvwO}_ui{tjoA4=4xEhI%Llqqm zEO26DeI4hrL))qBGSdbTU$f~JV9JL|ZMNw!|01sPG;%EA^-OA{h=1tVl{{ia)U2VsToYt= z2_fp`>Z5H@KkwQvtytOoDk{A2(kEJQCg{3HrF~X*_B2rB4_zk>1r>f$DJm|G)JHcLa<-H&LIjuxO`{b^OTAX+VnR`pe(+O?T zU#H=|F(EEWh_7S#B7}IkZ0Otjk|Rt{S|ANhyy`nUlR;vn-^%zalkZd>0>jeX-7V@2 z(I_2yuK_8rKzQVf9z5(Dy3*P&phr0&yXdT`skze}$s`%y0$zIb$ke5Q)7m8LA4&x! za+}#Q`7&b?H5sgAm)*cor=DI;fbyO)hg$8cxkh8DHVtZGLa=<4Bhovo9?~vUIK0xm zld-|6dbMLiLwV92qGY~f*KjoticaQ#OHn}ebNys=Q^EHY9Y*Zmku8pw%|FA;Uw-;c zUv9buYFG@^cQMP$%dduh6;AkC@UIu!+0=gRm}q7&=+RX&xGBJ{l};EH(|0(`|0BpZ zfSPZQ#{+qgW_Z6Dg6sRQ(&MtmXcxHk9r6<1SNHzDfXrQ7)--&5ef@JcWM5W_e-r5@ zy=|EX^bIRSz?juAZSVxvJyg`OJ+>n4&8FsZ3TT@cf}P%#AtA2*;_3iP=Fea3R&BMJ z%$y?+>kHtOj2?2VAiO#2KM?0E&f0CVKO(?if8L(^iG+MKQEz`^g&AzPW@Kc;Ucq#}Pus zs%0K%ZUO~f(hX{y+MQcas2ZW5DMdZ_Pat5M8}gOs-JKz)0Q0c`pE@ltR9KchCX5%c zb~P)?3jV~O9VOq$+ML6@dORn1xyK<7b8|@)F;0yz1QDkVm_KD&-I3?-HUBnDB>$lA zlBDvi({Oq?8)@O!yjc+mked{;QsjS=RpIZ2@bR4$bOs-8{q;kNlfi-pg^QX-MXbbc z<+?}r8e6D4EBcH#WyXa|no8CoDW!vNQ1({D;Pp7GtSagR*WcKL&`H21j2l_-QaLfB5cCFe1l5RAR*vPM-1P$%&<%eJnYV zHeF3de*VIQm@$0+jxbX>zT0~~zP_=sA+*)fLEdP3Gp;~=m-LC@}l+mv`2-No3auR=rx|fOHK$TgL6o&{33X=|qdH{{G?Wzdra&62f#E_@gaV z2?~OZb9Z1tsI8Q}qpCx8ip(4_V*J|1-7oOUg}^bGsjaQ0Bul)pbyUtznd0nyg3?)K^Z+G;AN;gCFT(YkYf;ybf_JH=heHz@L$21Y2!adcSKm zy$miv3+b5oYpC4{7hlwEm~~Z=okGhVD4&}Q!Bo~po5UNpNCN}%^A~lcsiMUsBnl%8 zJ|@(AHanp0_pT$h;;hDnIqbemJ9~3Xy7=85B3>QS?fP`Dnnt6GD!?JR!y41fgYtoA z?H}2ZuFDv|LfZ*ySLbL(mEilv3sZ>kV<`B3sZ_LoK87?*Ic`dI+VJ>3Zxtn_26aGx z)aZRk6kdsiWw^+6Or0#$M0U0#ZpY)@T#guIhf^nL!y7A-S*mk_^PdZo2yma@Wmz;r 
zUcC4yhdFcbgt88P@qhQj`NS-)pdp@P4gODN0H)WxSl0`&rk+hVw#8372xj};#}-BI zpi$1R`bI@g@3Q7ctEu&1=NRml++Pprq9Z-z`i6(AZ>p-cl+b@NYY$xHop*WsGfj~H zv5~%0cQIrxoY#J% zeO(7|)_pXxdtfO3MJ%@r<_;cXFvYJiTcyJy3ynGlk?)V;E#+lqI}%8K%!d`on>btbh7~J2mmn?c2V)mt4pjZYO)J$Cj3so(A&$ z6xl+lX$AMOhIpUoO>*Ssk$`s4MH#>t_12nJgnQ@$c3mtbE+>9~Hrh3Zf%qs7F1haglP+P;93oKVqDfA9wbgQ)K zsRk{ZFW&vS73t5?Qu*@D9U&nhDVP@)MKP^cpM=-2oj&!+XcZAaa;o7J2Mge%HNCq! zXy5KVlR&J&!|NVS_+mgn+QbOm;4LYEh5zW|?qB;-9Kb zesBGKb9mcC+fk;=c&!TSJ6Xap83OIIgQHCD(*M!*-SJfK@&AXEhH$L36tW^CNwSKA z>}(FQGEcI~UM&ueRAy!wm2t>8*(9s7vW~qI*^<3}ucLeK_x^t0`@8>mG;hx5{dv7# zujhK*QJjaVa*5bWW3_m0wx8_dH-}X%k2%f-7kFQgUxpzF>?!R3^lSKJ$N5{4POdm; z=EZpeh=NjKF3dmi=r2k_iW*VRwJcJ13Aa1|5jf!u#X-##W;hRZk;?BCv@x@LR~`V3+`siPIAsy9df%-IoJ>g_xbi#cKx}FOmktfq?&; z-212C5=ev(`etIWi;8G&ppccWZ+(L&Ie{4eV0_wOGkOD$E^-`E9;;frPHUusI)-ag zE8DCGp1M}R(ee;F=#*P!PAGMxQi0P@tET7|*F0_|UkGv-5nF|<3DQ@M&9J^9SnYcR+7^c{5{n%45>ow0us5f>^n z|J{Q?bfEN~=Un^9_Ug1!7rn3**+&>{7ro^(`HeB#k*QRgSa)sa0BJ`E%7r$zD*D9Y zkIfw->}cQ3xMT)VOLTIrv8htO+$yJZ6$6ixHr@+6YFR4&t7Z6m02j0%!2&xU^3u}e zHY?7QB-TDk(Hcz3x>z@(?Z=yeSE$WsGDY6yx5mBT$?BD!k;tW<*WF@%zATvuXdb{ zjeKy8F)zSb@Czo3u-bL^$O~(wU%9uu;3by9lnD9iOc! 
z^~qsqluAy`*R(T!_-vFPh(g|`@L$Of0?T@tt9xt?7;4NcS|N%=GFwmuApmYJ?w~Q* zL?=DnlZUo_&2eIMhy&)n1BVclwVS_aM#{^}`?3uS=cH|2z_$Uub*_Htc4imV^^H+T zYPuvMf@hJ_i3XP(SExm3bwJ(?D*$kPY*%O({ROG~gfx~yJ1v78XaCj$s9`Xfza+x8 z9Zk?_F9C3F&Lv9enuRsX*RYzYGY-uS!RPV%Bw)hYQ}yOZsmjfwu~G8C)FE}i@a^^V zx&-RKvNc=*jTqT5ZJ_{iHns+k2}{bM)ubDfyG#)2n&Dm7etuU1x)nJlzqByg7)8=& z7w)P>{Cl_bzuRx&P!cfbpa8gJND8;}kc*?^?=9%Ds%~pur;S%e>0KF=w!;}{>>Vt{ zLz&n~3d{w)ikMtgOv3NEA6jP2iQ$&5wiDFI!b8=L?>|-lIZ&zRvSaTjC8V|-Vq>8T z7?~~WQcPy980_vY>IB8@PQti~6R^!su{fuzUH7q~Rkf(LHV%H8K+%YmaTtND|<}lD|mxe|E3k&JkN9wGr z`BhBL!1wL5n*Cw|ZWl$A(X5%i*vJ|)sw_1<{bwE!DT54#5DS{6L6PU;Oh%2f&i!)m zHD$|lItkwxp@2tUwcwmM-4nXDYgH)QwrZHxA%=CgBqVSoZ)OoTxt6t*68y30jG}FK zqKSSY2&&ALR|KoYqb6tUu=>kS9%yECE|t|ABPrQ@xv8pK>SiJHX&_j_H&Si^_=8|T zGHpcUQ;Ks4zJiJcA1~Kwp*=52-JY3-J8fQ zzU&x-5rj0v7xNnxz2|xTRk5cl)y1(==!Zz;3fBcKMuc}A^U?^;i+AAhM;vKNJDwkz zNBdv5(SyC)C@yw|`ZcgoYU`)5#0_!80(JeSI8MmnT_s~^ONypkAg2Y@?vI4EiZ>F# zL)_h)61KeNQ6B=_le2;@8GYz%Yy0&OaGi<#Zng9dN63EMz>U!l8Fw}m$GvNjqop`= zF4=58HF#e4u25asg(Gi0XYy@YlaE~%5wTZ9b$4|9?uTP^MNx0cIxQ`YW5GvyKAU4l zOwp^*24~v{G*Mq)oR^oExc<^e9}&5E9q5x?oNN`mRMifloo{e(yq@eGuXfV$ z3h7R94Rk0FcH#(wSxcR^)i$z@ZL}0LK0j<`)$4aS?l1I_4Vyq>X=_i97@P9sJ)`I~ zzP~ylQ#Miq&Su*zWe9Cvk5}`9?}-ym*Ss(caz?hveXHB`kSsIwoGELTt`#l7o@{{_ z{{A@Q)cD!;9LRsRAWYmBU6Mv_H=-LlWdnlPU$}By=l%s5Ec=!faGmJ-f|yx`U3Lfk~^YtA52sQGJ9iBJuLuo7|2;)6Th=oLkQgymkN<1MRZzltqIp8lpJft%D;xH|dA9OU8~C4t&M{Nu#BhaZ_~jv!TF-Z!4K z6;#borom!xK=x7Wd6tTKfuAsms>EG6_z;T8 zomDBe3K6R6dA)uQGbMI+kni#nUr`<1SWw>j;7e>U}X{wiO0Sw&3JSIgNs=nS!rl!(f?Hk|?CN9|@Y&pD=Fj1XZs@ zjtqH{Bw~0qdmC;xNWjM zo~%_QX?twjv@8)}pfdk)1GPl<>NwJ`v#ZO^dQ0*fD0|e(R;YI_lxf>;j*NN~zb!6a zZzl=GbS;d2?oh1PM{@EjpjH!V#_O1U&xnkKuD*HwdgKDCMyIkb2+P~xCmNh=sIcT= zi@_K|0_gjbY`;otq8%Sfp!)fFY|%d(`-8BBPt4Yg_2Q8fRk^53Y`X2M+r`j&Q94)5LTW32zo1oFaK zKUrVwyL;=US$}$6jDVx`j`KplOG`Q+PN|`WCHHQo_?`gLw49a>#)=+eYo@S~=W z98^j#2K`*f`TbX#C}{HzK#6)20I%_zfK8Q`I&gF>xiERuMrH1cTY%u&Bb{K8S^cU{ zDX3+#vyR(#EAMDjynSnZW@uHs-8V)Z-Sk(F=!QU)fy=0^zkjPzr-m*DRyE*~Z?KO> 
zAq3%5JsKJsnhBXp;%u90Y4cjp5D2XJnC$y_S%YD_eyL2YW$7xkpGhqH*CAHDkjwhe z;p%68Svlt&dHQsf>>pusWNSYCXo{Qf2J+e^@&_rgF&W5bX@5%Wm9^8H!O7pyn}My3 z;=zQYt`{s%d5Z0to{~d~T)LDMZ{JesT>+Ks*x6Cx$Ql$sxDs81lRsl@Y}UNiYp<2H zyx>rFhL4vQBwuX&6vt;-EN7E6qY^S#jm+@dH};GB3N>^|(`U4vA`PduW11J=U4MP^ zU`c7I*jU6RR>91Jz|9yOreH>k{((To_CLSfM=*ed^)NU5#asU%2!D5CF#dhK4O~P0 z?VG2Qno_S>sWn^RiUTfS@P+`|Sr*yTKM4rQe}MI(Z>>Zw62S=P zqNjIkw=Su-fOtxOr*iyjTjYkbbJD0ob+q;&^NHZj`H z;~N8Zi1Bo-OhIU!uh-<>UOf|R?9!zd{9vr;LjmmekX8DEf0|F+5e$7!f4fDIFnBto z=~n>}#3EJ9R|_L&<`d7UD5jSfFP~-G)6#rC4H}uq7t!0@2}Kcnp+kWjb?G|y?>*e& z_3^Dpd><{FlmpK(b&Yze991C7c9#aRMLidd`uzN){TOG0-|Odb?)=oWCzBv zyunDPSxWZNmi*tUlS?-kvgN43lu2Tq!?`+~*b!SS==#0wQ0TIhk&m z4@o)~wP@>8KxC&&l+*a#3!U`NX>=^~yo{te=UNZIAoaAThe7o3up8xTx}{_5e!`LD z=6Cn|KN4;pXr{>qWbDef^D7($i9&7C02djdiV63j`2JE& zYso$hrNv_ux;idz5Y3I=Q6PqKN&*iQM2QNg39$1W^e7ej@!a!P^cqNjA$kZ}ho1zK zi(0*L$8Ap*k82dKmR@S%B?%g*rr)%8=EL9C{@JC9-%ER8L zS{F0+CNaSMEQOuADJ)sh;XU3&9J0YwwqH#7XBdFL^DjO{MS(PFmKw|Am$9kwsDeyF z6t7Eu_apjwDcaTeCkQjmJz+w?3~glk%+l%=PPkG@o2>PXS@DmS$QY;f>oo^IK+tP zoO>BXenjS_*KNHpDOK$nzaB8fjYu9RMrh8XYRqyak8h8oYU5Ack!d#mv=PvMt;VNX z@b-fS{ocoGdZUe>II<<^R%ZuJNgt!AQov5;9i|?N?xTknw&%P-*f3Y;{AOJ{qggmP_6QR8HI7q^ zTHIB1D#5y6V-Bcf&eu53O<{G2)E3`OkpuoPJ31QZr?vSy?zHCXHN2VLe8jV(0+9Ds zlAphHhza|9dD$MzYY;Rk1v}I%#g)=sQKweM$&mtwsuw=>`cTwMX}uutT`Kbd(#4_f z?)`i01mf2Fd(W0WmZ09U7_>)Gf({ zb=20UI@e|EVtl4Q`Z$5Q{K0V<(#?OH{jJS$XrkB_F5*eg`<2?E1?5N(ftS9_{{wgF zI)mtO993f;W)`Yk7HAf;RvJj$s75UCxKSbPqa&Q~YPms%GNv-5o=nCmScr54|EAym z!64|B;QvM+?81f%J(%|cl|Tf}Zc;=Y6lXiA@4753OpV4F@{1mbD;2R?paF}ch^V*0 z7elZPf?11%qNE0o&b{P4q7$Jh0L!Ugr=CA}sIu_ZZG3rmIoQ@|5i7?-s{hQEHH+pODoYEY;U3xZ17|Dx`5<)CdQgdpn#f7;d0!xu+& zQ1VCaC2#R2Oe4){0E=0Y$hPuRJ1_*PDe*6e;4ia3Dc2entbx`&UrnKRvbP7RhVt{0WJ8T4EYx$>kwjiW9W zt3P!1V4|w~gz#OYB4PW*ri>{kiBFz&)gaZNpfyTUwh!txEW2qj5zBe^qW80au5uQ% z1y@$j9$eu_HWByAtz*v5@>az((I^=?ogJ)zZ_x}o>x=dU1i}T_`=eg$(xEw{kx|q( zwn3{G@itzWne=zJtT*IjIsH%@+ZMlRE~Cg9d6m~M3GUruqIbb%~5!W zcGuvMiTF!AdJC5i)u539sZz?AQc(c0b25dyWZ?7(x#x3JQ+adC;Jmmn^Ydp9>?dc$ 
zx?leqQd^A^l2&!*;&ACHSerGca<|0NekQ@LF9j93{_}unk&dI*!}8Ho^mIOAxP4p= z>wbedK(^8;iagig@q4f0u(&(&m?oY5QqP?3*MpvtY2NVlLnrz0M+0t(75-8 z5cPzDZ|)`5MNjdLOFV?SH&S0c&Go6W?WzJ+Qhu7AgigQaEh6`={08KwHP;tsrlnP8 zi(An|4b1js#M&(;SzE9KS(-6BTONP9-P^EKHdMD$=Y1Z;g)+zXSWLUD7DEtGFU6)4 z3JV=t`})LZ;-e1qat9A~c6Kgj=J{U;uhM^e4#*Ui69FUkDFlvf(30#i;%G4D`M#T5AYtJ(fv_x>IAO1{z{;KZNb8|#K+s*zhAz)lhdiROx@-H&*;{!o z=UZ+_=F*u!0f;doVem#3!UH9sTG^m(*x0KIDeSKZus@~QQU0ixM>;_Y*YM$3e!=Ny ziySJMaWxNTmqOxUAiUpflo=Zvdn=zbOQkFsc^-N2QWg$~u(g0C$A3*KOQaiIYBhr1 ziEUK*#EBESuCA_9&%bkU5%>GoqJGD%%}1sjr2-_|Rw~KFFwx>K(%h2*;i{OTMJL{W z5D`D)CcWb$k`Cx(DPWM&b@R6P`2+s@5_b5=hn3i>PaH8$=C+eXQA>msRs}OZHCE&9 z*DzB^qVShSd>n(U5mA&z0U1)Xq!Mzh1bB1>*W4fCxq>FQq0`n5g#N~mK`yKW;v3k_ zA;l1Qx%p8ai#A!su^snJ18B{}QNy&~-qI$pkHt+i9PCNN{g2u+Ofkk6rzYrR8OI-Md zUHrMtWbWW7m*s`W)Rf09*7AjtBPni)GPYD#p$ej~JCCt6OFfu7cf2=>ClWX7_hcv( z>frR{qL)(M3@}@}x$ddx{rh~&(kE}JmWjOKry5FxeIFadcpSwP7i_L@=+N?n32YxN z-jC3wYyIl3lVIm!gAe5`U`sC4#}p?AlN8_Q_o~}d5HS6vaB8?1t~qrOwPp56why?^ zt!FXank*{;JPB~IneN`L|0q%yBb>i>ITHG{Eq^z{^dq4+NW=lV5kqC=kQE=l$M0*F z%Az1rO8@@3GzyF4QSW4*Y`FM5%a?6(KCw{wKp0s#1E=rgo;MHc z$!pJ3Bh{d5m&wu7;bg-{`r}Mk5FZVI|BO};06$_YkooOBSw~k&K;(v2+6{nO9i+3b zI0bwv1M1S-zXLS7qqAj9>qzojT%V@Fv;Sz78qbo>|Mj?aKSmxpi$_YCMmkhQl}{e+ z1WlSFhF>j(fr`K*{vmNg2O-F9b;GvuuASZ0i;2VX7@t#sZBc)+2%3aMp!-hZI*$)6 zWGnGP0aT>IbTx=U5USakST);OwH^-t>5cdLc z5PF%r3lMZi!Q^)PiW80fVA;AU^Lm7IuVB&`EKQd(LX-mJ*kXYn(FGe9X%j--uz zzNd}698L7RFVmS&c+#TCF694nSfGUUi3caMGTL}F!g};L-E5@hct^HDKi&ZFPEq8dzV)YA?MCjFm%8^kf|J)R$M1n)imsLMX;Rn; z3tg4VS)>BiWkR#UJM(~ao6U@ACqDNyJtswyfSn|@yGJKpUJZd7Hr4#KWUq!BK>8X z7w9PeCD(;xxYsM0GLe*?jlJIZWlF?w$Xv!1Cx^vVjqEEo)ox7V^>aU|gYxd-usK`F z*(Z&72y1!th&Zr`o`&BXxj2n=*I^Erh|0NOkatd0Q1F{UDD2Y!e%x{iDPz}m zlqw=>c*LV%6C~_f*x7lqNFn>T+pkm$*)M&fS*ueI<72z%;^^DEgSKfifS@;2h*!N_%HpW7ntol|Gg z(AqpkO3IQWQ zqrYnWCk|+q505l-knXYvwl~gVENR$EzM&h^S`;EKCIvJwML)Tw3LSOIn%qAd~1w&6^P9SIOKiXhJNVmrmvx&pPw%kN7GG=B%cd+i_T$&;&bd>uVEdu zB=UKeBjU(DeS|D(8tKG{aQs=?a!&FU2Ty46cW5G)EytjI-TZ2Q<7TzeZ8mdDi6aVN 
zBuWC8S*TDvpyW_aE7*Uy?u<-W~AE2n94UPAlt{ z%UNXkX=E|Qkc4%kJ>p~9@?5r6$UFugjcg6YuJsz$95I%_K-f7mMdb zT@q>~D1Ip7>sLQf%VneT-!HO;7Ol9h8xUyUsX*W6Ec3q)FdY4kJ;%`eAg)RnRN#J` zHMyrqTDug~_wne+`5H4|%Q7U~2ur$Ch2Ro@-W?-~MAnZ|iJ;0Z zQ&qQ*b!nb|K}8>@ZSn0d;$C-JZ)8XCwNkN3`r*$@Ix~Z$A#ji@JulEO_Yp4_!&uCk z+l-lN4a=7T09a5p4qW}^L(J8=hKp;30h$jd%-9aIoydsL2(ImYwovQ^0c$hILWIAs zRShv%5LPlAspUEJV<7ps4CI{N49J#D(|q%XbC9c?B@6bY`XaP?02z?E89`NR4o!~` z@57cvM`{GA92x}!?;*+aWB16A?SIG+foT;UyUq?x3mE>hO`n=08Wy& z6_;gZhIDoX;k7yKeLltS$<+*sKNJ*{eO&d?EvDv`jWKo8VvwsZCXYII{V2)#P#j=y z1Hj^|%r>c1p6AF#b`b49zkfk-n-eCRXm;K6XAUAVX%~Y9sRrmmtKW2?mo{3sjkjWj ze$+(pQG7Ny2r=c-%C=%HODnMPdFxqXZ-izJM8`&f;r}2T3uC|tP2HzrhO*!3kC&t) zX3tvEfQFySO6r}Xe1K0YONie^dD2;;1w%277|j@o8e9App1W5jdz4>+qhJx5GbwD* ze&jOPrJI+^Dsdbk#F;d1-ecrnNT-rZ_P-!hu+G0@@@^q3f$!&%4(|;mS|fp6bFJEQ z_Jakek|#2(lXG)FzURC|A6|NdC%EZe1Y}|Y8nCxvhCPq=vj@KI89BQpNEH@3q^ESc zF~}xCNHJ2fnbKc*Z2L+OtfEnU$+YA51v|tn*E(ykDeht{WZfL!5dNYad?T<2H~V(i z>NW=;{|SpyiaNSbs0(o|8n5#LO|rR#qLv#wgaEWqN3E7jvnN9cW%UX~dkP`eP~;&g zp+ZAr(>xR5;n$++^=YVS9{STC-Yqz+#}r@o=7g8p!~U+W)fMhwe${mT>Lr)aNzo_^ zfy|zG+-5_xfI~0D>4+lyGvl-|d%|EV#ZUN%4LKRU2_9HWovF&U3Iij_yxp6CKzd!t znjR;+RbjN{X3@r?7ECtw=o8iI*o#y_`5IL~_D=JDiT=HCN!w09@rw%TE4pEHKcA0S z`KwO~%ctvNUh^e_9_jibYwVfJD-COl6*#Pv8Ga!CeZ@cu_b6Rpb?jerl~ryINkJ+0 zIVtn`ZM&!b9Qcxh(Y9W=qwrB3LeSKNMz5Hi94x%ls#LrSH)yOL9^D!JrTBS5-Rzk- zs)xf=Y$q6UBH*eubaQpx;r)`chdxYC8&33#4>B(i)(8+;QvNUO0SlDcooo)X*$+q+oCCXLENyjp!f8tp^BM>F4#IJ=n~_ zCaPg?=RA520!O*djz%juoxhU1V64GltotMS(g*!kR@!!IQ5rlC zpE>gphZkN=Wzil>n?-A@NpRXTblXP59I4O{*xS8`CGtW~5z zDjv4jsqaT`X8(%kG2vIgSl9p2=f$lP^sQpVUUggi`k7x^nQMN}gnrze3kSZ5D|s_t z*AUy|)3&-DUHa)^0j2BehSNEa9czkh z68KjoY#|y{7U8&TgR9-3POn~%)y!Z-QdTj)e9#=^h@wGemtYM0!H4^>lPnV(>~%4cV_gr=x*tP2nVS^O zJ|~o1|6|cRwa?9o3f_Kk#784G&sZ$+PHruenFP-qEH5aWnV*+tLFjio6$)8#^Mk7H zQ$)_&gck*ms}x6L+}m_N=w1K5BxKQONsxM(cIAz1u%hiQh{jn?sOr2#uR#xfm~G>c zONBsCGM0Yi1N&$;;^SsaH+E76Gs}@Hyse5lr5kzx!7p6rXg;0y?NO#bRi;gKQhI7P zO4RBF2`in|=VrJ|fl&J%->Y;P0+!#KrVBoo0P0%Wp*ddc)V%9qHs_0zrGQ3BojAr) 
zJAnajSH(Qn;VZO@!0wN~5y23*y!fQ-@WnERlXBg~+nI&=uK(7*)4FeN*lb7qgd%sDxyCX&S zQiUg)P`~$~zSH?f_5y&fwjm42ZgXjj6X92OqSmozOb&m23 z3D&u16~DV(`o}`$5>$lpQ{w1aBU9q&=Am4q8Eiu*ftzu}>^&H4fjweW~{Nj-tkir?ZNjvgP=VOa3 ze_r1*8(Ix>ZQzY+wtI0UrSX~aqz?;nmr1Tw4NP$3Qr)FJbZttK@*0$?xbn^FfjGpr#m?$*^Ss;+-|A%vVYgl%P{?E zfNOiktplW*uwLR;bMjHl?7qcrJp6FnBT@1FbKTQV_AO4l-{uaMI(;k;xSj2mlKay; zTIOgT9=>|~Y}`F+%@^&7cDQw!GEnxnyg4W03l3L5j{%Y+Q&LjWi*=L;iCUo7nVaDH zHIsV*!YyeRx1CCjctC?qpIQ4^wLpMJBAV=@>8XC^zF5f5s=NoWIC19Iv14?Bhs6FU zXKayrHj*kV7EC#O;nWfe0)Ot~Np#=hgR0$&`So`L0Kd2j{=)axpunisr6 z{8%cc8vVkA0%}#+(Yb9jT<7UZ`Sh>w!#zT>XsMP1NtGMM0NJ zZyUV9eP82#tMrVzWVeX7$X-sK)%2$4r+lL zvC;?0cgS;Nu)bqaP911JBtq($;2cSuYduM?1=`0YA)UJwWN2$PEjI{~QJZ@Qo)21s=TBbzR^0QJ zp8QwgekVZsY;(409sboy{0|aPL*m=45-8I;da&-D%(@~BtJOiaCr_Lx@~kam5QhjQ z6Qq-xQ0XqOCSAv`Z^-1wht0!~kypRE=-p7;G_z>5{<^JyPlKD`vL3qBP?Zch1RTO@ zLJR7JTQFMu!rL@2SB9>=_kvD7#GHo^`n0r6rflvLHhM5tK=BFlKiRxfxtdM_{#xAY#oa*zeTD z+A=hE1EtR6Uwsh^0J4!I3(5WQ;I02)2XS+d3Zf2iG+*A=b?~F7#mR=x7RnlRFjPj@ z#J+rNR#fZ0#qERkrVTi)SQ)`2D>~&-%v2Z5e5+%^FCjV*{Ag# zO*M={$=p}j@xcg^U~KcvmtXH?6iZTh^dJRFO)AOS>4x^)E5hJD#ORcZ8=rJh5_CiC z8Vm&5RxUFCOjjz*Tm=M+AlaN7MggQ8$#Y1@b4&E3uU*@XP7SPr7cXsGfWn)!V{k80 zr`RV{=qSBGRA?nLV7c*+unz3W>$Q1yyD~LNmo8oMcFs!cE75;9^>dF;MV-asn*7xiK1kn;O|OHWrOHMQA|eJza^3xI8*wAo0n`yn0l# z?flKNdtyJUUT^fYG}%Y~IUly(X0*6=IxC0kyS9t`B6UWz^RR?~lZ{3EDPifGnW9T1 zDIVZp?mn&~w+d{YpWi8x#(StxH#IP-XH}Qq`<~tA) z-+aQ`H+;MM2(44!Ve$)!;o(Yt9+!OUHsPip4Rx*u3-^nbi6rM7-aVIONte&i5Q)t< zu~>do51P^o*@>gwYwkj7FEZcVfY6Rtx#&RE;qy zs7>)V6M-oEDfIACzcx36n&NqbD7LTMaU~6_+l8exCjs##Gd*KnF%Mp|^P+GJE>h}E zGTX_e&4WXFn|JXW+9XapBNY1-d{StZ%!pV+zI~q=FAR=#D_>X=4cZj6d;g z+bymn##Yj@B$4gtff`s!pP&Ba>q@{gpm3=idf-X#)zPr@BjQforFiWcmUhSX8q0ss zOH+ALdx{&pWf1=#e2Iz+=>+{a88cj=X=jbv@^}(_a_${cQK9(Jp4=c^h}QFBaB?|` z@EUolqqI$qG&xz>>|{xji|YnkDu%p1&-w8*)9}Dv!a&lQCoNpS5;M=Y9D;a}Af*QY z0T~Xhu1-R-Q%OG%`m+hzRbMq5zp`B@Sa^iR5kCFi^2J7fyUwuV`Rje~Myo5~ajuB=XbbDXl{RoRmpcNW}G zR*6Z~I!u~+TK_dSU-JR>O~Ge2(EYzmhB9Z>NObX%L_tA8ql1l25g!k$?VVa9#$gJJ zMs{0q-iD8+5~$ 
zEv5KFbRFj-&%~dXh`@0hex~sQ#7KiBg-}tTAGP3JUq@md(s}@{_jx|y!Cs+)d&tO} zY+|haK1{!z`;JN`i0g8=v_+3|_B$R+C0M1s&m>9>F)t}jl+FM27%VraEQ>lHyih?C znettr4Y&bj8f}yWRIondlgGbONnM1S&xZsRi6*XFW|8X4D_zpX&f6yJ!UA_$qfeyH z{{$AaN0T(H^MgSX|7y7YK3sxJd#%;Hlb6>f6LjO7h-)De9!m&QVI+QPe%^2N=i8)a zhH6bZq$93y=?Ps`HX8;5U)twQ#P2du{B;~zo2>zCKAy3vQkF9#xqr`EO$ zl=i2Y?u6;R8(z36w6^nI=}Sm7EU0;(lQVw`g&dSuZMUIfN$0=rgs*<9-?#O@~ z&|FEvHWP>?pQ4csdN8HcP>#(6O|oHTYwubQW^xCs0lJ!XE=l=ez>!d)E|t13;0Ql} zIOB9j8itbtxb^tFD;=bl0ku8rZeac}5hEP(<4jt$2W1#?P>mXI0`&ZbWz8f9c+<5`cw&q-Z zP7e8dsInpb*@i2ug5i4uZz=CT1MjFX1Xz;$PYXBh(6e3bLyOB8Cz#E=E3v_^zqSrb zy>7tpMX}T1B3AN+Qrc18`)>(WBOet`SKhr`S0-%#02Zu-YA}QzoN6I|jLnV_wr(E3 zyBx@8@z&)GUPPFMvyD6I&e(ZganAHCUhSDH{mjBL_Qq3wrx2%T5X!>uOEN*x{eCZN zEHmP2kv6Ar^*5w3%LSh=TgGj}dpOUId2~0HWCO3)=ZyL@f~J$~a_O(6Tz)V>^!lK( z@6Y!T5;2bb%uyFj+ZqemUGw(c??;M%mS0@_TdoV?Y4C@7N>}ro|GDn|r$c(@M=II7 zk{Mhy(fr0GmJD)zOpjU(yK`h-nSozM-ez?JQt0c%(kaStvFCW= zh9!1Qmu(83aUZ4}~5>)iAGJgrMRZht5kq+A99H zqberigk4+QCYsC3Rn=Qu=a$xN=I~c1I&w1VZ%vu_AyOiyXsVyl*cg5Hy`|pe@2H`Ses~N%0us-u(gAJbqNcF}4p!OReuXHZS_VX{~miW?oXUpSpkl z{!3W(`MRP)%Jp!%txxo017qx31i@A3-hxCDFt$mWO63INqu^rc(x9_!XQ<-A7l_7G z_8-)?XzWBd>Xr#0%Rh;^DLjcuoZs9H?!sXOjyxQ;4(<2^+LCEHBey|2eEI9v7)aG@ zWP~SWGoTXE&0Yst6L>@KR+T$?ZzDF0Pxq{5-rm$xi0^pASi?f2k1-fHq5IVWmuvat zPU^sZBHGpFMu~(4RbOhTMl)S&Xmhb>q&Txhcfg!WE~3x?6Q-Q00J<+!SvEC)CfRVd zAeFRnRe9=DN8ub>BQg^fGPVk!z<9^l+t4Qd*fDL=lJ*{cRYl|A*9Ancv!hiAN7NHo zO{2Q>y15w1D><~P&XMfUDH}C(k2g(S}HISL=O^vOJoqw0ZAD7|2ZJb??ZvCGD z;O}1p-7zPe_Mei{nhB&rdsfEZSnAfR2s8@SM~6Iftt|4i7wzItL8{%;0VDK%4wc() z7^2mc8@XYUbd#49-=z{OZ=r+ z-)CQFOzZQg?)*ko)%C~(D~~;=eojrr0)L#Vky-1!`_4X@xRGl1WShn!7rLtG zv!h4I{MDt_MAu>9Cm``Xr2evhNIWLe2dF*KIxiM6{#f<9X!!5J$+nD z(%!At_AQvJn=CRL-6A`N-7GJ@4D0Y9zb?tM_jq0W;V%2}@tpc7t=x=IkRuoCr6f&Z z$njBR^Hfh;z^?r`dvJ=bSxu%moF(sGTifxp)nB|DsSI{c)0tZdGuut$gys?hFH5O{_1_`E6v~p`B6P0+VI8+WyO1r&ddSP z3Y`Ve(Y4&E@f0&~;A zEl>pv-SA@)Iei1w1JVAa;4N7R8RN$fms$nwd2Mp2pX z0YIHGourXG^Q~JOC6{@aqC$TGMcQh#8#$shLZ@`l_ydzw=?qtrwYq7+I^m0yDE7KD7tu 
zhAUpf`0%hL9&30$HI+*W^fv@KgE0?*3Mp`-N5QjsTR`Y)#X3+V^9!x#&jV5D9pb${iXfAz6ZN>xAN0Eh&5v=>3Kior)^D<6;BB zg6ahGqbNt&h`}KQAIlo_gl5bE25jcf4_03HG-*4TYMxv4>L&M{EbCK>_5|X*d`+h2 z=!`9Xy8-2*Qm_kNmc&+Y_g}5Lnr&ypR?;HohSGioa8WJl-t|=B(#$PtWEZlP4WW<2 zdg}PxUA&Nl6*ohHmr@Uet5Z(+EDDTIuEIz$^O%F5B1`p!FPf{ROyO-~qy+Euv;onIc zYc8@7bbbNhcrTPuU?j8=I(0gvUwYB|f(KijPG)X1lKuN&6_ccFMSp`0elvOAQaQ=` zL*{c0tynn3R?Gos1UhpydG0HDhgZPXcC)T3%}Kv@@$CirU7spY@o=wdrIQcH)l&~P2tDJMtLgzmW7;KW!Ew`5TY9Yxjx%kQTl!Yf&jN__59|#p{ATv!$)kob6aG5Ky`u@@e2miXSAS1~Ug}7+& z6*5m3p(id}(96~T4XT}YvXHeR3IX>rUX+Nf8Iv^8cOfYb5%j%&HR#QszqSj6c+>2h z3F*-~1?55k^`djCgzhob#~Hs#G-3@)-T_Y17y8#-B^Bst7i4a%q(F%KypEDjMPcFU zJcpTBDP5z09-3~3<<^d9Ws*WoeKPy?nlKnu%%Vh|CEcc8W%X=b=K8D(s zivleN|9m{#1&{y4%sZZyWaa&>1yG0Ixhx#cLOL=Jie)}BS6AMTH##xTu9RMgq8Xi@ zo4W;wYrzL<7Lz>Uud4d7Y$JX*edd5wA~Lf-B0=8O;Z+g0tSuScKgsYfJQ`U(x^4l`aplxw#-3V~A@X5JTU zKe=3)adkEfwKIc&Ysgxi4>>lmxnqgCr^YVFKKc@@X(wmne)~lot@)uEwuLq~`}`Te zg6q{&HcTP7TWMOa3f1Y(h&&S}D919lES1@NdU+LI7LKlgylB%olI5`f<@6WkbRdl% z*)3VrsXqw1J-m8#=7duz;R9{c%;ruN>i4w1NG3?`B2E|Mgyv63jgKqmUhXo)3=y|9 zE{X27KV-jX#x(GRm+1JzaT=<07HlUNQ`JSrpezXgc=b{UoRKcFva;ohLR@_!;EHEN zFtc;ACJS7h80=~kQtQ%3Pq!l%F}WLS{#bgu^y2Tnm*$oj*!W?4ib(x8bpBXPkhv+6N zlZ=QHg%kW7gT`2QrF$)g1p^e+bvq@5m3uGc;Ll|MgsE^}zR3;}c$gKDUY9VoA_7lg z&lA|NQ;;6)UqFoKY6M$uO_wf)hTg`lb0fUjJPS449zevyY)I`WBrtd~=I$52Xk?1t zmQQnv99-LysO>aMUbqUCq{_eK)oEd4%)(kOuJtmrC`o>4ZTop8I~14^IwK1!5=O;t%1MkI^oc zAAPzw0*%Us%`_`dL}Z3 z#Cf8%@|V=_mz^6i=ns0Q8QOElRUxgT&OPp(*X`iyv6!xDZI@?TGOtKHL>|+BzThe* zP@7{aT-YP`u%0*K56}scAO@J|e|!G4WAv|+x8*3xhBSEydZ2h59kFgtNa}#1LAnz0 z?EL?X8=WuG^YdN$U>DZq3b8~izjL#(kupxcZ{dv)+lJ4%)h1$Z*X0von~UxdnhLC= zOJrw{Ao|EZGNGtw7ps8b3qdE)yXOZ?cy!UWDjpIU=pP*1NrXsfe3&%sf?NCqI57-h zxbw-66cGOSM*gs*hZ4vG;}wvI?mvfbgN6*oQaR_ntk8s`#82a}d6$ zJidk5u*0kKiTw;!ynzzGIx$L4Y`qNGo@5{0EOegcj*HYSY<>`}@5Pl&5Ud`DS_;)| zF(aOaE?3l}y>43|r{^O-x#M10ojzzi;N@3U2@MnZ=3 z&ri90>8W1bUcyecG542d5d?DfyT@8&Xl4jQ-g2`Lkg;MCz6+1_BfH4;dkf-8W%sEO z-QoZBmcz+-x0l^2&vEpRR^aS|YhUNx#-mpEeXIv{pWY+A-jr3$;Fw<8tkNKRFt&1X 
za&pZKLRfpNtUEQoy08jP>`kCr|2`65R=2BzCao7gKsa)t%=IrP*}#nWTOvm^sbx1e zD%Nm79#g&#@!q1w6>)PCV2w@S{1Y>D>p|txm*hE>K2R-qiBIBs4Hu{fHK%gDrOs$; zR_KPn%0!SywY>jmAvE84y+)@!(nZoDBke-SUEm5g2sT>i~I(H~`Z|5ralNbOz3nnzmbi_X!Gjeceq zh_#NqxvAJJ=Bo2tnbeMdqlJfi>9Ok7Ncc`IHUyshX?oT}%7wbXVA01bMrMZ}dW!9; zE0HwE50rYhFjt!}JVpMbYqgGdnNWATh@QTG=&A38`y4OlaBsbQ!prI;@b_02F#ffZ z?7OVVO^5}qiT+27&ejD)h|;L0&3%Mr`9FNU2RzmN`#*k^qRF99gpQC+g>32|E1_Yp zQ`Rv;MrPZQ(QR)ck~k!el37-=LXMFUvO~yT|Ld)Fe?Pzb@&7zLKKJOZbl&gR^}4R- z^Lag=G!@_R@W_2*>pg-*__NrYFQ70!V$^q>xlc~t@V5&zYu9pRJ5oXz=5(cC9;MePtkh6F+q-{AyZJ zI%z@3`k)mt$Lv+nsPopS;JystRUBRScd#!qmR;t+OP{s0907ri-8sT{LHy)1PlLfj z>^gO;d1H<4Z`z~80X% zr)4In7(j@Uk(*7hIA_Dm!txC<{SS*UKZLu?8DIPFMc7KuVTY;5o>{`|*=}Hnm6X?#c5Zut7vVC-U+9KfU?w zm94Ek;}-pY4TvWZ6C!9aI4?$6GH1B!4)%CZ*WFw6O}S;U zUMEq-uyTyLRkpp{l1TD>mhbu!#x%m~yUz9+C2>=o$pU*ThIS=A**^hxKsROZu0ZeG zfyI<`D8X16ySmST$=5hz(+0D`jOi_fu%C%A!;4%WqUTK=#wsty7yz+{OL?KMIcrO152Srs5#F2Bl;Vi#+J%y& zorwY@#4X#XNKEDXMO@XqIBu~Xe1(t5uEn~W+oRlX%8Nn2{nRR-Y31U%6)%d#e)yO9_KNjYLGo)H z%hCI^w7L=Kb|8~8YB-UPCn4X-}aN?N_6mgLfrEAV;EOeMdh2GOZT`wRl?Dnw_ z9HYFII(=DblZ(9f%;DX)8Ia}BJ?v_`k>=z7Ddzi-Qh)agY|#BJAr?$qPyZeE8yW|3 zW@Ld<{S(*n^oZdDt(<&D#kr$`X`ik@fnN?cEJPl?86}huk?Yp^wvOLp%)*Wj?Y?b| zw2`guP8Rak&@xhl{a~Ytgq6#h8JoIyflKQ@ufyp_y_O2#j@7jD7N!rv1}IUohbFe5 z-j=bo%|IEPW34?(K5gF=zrLJg5X`vsh*{)X_u~3p|Is3n?f8g(g|L_9O%69{zn?<_ zz5A!NBcKDeVu*O*tP+&@C1iPSWV*<_>LG=_oK`iIzrv_h_S(!o6|qWv3o6|8`h) zX;Z_i&obTr^T6~R-wuUJ++Ft9GF`AjZ_mz=a$i!&k|@gKeWIF))nrZ7*uJq*H2w4l zYj&F+_gn8lG5n+>%SR~qY2OU>$KJft_jE134L(I!z6lm7@v;j2)!it0oI2CACe|}* z2T;3=+sm0|E{-{10!+pkVozPjgvrwtYM)F@b%Dl1LV7^AH>FC+Bz5U#o|q*x?X4$cux= zVQj9?warG=vo~<%L$2Dpkm+{Jr$Y>PzkBx3=;3Sp!%|2qrM|LgBkhqT1kPZZ1k3o` zx5P^WS*$)n#;(`;8`d8k45-@k#5`4U!jytxPx`D2;r<7n;FsjyBg_>e9rsgu54Vud zja~3(rug#FMx@gbw?ta0GVAY1f1a;Fcd^@nDATcZWnUDyqF(~5&$KgsZ7bZ;Wu5wU zf+zWH4+XB5;M|f&!pr1lyf?37R`mFl3VpX{Adb$zmig(7{&iFTxzEe7NcqP6NX_}z zleXBubwsoObwp1J7`@5&1}4L;?Irpw4@nL1XdS#Qz&1cBvH#CBE-LdplovU>Kb@2< zoNTIL=FaFhcDPKOKT22%aEBx&nFbXFytfplG<5Yl*F;^ 
zujW_iUFVjrkv!^}q$DMWBQ~qUr_%_pw9n3V_>WRxR&F^@HFXP5@zc3oXx4YQODOYy z2p-#ojfIK&_$b#ik0TIBK(#A$Yk>IQQZ4TdKdQwhWG(QwhTo1sY+s0R5@^p##PSbd z(nR$%9fcf@lrY{O28Gc2$V3X6$PQP2@R&Vk@TU@(E+Cur-{M*?2cLtVpW36F{R>#+ zL4Du(L*&XQ8IGeK)r;$Ap}JlMHM~P8i@dITsdekq3Usa=L`g0p6n(7|3$8xBGx;ck z+tBx28%=7=yU;xVi$oo%Uvl0rb^FsObqIqi3HFi}7Z!pggO*C0;yMSD66{})OG&%0 zUFy>Hy>O;T3jBq51112_H>M&gUIaxb0i!k?K3 zK>P$c_pDJ5BgRLX`p%v($)^&bkzzAtG-zMBQ>Q)|1e~j2a)u;hk&BC4|JX=K>%*!6JL{?AGc26_AJ!dk~W^f(9IG#!3YnS!0auT>wUz~eyEG@D@-iOIqvoQ$C;V<5%XMkhdb1pTljeORR-xMCfw5F+l)0eXRs3#Urvv7PeV#+ z@pT9#H0pAPLVGe)Yywg$g<$N}kNfDb1Jiq&pS6&i`i@-N>>+pe^O}bk4s?fWZ{K5S zzqjH$t83b`;U90eF*ePGR&pVdj)38F=$Pwakg}8jG*+1*$>w0*b9a#MfeXTw8s@ZI zwLKPr%S$g#kK$CC{6{a2bT3Ayi##X{roZSYhmrgwu~?VN1bdhiU-DJ&_Lr#v4DJa+ z&c{E%NGKjg#yVHTmY4-sxF5}^G_4Ueo&CHa33{F|gyaZQygcxq*jMyvcu?x`$ylph z6ae@8-Zy(~+7T3M)Zunfb+U4EA4&>^+Faa*a_2&=-KOD!z3+e`saL5NRDQb^o4!F> zvk42ON>&ka>kt+2aHIYiLv(1zY}f*al_#iZD8_f}b2!a>dm<4A|3w69vS|sAr06M9X6tVcliAB|XWpk+#S5&1meov&+=)4rYCJ9%piibB3EPCcFa5J$dOjvxa(`M+J`Qn}3!apu0{R4U5Sv5$w@Y7G8wm;Ha*J0c?TxlEsAv<&(K?_nLhwz7g$@dcy9_eO4`aKO{lz*iAwP;^QGH8LA2CD`HhHN+NYws>`Mx#&Ua0uvV!^jurunqYq2awvQ}bIZfLbS8kR(! 
z?Jfp$dR7+SQgA%Bgg>2}-nB#@Q=+#C3c+i06pA8Vfradrnfp+CMzP#}AY z<;gK-;uH^fbb6#T@#{`4hvKnbV|)oES~*_Iyq%{a-NQQvL5%Sc?Y3!n5-IYCIb9C4 zaCQ)B1o5n_ZZ;}o1GF>$uv1^INrEhpt*L-J5=%p(K6m-mt5^e~3^w5K~MTbwxm<{o`lsSWW+0vm3xGiyRw zz61|LfKT#;7tJHmO4zI1k)QaWwmi76&=lbMl3Y-=!(62?2lbr}3^EPX7o+|G8YmW_ zzBS$+SNMGm{_RKLDXSw2baj?Ykzq>IlKR@}+2BAzoWGSg-p*_w#^#Y};4=2k#9|G6 zuj&CU=aRbkY_hKHqq>>4!>3Ze$PK5L_8S7d-KnzG6kqw%6l?ygpq_br5b3gaf5;OD zv#h60-B0aSvM=?j!rNPA~n}q^bjta>Q?J`^o%m?444@wXTo7?b@>t3tPvzqLs3p_ zR(SE{n(nzU0f!bqJ7`g|26gfg+N)o|4(&1?+*ro=(u_eq$(e%V>!>p^B@VImXy=5Q z4pF1N?MP`lG%xl7HWm?K&N3XB5-kbUu56}D~XrqaEsgp#mzQwY0*lB;6G8B56 z1CvCM#@5p}kN2N`q-=CBP$a;A)Q+%Zg1wREJk`i`q6z%bkE6cH@|)g#23{LCffDnj zA8bGPVR8K=VKi1Z-LFDex@E3-1ipivn8aAx@DUyE?lM?EZe*M{RK=GF1=yiXqjYP) zXqKw|a9|Z};z&uzh(K}^Q^=j^Rq&d!n{oY#o05jVx~8$MG;X`4S+&ZOw!aC6fEjO` zIydSEXXOTgR4_o3nAZMI8*|v(E_@IG_;`31E5GvN&_GnM=xkq15U3lDqM=r)h}pFus&zL{-l|G^HnvtV_0E*$#Z=0R-b#N-K} z%9I9F*p7ZwM;;fKhCk=u!Ik3AE&I3-IgY0_arHG6UvQ$>XQR)w&ThVmg#eEV*dY|p z(BTRj)6cWeklo07*RSvoIXye&u&wmB*Ipl9;CB1AA1Yh=m@G`UjnuCmtv9<-51Zb0RoWlrV(GM zLr+Yjg8GBj2W?xHk7a^)_3s>= z)D3%BjBcJ?M=q0-F?)-y{i8iD4l7`5PdcLUwV|9xKsQ*zgl-CuJ6qXKsqp>otTznL zX~or@H@~hH%c{-nm#?%0zOSgRk=;7IrmcJ=w10V^IOQl~*A_1uqSE<+-P6Qbf+%MBo;wrJ#(aaBguX;IeOV5N@!zP}9B;6Z3P@Vlc>$?Fd zIb5xq>rY-WaO|hsG}?W~v^!qoRMZc~ZSzP^T~e7^@*m?C^=&fJzu~grQzH<3bkrQM zUV5~Ya+zs%Ct(KPEDhg&7ln4Cxnrs*t;tFCEnl|b8A*RHaUOVp7~P^bWmj?{l@osp zwWIeSZ|oaA5F-Ubh?M@TrDOljOVmg55{ZwcJ)NOEt~3ca%(5~uz2=>|`Zj{TnK{9^ zFD6jyd&VL*pZDm30J)1EU#r&@FO#eRTgCvDEh^6)!Ua>_DzeF-K+8dBQ(<*<6iPl+ zYP*z#m)G^{!r<-llt|R@GqXb7a*mJu=k5-SXM1RoJ~OU<6NL)Ut~TU1)Pwy^V?-lE zVruZ2THTn!{)qAzh!lU%jO*QjGEp@v2&@v{m7YK}72k)=CoHL>Oc^6asui~lMMD6h z^OVNgbW0R|gkH|ZXGH7)~MGkCWo zhzIU|RcMY4*OuPTpt+=)!HLcHeb~CI%sy!(-jhEx>WJO>?DBZubB2d+ZtTHNs?}{~ zvoomdxd}gCdGG%HQ_sU!%2Ji8%$tP{?2b&j6n$kQdG0mnC<>Fqhy8YNjwlb#bxloA zn-rT>Jm9VaiQ~6eXQTTwawopISK@IOv(92YHL$c`<0+b>%g-g@4$cE*8=zRyK@c*X zhbE!FAKzb+1KN;hh_G>9h*nk}48CN)9H06cf8ZM#A~X}bKTOy7m8n@+;#^O&x}|wD 
zLTYuNg>ljuj^=%ZK2GzCunBrt?%uPOWSM!(f3`$!NKqU9h06Y!;$>_99cp_SVPg1> zH}$f>{C~*VVCCr740jUthZr>74Py;V2aAM2T2RJW=>Zh=xV|%77bnm&77I$*d61l5 zDdh?%jR`-)<|SNtTo%bex4>5KGu+UI4?WKXB`Z;iA*@tp;aSxR8{7p|frgPel{N0z zH#WW?0a7V`YE!X!YUw#?p$B!L-Xc$mH%;v652-5sRT<*^X$twEn;#kw<+aI1&5I*4 zMRw9cT}cUA1go$U*9WvLMn z@GPdOQ$3IaE9^QZCVg^TmoMQ;AU=Kjt=1Y1aQe_;_&x`6x3eTzzA(@| zh>-2lm@u}wA@c2OvnjDJkDRDIWiUqd`tU%XEz#R9-zfCi?S$j;&H?5)_4WQ7ePc}5 zXNj8g2XWb{K|Ke}i7&`gtT(gs$*?GracQ>fGc33MV_uj*UKt{|E1xOT^4rt5NQ1$+ zo;(EG4Zg6?e?LEdtq_I!aGvi;!_u5QE#>E`Xdly#lEGP^jlX{VyMKO%&dtYx@LGK7zw54)^HR!9lmXz`-vbMi8dBp8(0?=(NmoJB2|;lO zwpBCr9-n_1?lHaX7R`PK+chM-)^dCs6-uO)<(suU_wJ)|p0{jJq;5bjsH3!X-0*kbnaeEye%N@@M`YH92 zZDye#O@eg0TDjf1^2x_O8Tk`vpcjEb|4TdM%ZWGIS->K5la+(RrWEs;N~gRF!}nU| zOfI2A1Nq{718p1}${vFrrEKjZLd_MfTW!aAK_t|ZLGDZd=Qg2d@7FCx1ZIzVGiMIG zao6LE(9Qt>(^viZtFKJ&rdT~MF$1cPOuM~cPk9P68$VfFCjRom4z#Z_mIiv?vrsm; znFg7vnOu7%Bv_mDCn)6K%eg)6T3om{k_7G;FPdM&4%(>zfYf3YwFiDb&Hr;FSjj?Z z`Bal1S|^Bl4F&8%ms`~QHJdCKP=*-r%=_|`EEl~u8f*eV3WaJ$U)m2dw~-R&OpAdM z?R%mJY1(7mFZ)BkxrJ7kx^4k|!;s@50=e@bo(NM9J9|r$O0jYTTBmf2_-9fQmsCGx zVE%!*5~fUR>?s044thqHr-lcquOfu|%Qp633qSf&Kg%e|wRr^lLvf^gb$pD&4u+zo zO(;o6GtDsY7ZJ1vaSR5@LOZT7y|@?6E0~e)sxZC#A`grX*gyR0o({I^3HwaI4A%HG ztT64ye#2lGD8E0L^*xGlz?P-6w$l>ljX5V49htxSkG`8M_&n7DctxOd)A4n8XF*i4 ztQ|YQ?NN-Y>CzayE-or!ll#sdclqJ%Yh62pv7g@v4&7dX)j5?X=UaA$M5*pHEsqf6 zOr3@%k~~58<)(A!97NIH&Y+nXKeoYnyHcFuONajWwp(qe?;&Q%j0eei zN_l1}jcu;9BIO}3ev|^W^k(GdCgflhk+c(}L6^~2XP$lxtz)fW48fNv4`#^HCY(`n zj6wbgM0LxAgCh#jsh|VSCadGf2H^mog%$q)mMcrbE`8thaa-`CHZJ$d{WCB_7MyCJq8BZ*B&6Soi zL$DrJ9xBTGU_~pPPOC_J9PtJoj4#TQeOXHkTfkSI7Uu}6{QYtZb0eUcODq2$p6JpC zBmy)TcJgF1Tr~8hdPkGP#}NUfl+5$Q72Xu)6Yfv;V5X9qMmELfBQ)%Hq=oJ8HISTN zTm+(H02tTYkgT=0%@XYf3dbwBmxiC7I|fm$RT1NSb_^%1=MUi`l{I&9rK~47^UFrl zUlH&X*A;F9Pt~iMnoUpX4md33fA_s4anLiB=O@8Or%s%IMJqC27Z6@5u7~dv=H^n_ z$wSF?8$L`figA|R5cE4H*9pKT$o8L9MT4RLRBx}bkBN8&W6=@X<^d33mt)0sHMNsF zIy-9z+GI|bj@&Gfn#|YH6bRr;fk;l+DaI#e94Y>7AVdhYa<8Xx;EZ~74fTQ&m550Z!`54#h%s%574;T< 
zxT?qgeLu=w6vc9wxm^{t#Ei+l>J|VEL!a=xM9C8 ztSu+_-up&VA>3p>p3laanQQXG^ewoM#l2!iFRSuESFm5UKfA1KlZIK$*8Qdarcxp^ z4(t{jfN8j|ICuB%UB>DiVrw`XuCN*ZXRms558)W-n<27EGBQV>WaAxi81h5wKPh8f zQE*W_fRE*wb9Q>jOw}szH5YC+fAJq{L@s)AZ^ffIi^Tr0 zt5GyjYHTdZqGS!yGIG zmZm21B8idjgul|nt-iRh6V(g9?JM(%U`@6!I_Aswt|?==Nvg-q0{dsKdwWSbn|ph` zJcTyW^O$@Mt4u)h42C%=XSYzTE%DGS>f#?|`L*q}n>d6Mq`I{!*tR(VJ_N;1U?EoT z^p!WktDbwbG#h_95L=i4pLOF*)zj=_E~T6qavjQB8r?R(w4(YWK+L+v>FnPq>u- z@qcMRmq=|visgGwFk~ej^~w%RO-u; z5^t>^@`s7@_X*LX*CAW{Mq_TpTr@zYyD*7KC!P+1+ueDOrz8u@(4?n7@E>{^aC_M- zDm;BaFeKR~u{gPNSpLoLOFFuBtM4BB+jobafX(#=K866tOUvsO3wNlZKF=wEK7WIC zVsjUMa)*w=dblM}yl>qPCXUDy0bT9SlVeY}hlxQ#`mj_o3W)i3$XuKyJ#EtmA$Cq+ z!8CZKT(0ag z+UcCdJGTd`+(-V2T%`#U<=fc|qPxd#gLr{WnL~wMeVa#f;yU`0D(P8%?MK-vdw6Kuk-KJj8eF0he++Fy zd6nz0F2A{}LIrpHhn(pEf>ogD2Yo4?>AUoX1=S7jZrm*6se6K~TR#hGy;uOi@Kwwk ziL?irFG^lJXFKrX!@H3W{&ozq`qszxjXu+@(Rbv8&agxFC%DQAPqd!cipls(Y;E<8 z(f;sZY4v4sdN*m|DKRaHBLcfCgzYpxR*<`{smy3EaK2y~M7p{mSrw}E$fSb-ie(p$ zztE|U&J4Z98CLsk>$2eyJpwG{F&R8r;i*d55N}#A6hygV*qI0&L=faRwn&Wv-5$auoFaca!~*G4QL3kG}r^BT;!14xJR ztUWz~?VU2mK?7T=%fG>wsK`U&hgA~k!p?Ltj~4&!gr!)RBQL03`@Lyo&@`9W3LRYJ z^oUd86>vQCOe5*3Wh-^1wlS`MF;s(URu)SbP=`IKsrGY5Co>-XBoy;4u*JO8cNQb3 zIqhto7X$~2nC!ge##KBei5SKQ39EJ<_lYIB22Pyg$VCEzGbt-OaoLSt&eb4Z9ljo4 zf4tN7GGAKtdNgfaAsu>JTh%-5)hqESy3;X3Dj;J7d^qsv3uG%AG}<$4`d1JN_D0+D zOB0s$@~I#7kX6P!pNH3O{r>3wHD3zf-8xCNakn~f_erj9#QD$YnXd3G1k~Uj(~Irc zj>>*P4xS={d^`D>CubGx!bG>XwzjhQUVdwRujiaLO`{=#-IJ)MWjfHjN zPozp+KKK-Kz^1%gN^V-&FJSX_ArdJ{jC1@$Zwc{)i441-zV~$Gfn?68ZZS|q9915N zv#a*%i0;x-*H{3tZM^VaWy9ksSAZvi4BY@JZFP;1uyB_Yu>e@ihja~g<6h|J-+_0Q z{g+i9*ANc2L~d){CuXhVI;ugtciUt%{oEYVEfF`sn@DxyqQ{rum81BQrRpoLZL6yJ zf?U1B>WP^)R*d#RGDxPKg94?->ULlxnRVq^vO+sYxB) z;>(_M3KYiL(V)yo1wV&uVX9qM;f!*l8-#+^p+kKu4vl@20_%yd)(D)}dc!=GA9-s3 zb9898{j*kNc%c3w>y0>9`R^B}4xX%D+l?|d931GEp~e$N9b(jnLt&rYxNlL2BgI*J zh0TGu?%0wSe)f2cglDS%8P&2^wowO)?%>~hfYMMECot)~atigLPFV^w8=aF!Hl{*V z9hTq}gAh>bRY<_B@z*P>$1lGlM 
z`UD%Uz6bUF<#8d*N}%)98oA`gQVZi6U7d5OL~WDJgAv7|FOaTn6Xhja_EcZ$Cdhz6K^j2m)#vM)tNV4k zI4hrTZ+rmD)%w;r_<2tk*Uq{8*O_^>i;$=6J(oi&Wkl1cja7LboI0A)w}1csjcbTi zN)@n=y7i8>+MA<^QKNHU)2ayC9lXv;u>|!Uhlr#!Af8O?jkB$?c6%eW;tE^xFbT{= z5J+^X z6blQ>+RammBEuf2(*g{GuuxW4x2RpY?FojX*YF(Hfda)4{0#&<3YAIv_?%&$`1g{w zO5SNS{WMN?&H98ZLZI#<6=&N+u~Vl4Z2wt z_3=Eile0t_U_K^F;!V{2smK!OYu7J>IFFK2UjMW}5&qF7ZRzZhn?1HsUGJ9|ev9_b z^l_5A9C231{C>dReTaNZ)CY3c(=wX#c|Pae`)0LAx`l=Z4C^aJ0y=QR4f=vnQv7=> zGEt`1`5pxalq=~g1@OZS85e)Yh_fTL`Xwt;rB81E+W1=w@TdRyUB0)Lik~^y1Bc{U zT}1BzT! zy}s^3WblPn&1lr$-%k`{`~%pQ8zPx~b?CrcLzJV=R#MY?D#(+1vjNb7mtv;a-MqWt{g_i8#ft- zixrm?7x&BbzWOOH`EmEPJp0j_t(~N84DXH^L(?h~f62qW73>}Q^j%pxtAaE42TAwcP)bfwVYR&(;a%LIa}8nR@()CPK?Epk#reUZl)O*ytxR(_QSQFd zhcG_JskgBjR9rbZo+;eeEOIrkb9=b9!_Hfb!VH)ddd!Bo?&uG`5tcoQHg7s2s^+8_ zSR+QHdZ%yTC)@GV4@#$}=^q?zZyk(Z)ljh#?IQ69A7(I1h^KY9Y75_LzZb`b7aWbr zA<>B09Uq{59^}m$a~wE?dflZTR}h@Jzc4Sa6;c6z1cW*H!MgVx3|ygD_DeX`?N9^s zN4{xIdRyESd|J1vp~p-@NLsu2r#Lu1WvbIs!uHB3p1#Gtov5fQv^H7! z%prW0Y=e@jbjMivoL2r_3NrlByX&&7RmBDb7=obQ7KwUvBY|l3=;b zxL6~Np6YpJ5lBh!ltChchFtEr$1uIMyzvx41$JGWEotexI@9>EQEIvviGlIfFIAnQ z6|HZ-H^Db@ZdtzM9$`0O39_0l-S^PGuhcC2t@jRLtv~vRSc{-fgH!gyVD4QesxLn8 zPTDK-QYEa4Cvl`Fp+E^eQxC$@dGE~t?2p8bzDPE3?j4^6!m)(#eoE#EBf6wrq9)xc zut;@Vn#K$jy^AU&1Z-E;Jo6!oRB)lfBLw_XsE^UoTGHq-&?Ay+|BqpR1<>F&gTVEt zk}|McgYa*#;5X>`ksq1P9;Ngk^*Su>WLBDEL7ey9OV-yUzkkc<2pkWtwl(hB zrQ7ODgc_6|i8gtz{q`m2ND?RWM zroN(rH_l~3TCaO|L-!eJNpyC;V?h_o$sgB_4Lr~YR zq)As`B0*H%ZcP+2%{9!L1jAmfSC8(sbBTlOd^IYS1%SgMV6e)nk~Ds7p8e(QB5mKz zOWIl5f`$1RaY=aZ^vf(Eamz^O7ZE2vhx8`P>YOS6Df+NL>WTL;?>=98wD-xhc96S2 z=S450n)}Gt%Wc};y%Uy3tU+&EoyFD9=2ysq)mqwt=~sFH{F&6P2a33DE7wxlwfs>d z?V=odQW-l55sV>#hBH~m!T#0-RL2oCn++as&i(Ixk%eGfLwPGnQ7lZZrMWrR zWI3pW>;aM@8dgyWLy!gZ?7qK^?}KuV$2~^tISp*O3!EoD?x630NU0&O>|o?CIYZq|wM%L>>g zm&Tye!WZd3V?T>s;FIc7e^&awn@jK7o89SRfqbvPPfHK{v<6uO^2vUX#yx(74dZi~ z`i@%Y3cL)8m2>7zQ<#+22yNfYuaY_|tGQ~)7dl2^|G7eV&GYN@&OI-U54g;$xcB&0 
zLCrcf11@X7WVP<2?}WX8sb~9~7)VZNES&hKs-66;6KiZT#SH!RP=E9tJt@J^JwGpPf6Bhh*4guNvMrk`WWCi^8R(>!YccSDYWp62aJ zYOBxpJ2kEsM!H9IG2yaz-pWg{8}OsmJmvRC!5|#9)eGkVVDZb7G41MMEh|5>r7Y01 z^?{tt!l<|h@wyPV>$-6oQ()KgX1)|rW=iTa;Yzomrk^<;fT+UlsEE(ZIEL-z?{{wO zZm&R%?qTEr%=Vb3xP*O zN?A&-D2_F0u*p}-(Kq+`AAdw(rgh{%Nwb1KBPKUoWcUhC_Hno4X}u9mBkk8=hacQ1 z19H3UN#sOYOGZY%@I+rH_q=sw`qL(l2*t^Ag8TcIPc$!p+3d0z(^E+c+GcNpyMw>; zZ31f_h*!EcYkFh+RJ1RG^>Wtu*dZ5rN3k~?FFhD&sdik$Ubt@NsDPzaQ0V7+m*b5YgVv#|ky9V$Tz*_>6PJ?A zi<}ydHSYCd-4vdy^rtT)9Dc()803bed93;HnH)|EYZ|%202F$9gD=yARhl=Z{B$AU z;Ot!+oiB7hV&v6foN>>W*!sQ94&D(G2JMe0lq`;5oKi^(53b59ew#(11!}EEhv!aW zfApWC3UMlw?(;vPO%5_p<5+Pr`KnmC>y5G2-LbT(ys&yiP?_G4D6=YW5sb*yzsts* z&4@pK^}2lm?i?`ko$>oW#e&%Sk^ksF`AmxrsFOva#oE}*Vk;GA(;n2U#VgSA3q`0t ziN{tKWq)L)wAf7n5bp{8vx`F7r}6T=VE2TW@f4{gm;Ma_eJDB!~IeK{||MJ~cBV#(n5UJ?=sWj5Y zesoE$y{Qr?GWb-}LO;`p4z%b?Io_jJ`Bc=M94B-`0W2iAFmhS#>@&rmC*L;fOEm|S zDL?GqKsd4I5qwu98AC)&^e5P8oBNILXfoj%S)H{{QU3gXZy!=enLk?4kga58ov3{& zVe?edGthDjPc#9q9*eK9iL;94&a{+K58unhEX$}Y?rcAgpRA-}t9yPoMko%n_16AE zwCBqmcc4ou93WuZSS9nV$Gdwrp08pOVis?tJltz}ckwVz$t&c;vsEAT@+dfo_{4D8 zpJRTaJ$|HqZ7AIG)joS4H->$psH;$+@Pcxuh%X zeVBRvCD}f^&5nT^-Px)?K`Htg9(J`ne8M8~XT!esF^~AaFFbT^>Qpt$3mM1dK4z}E z#hA%jO7FWU>RVf1>JHmzJ=Ld|t49Cd*W{_#)@uS*w{e8guKdf8{&0~uV#?|m4yw3B zFz%J*heGF5o<>%IDlP+7H~JpXU)9Akz6tKxh&dhH9`L?<=bo{~At%kqx}mRTB#zjE zfm^`;I%qG&+XgyP?HSzm`xUkdMZwmfU~JPWyCTf%d?<5RU?Falu92^=F4O!#(z#^v&yiAH94~>%hKUOee&e zlS~60#r!Lx$X&ZmdA(H`j>^qC{IU|Y>{V(x*+ojTDT0k=#UZCo2@8JB;SZ9X7w~rR zht4JB7Bz%663xm*t__#noXHFnDf0aG2xsxiU-o8?U7+KBme3>5*0Vl5CNbLa zT_?``HHPokgM-Hx7y%y}j?>ST9kuO$zl3Kf*%z#WbG3Qx`syywS@g<1Oetb+corU8 zQGfpR_X4HQv?0y~*Qo~gvu%vOYSS;;ZkfjR$y(jjIO~El+9dGd*ZJ=NNWfLkQerGF77X%O#OlYOpuX&B~rEgM4bC z%hQk`5=W4|m>&7)5Uw$!byycyPkpHjLe(r}g=0Lg^TVo=M|f zyXh>N61|(lG-V6fsqb)3?fLYWy~4Yh*=5-52WT;MDKdL}`BSJMzdAv=3;%1lGlYOjR5{dTB48I=bQvo>oUE(BKPxq{& z3s!vt_-mRFy&K(WC&}jM|6Y*X)fV()==|uTLHBTsPhF&dv$$=?cPx>`Ivay=$|fz` zu_^M1BPXOGPEL`4xH;+|1ZWiD{a9_O;@9K&$;Hsc7N=G7Fps^^qpM?mbcM33=QdAO 
zula3CPqO_8XW_U+U~z*18NV#ibNA1CCW6h>(Gfb{wpriSzL>k_DP@hpnX^FD%Z*DY9T*k%@G zE)7gQ99#+ng2OX&5^RYI(0l(}!_o=-3wxfIW{aNYdQ>EK6BGq8Nvo@MZA z$^?dM$*4NTjc0?WeYCVyrsSr_#>U{I#Oqk?GuS5x9{7GlgI61QJDaucP%&nbHcq3x zEAei3M24S#1&~IQFnX!hv$W_;N#`ehspQ(v6IGmva^>~yd-v`&ygU&QM_$3*Jrn%M zV(4H9s5$!7W!=U%oC??6a)NOAMdDL5=nx^R-0J8vvENhub2aKdf>ees1f6~)_4LQn z$$|7EOWd5NxsYq*V*l|pd&}YNVK1;>&GD0ZZJuUKf`{SXN`g|Le6-KMTK1zJvDBh3~p&zcrUuwSP#=C)?j$A@WE(5~=p$zA3l2 z;;!OXd}H)ur%lLANIQR|)b6ubx@6MCP~m5$xt1)U3#u7$tt_QAIK#@vfZ62c@EkR@koKc zLyY)3J>3U01y07g@1`)Kk_bbaK5u(BeZLtx0AZAG8~$b~VFiCD;zsfusQQc z$2L_)<8`<{@F}z6-TLMDcP`%+;&7Tvo0W3W=re=bNCc{_6<9I25_iAKJVL6#hVGQ@ z9UcakuSv^6uzTZK!ESBQ(!*vEqypt(ZhAV3BvRazg0asnNHPMN$F@U77+|AyZDVa& zYhrRzHYI71JHr!W(YuKm0zW0LCvt!7Wu)ms3S9BCK#%$zcYl8nRzY6)K1~5PL9z7F z;)-{yF6ZVt-=o8CQM-;Ugwfv?nY#e1dw1FX{&$<0Zgo8Jw8Yu{T@a$mU>;o{m^sy* zS6;pfI>;N8rvtnw!&=H*=$I#_NMBSFlgGgRNqmk2M=aY_XHvN>N?=YW!B2&|9_{gHw}y@{{?4Bv$|yyXd)#N6%;Ox=@ejn^4pt3aSQn)t=P>w z1cr<3<%AR~;{594;l_j(WKT8!Ve|`7l?2%4L2cXnZg@6GfOJZX3#Htuwn%9@AO>V= z@dUNZbS7<5{E3%2ITHfVvFk%kBPt}e3G0QMd(FyC)v^Z@J!OlHtRg&hok8GrIIeN{ z4PeG=_2fu))BSbd6WTb=1s>@*5jBVpwbqYr=e6{^lC9@#p7HPmetoNnF|MwBWlN0a z%-uXx?QiL)%&e@yu4T-oAbg|!dz6(dMBvx}2zIe%2cOu}L;ulBQW{2H$a6VZicmfI z&m@~~K03o+f?PV&u+`F>M_QUdC9ywk@9+NCdOPGcGEEX#knC^mPBILy*8;1NRS;Y} z!o3St$p*(dd()Pxh7ihH1AkcaU!wp1jDV3{aQ|fs3trypsNtq9s53}tTX29}<8(?t zFn71|kUD}|W5P}hl58s7K5^GtYaGjqlhS^z6Dpvvlz zr~cVqld%mY)sFR6JfRQ!hM6s^PI8aBM2h&|xbBZ@I1p?$B76GIOfYEtaK4Id4?cep zvI&gLU4Hsw&J9QDhMMalMo|6hDi-Q}oxR3}=@XJsqw@<38^nMvzGL7zF#B1cGJ!9) zd>(etG7L^#!mm4qogZl;Cptt+NOT3gQs;rfNIi9#8o>_J5=YFv2IaH(sj8Mq4&|8D zW>I0%Go>@#$!E7Z#le#B^9o}AEv$nZ^0l9i1f2fvuJ&O9j^{22=vv~52a)i+3oh<8 zso&Z6&xcn-$bV`mH95(|u&as@UYQs4E4rV~(A3ZX|Mj%pP*bdnRbj&;je?Lv8;Je5V5G$$IT zE~pC>Gl^x{S)0Z;4qpLe_EfyP=I~~9RUw8bvcy0Gn})^=O6*fz-Wmu;73k{74$l5# zcC(nkk#KkJ5RP*7#Y!92Ka8L`mmXfP4S?_IN}f^Y40LCkw+IXl=uu=zt%3srYk}r` z0D5eTBF=+o8=}7&CjZ;O^cix+(@?*who8ksx7qp3F#!R~3Vw6{h^(5usHUrr_*cj` 
zXERRGQ7pE_YD|9ND9GTSB_>+UGHZ+%WZChmGy7zTjNF=%`m5gt4>*$zZ7ht--4+%X zJ)l!4He4rF*909Id;3*?Qx&8j7coPw9(AFYlRVRPvg1bXwMW_9xXKk@t{cXoxf+}>PQ!7LO| zsnX03jcf4U3ToNcsa@vu+Il8<k%I+i1uGp;dg{cdK2EkUXzp zZ=Ub|)o51=WH0KvX=oq9!mpzOrCnBW zUg^05*I$ zZ;&@;a%4A-;!PGIYiq>ol!(m**huj;?z}ZUZ4bVrQ&mi=^b93o^=#BG+85^IQtys+ zlRhv)G;EAz-vw@md>PS?b0aI>UZ))+1sMV5ghgCcVJm((8&~!fIiEGpWM>U)_I0 ze#S-eoycv1rZCy)Oj-qmYq2B<*J7RvbqekfY+D@BqHx-EIO6#m9~t*;RqC)>3Gzmj zVQ@zcj=n&3^91U6m6W;vYC=dHhf|4 zmH-QR*HD3-U;$9w#1yHdKW2XjwTHoqZ)K@=GlzXMt>;=rOw(ya#@ufb+o*$=XFNG2 z65H8vDci{wrGdhG_Z^5~B8(Ygih2$ViwlJkWKi9qmiA7Ykiv>1nR`Ar&@MuhdjuZQ z85YeTF3%crmg2kNnEWVM2;~`8>|R(7rZUyx^>kS&l2Q96A@QZFj3?KF_(kXhpAug= zTM0iX8tT@IPonFt;;W@%1{LiY6PuA+*IX#`9T(Jr8(@>@Rsu|962S8xCFcgpx>*bT z=QpkB#1t$e*rJ0>3o|vzHy0%q`0HL?DUH=2zvjiOF}04)EvErr@!p~AUMWjJdYTMh zwh+my{W&RnXv8+pBt<6)5i-VlR90BE)N|xnY;cZm?C?`>zJHYX)O9@5-f~1K*3HA+ zeHv*t>rO28IP8qRNh`f~Q%28e(T5#X`E0gYk7l~O-O(p@ zYeVs0VCzpmaU>b;fdOZ1;#p0BPYKSK9Ef+hM9HP=TfpnjKPC%#tjOWQSqLP-9glp& z9WuOuKHCP{QH9Xs=!{PEi=%Xbw6Nr7EdfS5vaRPgp?XILsvzJh3>66!`pABUtRRG(Et z!N)D-VPWm)C6NIgwW&9hW}Y{8PcM`o-X8<2Li}~|`z_Ojj7q;{gXQN+T6L%@ChsM+ z%u7Y8@^p6{4Y_=iq5kIViA)AF%hA3Us^(GcVuehf*WP1Sr*hq&hTftn~#|C za9o%cgA^6h(sYyj@%+`yj}u9rap9_h30IAvDUKXEAKG9;3`up`FB`4qbo3C8#d_y9 zv1lalI5c0CGKabQg~BtWH^M!pm=yk-OS2M>@I)=%jK<2t)kw2(=CXRzNw&bh_%?hM znUzmujQ}i+g@P1y>N{1VChvyLL==GvOV6&j*tfW~KWn*7^>RS3Xxxomoo0a=Co(2& zMO(9@&~1WxN`U)o-eJ~=$mC;xzrr=5TY6-$ zAkj^qk;cAH4I1G!6?;yGy?ZM#n8Q%*N2U($&pNHAhf^fM>bvQ3N8z~>8T>1TH~|$E zHxCa5@KcWT>z0f%{n*{@J!9aUK~Hz<)t!s)J<*~AdTu!N#!Uvz)%~&L^rV*Pj4@eC z6N3EI4JGpt=O+kbAo4{ePmA@`$pdhWzQg7X#!#}AMuO4%eYO5s?X?KXjl+)aSxQ=l zg_*u3F<3EbFM+A@Gvm${0=M+Jdt*_W$&pjwOjEz4!Z)c z?L+M%JZ)JQq6^d*c9w$i$#mZcU`>6%`3>Q1-H3a6?wr-HBKJKVB&$OYKi*Hg=irRj zFl0)|?BkUMDsPmWC^F!A<;o3(6B9}*A@$2eOdsg}+YHFh;;>|j|9m$LWq=vc62ZgQ z1*P`c$$&G$^6Fdr$*;4iy*5~r&+RYQ5i zAsZpW*;GmwndwmUcfeQmznTw!@O0<%y8&IP?4YIhCAG@fr2HRUa@Qg{3%#`P>$+$! 
zWl3#6|)C63&ClF*D6~!pvUe zmtXk=&E+;!r8lp?I~4GRgOVokFHGFSGOX*__>`Dkszof7siQFD<6y1(KD99iMgVb8 zcz;-$3wam-y9J@@aDIHA?hktTyV*I#4S(&%OK8*|7*pMLD-U`7Zh%74kn?x#8Oor{ z^=lw#)h1!szuQv2PJy#bG_595MB#?(jjgt(+Rqmlwf0UN)zp3?RScE&5qndJBp@@b zkQrY+W8DYGj;!Qh4jHzpo-(6m$r2-zTTFX-CX*z|*!mT^DGl|@-KX$*U|MCUs#-Ug zsq;0)>oV9fDh)~==CMzd@vt9#b~)$eCw%|V+!{^z!bmFXmyuT2A{Iyy49JPYH6!=|vvt=jfecCkDdtAED< zzsM>d)<)-)=NE4rL{)Z%lWJM#?&tQHv6`Gh*Lk+USofahIeY$HG`wfvufjY3;{p8b z{!;}ap99!50druh`b!u*yncQ!fK3U$w4ebe>o6YaE|=bK0fg#1U?5o1ITeX^78Vn- zelUUn#aN?BO$)x`g&Imt!FT9;=HW48Z8U`Oqh}gyJ zOssAC@un94>g<*(!|SW4U8Y)?%ShP^a~6M;)ExpilQpk5N?rI7MsR6c$|-)?as0ay zYcTX;8H%dU_4RAB&@bfa0{5wEyShkQR5YiMeG9oS^gt)|I zzRMCpxfPlfWln=+eTJxcR+Q(L&nnBtCkpVGAw|n7PcZ8+H6%G-_m|s%-=Q4VNmc=t zrI5Ctoj+6a+Q_5OvCHwJ`v~<3#NveO!wfaw9=sOWp!hda?DIGpjg>7-uJ?2Y9$C5o zi}5ypAV9v|hcLG+B>p(?Weh4?fEN&g>RzRGrGFgMnejX^?&51r^aq<4zE!3eC9F2C zi#1NC1F!l%tyOV4zWcwp*(DFUyF?teOGayya4)BmMTbfJhoRItUO^Mv?4 zk%y3v{++Df5JE0swConqVV2g199}CEDMDN1;Q4?LIZ=t7G{0EcpIdqPjqD>lO|d=? zFG&hV4R$BjpJ6_Z)(6fqvC|#tP?r>t^K$0bd1+sH8Q(PPuhCFZA~I4k4Ldqj!|UDr zB1E2WYuxxEo(u)pm8x5WluKq(sNUmfg>hJ=V=Kq<7gSu}~x4bk7v3&I|XeEWg*mG1|))2{E}~8v3wFiFkf*|j+}U+Ee%hsR>#a(TUr|RYU=4g zgL(m90%EE+P&j+|2Aox!RHFF6d{>iBY#jS*AhX{5-NTpcv?hX#P6yqmY4bJ`tUmf@ zi;zPNRgCF{FKVy>e*j#96NQdaSW)q}q7Mr^pVW;h@B=7;lZEv5Go)7X6 z-=HHmPoU&lZim;;BrP{9KKvk*Tdc|7_WrV=g#Ps=D@K||XhMS`kL}q)(z9X=6jU$> z5iW!<@Yc#P?m+~?yf7L!FQOW2*2lMj_ox-y9QftuM+3OW~U#kYF6k@fvf8cQ@p z1`}j~IQX_xAY6XP3o&cY#bn^AWLR zjyWa3ke0Uq%ua?ca6oKwEFI2MydRet>qa@j2ki+|p;0$i*W42cjf+ym zi`$F$l?xri)g5&vLD;)@C0B-6eD#EVQ$$(YlDIJS%bhD9^)>GCEPyRMMg9XIV~LB`i}})-`RomJNuCe!ui)z|NZ#49`r@Xtqn2~Pxtk zUVyJ%Y0FLB1NGd#|JHNc{wB_gV17(RV0#+|SXgm>Scj9?2|;vOIN{#MP-7l+HMA-t z&8gl>T@CH?D{oxjRmLFy?Xdq=F9<$h>|S!g5A3WLVK;h7r}vnSnp^_G?;fMG?0aaU z-XOypx0dp0%Xor=g7ZWrO(^w*O9W9RtyTjKaQjH5w!RVBrIN3V6|C}rmIo18Q zK}``3GyVnK2q(tkDkIGV)CxEVEkzBjqfiCO!}YpW^7q{&E+$k$4s)Fo>=T^J{}lOD zV? 
z>6M}m#%pxhgM|S7mBj-%N@#-tab5@gt_LiVDyz{4-I_U7nPObNvpSLJH=H$T_`lYKcY_h zrX}S&WsJtjr!qC~DYO+}-W@(DS(TRL!8W-HTP#uuYgo+2CBvF%DG_DmrMc~BuiNSP z=^j$*g&63=7t<(fDIgDhVQ}(E7+84r_`m;d5ku&~MSRVvRTFTN7Hhvh2wL1UkcKaQ z%#sJs7oX_s!w;L((a6x~zFowDKbQPhh=#@l!t%2w*6|&`<3d0I^0ga1UP$-+dIdrOfm9h0KJ$#A+YO&JdN|B74w%G{$fW?)JMhWr3+;C3748} zcnK|l2_}AR0m#r&Y7yg>j@viEexo0T{KaVJSQf|IFyya+!ksp>zU8h{lqtzbzGv4q z*#eEhaGSj`x6hf40<0}rNAjVVTaG4v=fP1D2v22$<4!qb6mRT7O=0=IXJTCYAK<~s z;$KSX_^|x;#m7=t8C#VoYa{$&S74i!8b(2DLWV9dPbd3F{mq?#RUj}mllgLcDmigG zIkw!S8>l}`sbW^m4(!(^bb7%GDB1|1rfd@vSz6dV0s%NldX81%Pqp*wzQ58#pcTDD zo{#@&NC$yVQ@6w>ejjPTncp6c6y$sx@Hl!A0@vyRY(`St%x_q=~mVE&-xhGk`|2!anC z+AnNDXMhYV;wg`c-M7)3IyyfT8R4E^nVWqa_V?_vkCwAbnTrbK!DbQMKe0Q+(jh2% zHlzS`#!N6Fr@Ww7$`_;Unulix7ijdXQvSllNRcew8G)_zTUlBjDbgJOHxL8y>p7?S z4zY~%SO*@t8OdaMj5;Z8Y4SO9`Kidr;3r^u@jZ7wmxD@ZYzGshH{)fk*mCjg32h84 z81i$-WdZ?Xw^Zq4-;MQ_F9xem+w*3V90Omdq}-MV41CyDH-8`U%+DduT=WC$e=5x1 z&m0(*b$Thqn>nO`lqzvk|tW;odQgbTiq1T^^2B9^4!W7Oa0uUX50MYy8 z{=AB_GRO>r;oXOleOnIM$1@kMjr6Z_n~&50Izozx0k62dFb?Hem_0&Qb>H>tH(U`_ zfj2|x?qbq%FTi632o}Qa%6uf4SneF{`PSWskML}`*7x@! 
zY_gX`t-soHh{r@*E%h~p#oGx`QT$nwdiT^LOBbE@A9 zQ7vQ4$K-oj$Ru%tw_sy)C_Ly{OA1btS$T%5(deV0>6FPHPq4G^y*VB+jFizhHleOt zUt;#* zhemx#ja;n&rG*~yZfaCMc6<>L^L*qw8U z|76tQQZ+1^V6!{i0n6|<8d`K8k?IaX#dw!cb!Q-WAd6#IM~o`p%0Tl35gj}k%r%F{ z{qMv3_rr&+uF9U*P{7TZktY4Ti;8B%$Z;HHm#)1lj=vPFEBhgcqcb?6eN}6F=)3pp zdm5TxItpO&_Mpz61`6;huF@my>`vq-Z}n(X$pQ$G4w=P#f4J3f|ExN#Y2nxCQuJI| z_`9I)M-n)QCWMZ0_7U=g$rV|x+LolC8+{d?AzticAZzKQ$k~QzVupZ@rGIz+_*Ug# zDH7qlP;bzf1%u1v^Gf!@Nr0_K&a$Wi(PmGgr(9R@njOwZLg8 zZx$idJ?J7WW;!rj4h+h-JAg)Rv)SZ2hg=EA`SDLzbG|@D^gp(pQk;H5l(xUx?Uy)b|SjlH__|)28ob4EyY{~GVdAU z!!Jur$1Qjg-CncKIc_KMX&}1PbaCiHs~&$--l`kzU1Bu>c`4d*(au<=1w{$x^#_D82_mBv;e84!-vj)Z1^zKjm;a=o*ggq&fQl zX{b*BjZb+9wAH_kF0hV8R=(o|DBxWM6T;W*2Y@6AiY5-H*G{n&X z<==ZnNVAB_rB!fQ0ThpyMPWM4hxq|~NnqIkhhIXv5kmcGPwz+HF+ctiGZhmSb~FU$ zYU~-mr|Sn$Q_1rwNeL?2MZC+hN|Pe*=%JK8ik{tZipr{co+<6L$V(fxL&0Azpq-VS z9jkIwYi|$k!4w_a@n)TzcwpLZSoy>AvyTsfF*H7w@1YGb`e=qj`*whXu=rjZTN`~7 zf%)B*?k)TB<1b?5%Rwx@FW-ptqS_>#=w;GEdOTq3982f{u%DZ1nN!c*;)<|mTvH7r z_WaNHEsebIaY_cYOUh|qd31aB8ba{V=Xl?{dap9ElXNG2zjCX@T{{pTJ{wrdCDe!2 zLm5+@wA%vIc?krc)|`2OhfvkYT0;g!R3RR8-OcEe|RrJVUM z7a(sw?OFW1=E4eAih5-j7&84HN5Q|dh5umqruAhm-IH3H_;msF%C*eta&d<~AM4p` za(pzXm=>lO?+^@SEt5a2?@TQ@)LHRb7n0w6b@%=|G?V{9^CzlIA!7g;eyx6aI3n^c zKzcRky09t0CrbM8MDrNBB&pJC$}VDg3ibm@mq2bhTKjIz9ykTxedF-AFFuGXlWO`~ z5tC*^^v&I0xJOJOPC4bLN@soaCvWKHqR?rw&oNJjZ#2YwRBQL$#h#0SN9YbBm`WtY zl{Gm1K+K|N!Km;YCwhO_`KY9e=oglbx*PxDYR;IPHNdAXSwP20%&E9@{siu=jajI8 zi11u=<<2S`%`eU`pXEpbM*Byj3c2@9q7PX6@MZMUw%7Q=81F=^J=>{CD@ryTRo)CO zuU!l*F+Pkm(z|#A-^~DEw8{V{iAwr@3c+6nXQA#>Jm`_`Bbn$IPXL^xYP5I{nv124jmw=L~_S*^{>-#xT<*0Q3=k+w(Y| ze3235si#ga+S?g+HZ4GDo|?2z{UuXOyPkr6(@668ceU&Uk-5}9NqJjP81>K_WATfc zA1b>!so=Z7bIk|6a)mZTb=8vT|C$p0ugO}xW><#vs~pVRT?{GRLEc|JI~?MA zsY}I{>ss{1o*UWe5g0G>B!7jHD$P{)-D-_xd;F+A8&2~n8A>X%5j_va#dFtTQ^DMa zf51!jL(=kH3%MZn+5dwDnmOGKPQNlIlADkP!CII!(6#x30cVQ|1K*@%f8+OCA|uO& z=MsRK;Rz*hJ6>X-Rz|gh zQ;v*`fon13VM%08gf^dIRBFn~d} zhK%g9@)diGz;F-0-%KXwit)|kcP_}3W~G`#RJuU>W2)iii*^aa_CzpPXz3C%1PGEp 
zEJI6+dY|t_UeP)`=7%Sa((uU-*8TO7w((3bZZY0d>bvf+LpqW|(or2=wiVLRk0T4L zEzakz{wE#zAnC}*MKu0+UQurcp1AHd?^mf?CYr(= zZ2G=b_KY~Se<-(ab#)aCAt_(S#x5cASc!ayY}&omDQ7KO#+!gi5CYmY98Cpx$fA9@sMzJcR!rWeC(7-#mz4O`#%!EyZF-cv9S1ol;AiB2|~vX_<38( z3jmX3=u7F7syn$wP6rqIb9Z|o`za?1>JE2{fOTAIQtzg_#Yo-C>gr|>tY5TX!r2`^ zJy?77*~JUkfg;^DBO}3tZj!;tsDW-hC%T;h0!R}oAnn8tFX4B5`T60R3X;Io)LjrZ z&aBMTq0#9Zz?fxMhNvS31{^k!2ndk#r+C66{vl3~U^ zIWZxY7rb>g+M}cnUF5>zJ-&6AG`ys&z2*;-7rW=9TNz1RfihPwy6(C*b7i%cP*5j3 z&?%?&xD>`|1HNM57hGuK{lCV7Kby9qAiyZ59?1Hx!f)!;WOskqVLP``@wL~1qdXFW zfSFejQa+sR&b?C@DUyC(-nDr@de#8D9o1QjEtj53(y`^U+_?I#5EvBp$^{0wX)kKA z6Pr94a)*fBB}yg*=E`-TuTJ462dcPOM2rGu`8oRqn8RmMncFYXkidQ6Jk(a}$q0d?<^ZXiSmO zP8@-ex)m}Azznr-pbd`{Cq*pQnlwLh)hJI{j)k_Re6tlt$;A@x zSwuGOvd0X4dj_E6_xAAoqh2=k8OHc2lh4ccN~aB}8kOG%rRdZVG9GW>ap(VuC*C@X zN1D__QA|lZ6A~9K<)QRSE?fb58<434XVZb%By{sftEz;aU!iaJgTr6&CKTuyrkSLk zTp4PXjypjYxDKGX9C7TCNz_U5mf(%Y!!dq!%BNh!Ue$LxHH7<|WRv5XO2ST4l@kY z)oEU4fi8P;o{OnerV?V@+yY3*2;`-ViDIksOBFRb(Hj^Gm9LfWq=uqo^>=iRFP zyWB5LI?E=U<&_TO%)NQQpA2qMg zWmZ3%CEj ztJX9Z0Hd_g13iaDjRg~z1Hl}^*BogR#rR9W7TwVpcs-U)kI2}GwmnVZNSEjcZ zrYfa!piNjJ_i|NLaJlK~mT`T8dFy)?C*ipeU|_kv(1r)R@p*AQT>`yC2U~Qq^~$Ob zKeB|uuzwOS)=}y%pYxemmq7C%gl>vj)M+GHo%C3j{2ob=EMexmPpVzdFDYrK=9n$d z4i}1pzlAEbgK4T-5Is{swO^o49HvZi7~O5RgLX~YIXx+Qe{0?Nr`Gzpp*;;M1IeL) z0yR=bm+gK}ISfg*rEhvbqhcm@=Ey;sCrk*s@ZpB}$^QjiK!kO=I2AFD+x@r*5*klq zID> zlbD?b|ECbBJDPXNOxY4<3`VpCigg)jRQ2ETds_`ThM04jM|_=2w_0y*p$LU$)K|*b z4xcgOz#3YRs}-!+aq?eQK%Uo0G!j%8TNFSeI+5OY*gMdhT|){8R<3GGEO5|U13Mli z4F>@);di^chFpm?m(xin{^`Jpwlpu{fx(2L3Fv;`$5~e(bMy&>a5+h^zW-FL`T4>^ zZn4R6G}y&?>rVcYlRQ+N(m0$4EKC2q%WnGm?S()JcZ?vc)ng}mlvD`ZZO)JBln(=w zHFD)qmo3q_*@a-8AK{f}Hlq8C&ndIk)x%?kjV3_G%<64}3=F5vzFL2^J_x)tq04qX z>ps&kw4ci$tAX*o~hH`bBgE> z)#=$~TIt7tlIp{*HX$sas+TY9*Y0Ss;|{s;AJY1YsF4s|b*wY?67!!}{X6pPMTQWz zCuz1A>o@bez680qsXQ!p7o>4mOpA2nYw39FTP8B1J3YxwoZpS=Jd@DaYkE9h#csr% zlzMdmB5xlCBV=ajVcmWZMe4z>R$^V_bzFeEsRat4Y_KP4u(UddUk_qdT`ULtGCOhb zfx1nZBs&6|$i3Q3CIs63X8y&!8=ISI-qEew$a#7m2A#>Yt6sR_TLs><`z*?cFdNe? 
zX5g~5h&c5u8c0!8lH0_U(TuyxQ4RU3m_8fgy7Mfoas=Ck!4_2O1g#6ngMoz&S9R~q zy>~L1(PDEx=rVWcQ@HYKG5B>8wDU4MDG!V)YC2Vn0PA2}j`XiK5^Mp<{#Q&9O#bHp zNRbwkF>B>U&%&;!aj(=Ux71IKSAD1Db)hwt$pyZuv6vmmojI*IpRfdfx%=9Kd*A9Z z=*IH(j`t2upV-3I*Aq!Dqwbm?y;D@`wkT%Ekk-0C3=~;Ex%EHC@INm>fIC!sk2vG9 z9Jn9@^pjF=#e4^&tPvn4r6zfz#5Muo?*z7Z8GYZQ+~C3$)0a0o830c0gqv}o(8Tp| zF|lGzGLaBtFF*39BW+G8uz{^c zouxD|GPO=?`7nh5lD*);@M!V-A%s~F?A=Dc(Pk#G>~q^87|NCGVLqZS>ViyEN^ZdN z^ib@&AXLj{z<%XL9~Ilzq`^@aQ@l{p^rQQ2NAS55LP85p84o0*c+y+);cnJi&Z!UI zEU5xIn{;{-R}K&FY#nRxmuV*sTHtBt*P1(Bob?>8y(Qz+6q*PYHyIC_C~N-^WF^PO zzZ>ExTI{y6oQgi|H>FogmT1Y{yaGUi% zS@quu!)ab<=|3hK5sSSO9D_{lPVw0yC67}67I$XWg3}A=S?oAJ;F1eAH4{l*z8xS3 ze;u;NfUcay0^fk#nT7iv6+(98DBjua?7%G5s?SzIB3-wl zf~PnKH0&2VeP@vwr~d4{Ma2YVLdJ31!=W-@ct#`%Ifpoq4EXe%(~Z;p(fdHGX<^CN z=jl1T(TjGz?f)4}mwm-mb~(esW#JqzMsTbUMN8M_%pqE?L!W~YMxQuBA@O|@P z&S6Fv`-*PkqET#4T~fioQqox_xL2DIgG=e!%yRNE8ELtlV7;RlHbZs-6Sz}sc0FJ5 z(T^)+jd9@0fx-D54LLxwO*a)ueZD)j75+zHd*%A`RU2o<$zZ-Z2z;vg$m*j1&XO0p1M_p5}Y zRzbu22J8{%-;T_BeN#A!-n1e1rW{OMNRB1IIA}sVt-bR2^g#P6F89-WrsomKPS+1# zv6Lzoo7>Q4mdcthFd{x{A=OAGBwFTOh7-I7wkBJ&^^Py6{)P9n@2TFoj4GECg*o4yVug}dE7wWrd=(S1+gXC zc{p&#o(*5HGZ8{(q+2n2L&>zuEjS+r!U}c^lb!bRsj-x&K$o^6R#vP4ZNH_=yWy>SdS++Ar2#ZSpyoyfbnqR2n$s878)#>18zEw~sAF?!JkV^}j<6_;&1lB(HdR z@%lP}qF*G&G=*>d?Sr#OR!=-f0}L(+kLDDq&5M|e6)%tqU)qjeO<;7}ppm_b5!yKg z>^yX>m-r9RU^U^!-s4t2_op8CT^Lxi!8yVPJHKH98bT?7qT#Mrb6GOKczV&iS7kXh z)$?UJFofWSh~eg}iQUZq!k#qRgz#=R#@qkA*Y=6u%51Pcm9upl~gJH~(=13W6`wdPWhcG6u5RF(&aPsi} z9Pwr4enFW>!_pZSjym6BR;jE#mhJ;QC6*(on5$ap>P%{In#wvHPEwRD_JJ1sncdZ* z=HQF1SoRj>QD;j(d}PZfI>n2%nYFu=f;rB<$b;%+N3{IPSn#_Q*^s1`Me~092sZcC z`fCeDI0{$Y8`kX#}#YqmefRg7<{h4I zFJIgX1{>Aa{hoE*O4=8mF*Y*3sjt>#W?53;n?EkMg%@2R1uu`v6@PML$8a{cpd>O4QF@>gh`nU`pja>~x4dzrCEdU4UHl1E zx-g(%gYs1B+>*$zL4AESFtB%_D&LjQ__>(gM^x|I#biPbZWb0akQy0Vdu;|{i?I5+ z2CW5t>s}IzI8u+9c1WpzhSA`ayh2-%;(rtkcX->&Pxl9X^x$Y%xZn{jyf+B<2ctUNEU30JMneyvqbfdL({s))+6m%6|u 
zAdVTezi`y0lQTQDw{RRmjszl&^yu|2}08@d@;nlgsL+*;Ct?(Jy>RPb?nUFf-;oAjPMC|EP)7tb7Yx`Pj45dJcM$a4j|hM#h&|7-Gd zXW`L{%}0hO#_Of&>su=2v`*UAN?KtVfYRF)fvZ67oUr9KFhml%9;`*oiET*jcxgr5 zvwN#>a^(pr%=b?gv5ezcPs~wPVoZF4v|VpvL)!i)5&Vf9a>(YHN1@LNt*;H$oe$aL z{wAC+-RFfjh_scqMO*m)BUO@7 z<(Q#C%+T9?+=83FcHmGLN^s%%KIr~zctDt6w$u09rF4ZSMct=`g`Ypp&v(Ghcm6TH z!bddd|8tT)Bb)0A$|vPGdI$6h`xgoIotG2hSJ-$3p|S8rcX4+HUG+vgUjZt<2bb@U zR{F>v&~a_e_B*DTAHKa9YJJ`{wgNL`i=V3InYk_%OsM}}Xw|YY4FlN{ah>$=R&4Zm zi6}MJNX{?S(38(L)rOdS&|6X>o?ks|Jvb$RbmVRw$36hB=?Mcon=T1`VE=7I zFa0iuUU%ZWTmD-4skuh7pdj*i1;t3dWEjUUe2Fqa9y4&ko;rydbX}qn>QU4#1&hc+ z=Qo{mgs}oIzB-LFSDv!akP*7*_x%{}|0N?PBDNyx9Nh($?os>E&Nn#f95k3bkIf>+ zBTl*Nr{6boCW@Wxd5ZD}R8?MTuxQhtB}U7ls1iCiPF*nNkMYSXc1K68SBUpRQUE?{I!7U95g%Zu%0Jko!M_ire>Q`4Du*!vV^8p(})1 z)@GzZ7M$V>3{G0s3P4RcBDCC6a-&n!1Bi%P#{iWBhY?fw1L544YJiQPcR=+Au-`H=hu;sEsp)XTL$4S^aq?s1fw1P z+D!{7&cj}0%Ok`343KQ!?M#ugw-{lvP2Jf(UUp|6%@cNbu}``q%w>OPuiqa&gd*h* zUc8uUdJ)AMaa;!z1uGfx896|b&Eb|LHL%W=EiN>7?T5a-*rKp_>Kaqj-Cjobn}Si9*j%6 z`f|KZBVlY>dg9ts3lu4cAPQPTSTh>i$< zK~B8m+Ye>i;W^I#Fn2`0_g-h4TK1^uTy*I9b%ljK*|Sm4Y&R9Myu?5LbA~Cgs>JIB zKPzL^ensXx%G#YKj(;mFD>EPXe)-YK<42OW1wP2g7^vE_f(}DJAeb}OzB*w`!|}$o zR|Y3uf;lfLW;`k(Q9WI8CVt%RN!F9{(d_C6)1=XqRjHEg+0RDPXzKEk>*|Qb)3wH@ zvf9s_mXO>Y;Eoh4GMAa~fB%a={i!!4kU5x^^6JXU0>_N&cgNxg*_Lhvq{M5=Lk+^j z6}40~>~q`fX-IJJjU*>6yfGo#GtI+MKE2wyy`}zNEoA*$G08r%KqyeLivA(ah5iq_?hw4=^Fhw5WTYj;hsPt| zdgaOpTY0?0Mm1?-@2f4^_jIWzK z%J@*~3ADmX>S6tG1uHS*RoLjv$rk*jC3Y61Vru?}m#Q`|pggVOeH1|Iq}2c>=JaUM zhjSrw@QC$@6TzIo>Bd8~#9b+^IEg~gAFt$nF@I{RBM9O9u_DPA2alqvxc1%eZ((C0 z><|qlEPj*nez#!4TRcZSH+Qb`XlhJrH_X_GHC6T>z+e-ZGpA})p*%kb8M4}=No_H#~AyN$gpZ`!8kZZ4>8qQ_rmlqt} zSHdfZhE-{&jdR{(^N(b#Gp)f`%INB;JnD)+fj_(PjTt%dW&$nWk5cQkM{e!LW^ZaK z4toM$IdyuSGI)MOLR?&&wd4fzZqk=8UycX}^)IGMeo-wb=vE%$XqoG&TazKw+oBB5 zfCTB<@Vj_!o^CJ~o|crB_L4?bCK9HB?5k@xH6L*z^999c?WTU1Kh(CvaG5lZeb^66 z>b%42MbTcm=iUHKuS%yZzu0|qEm+{>q&=`c=P5JrKj6FzIlW=9oQ`ZVz7BfuSR(vu z`7AN{yh{qNSm#!;1DfsfAF%$n2=W{)NBcbAF0QkL)~60TYmDo`l{<4-? 
zH*~oR^WoF&*Ka3*fG)pA*k|+o`#VhWpFu&Ch#1g4rsYB`zX@_}-9Ci>5FBif<4*r> zihVoGkBO(Q?EJSie19LwAx&jR@qgahuK*m-iI7Q0atl|Nsyjr!oV9f4Jav%w`a8-f zR2svsM5y=9-~IM>u3{bbrUiW(xVQYR^7PbI%nvcX=hsGzv`>DPc2_So8V!UpRkYl1 zeI+szHi$A%)-2Ag%|(iE3A)GJaXYQ=5&;?A)8S-8}V(qRSpyU>S zjWCbS1;oU}WDyMuM?!5U@~;~S%u89ybK6cx-=R_bEUkIWT_C&G;l4Pmg+q2+0xl6+ zoBF<%C%NhHkY}I6lRMpz?bpXst6jggH6xg99&3J7OQd^H1JJ=hN*X}3Nu3IM`0!}E z2Ja-iEg|OJEpFbR!opna9zSiosxIJmRLnHx+iK!BtFyP4u4aM5xe<6TJAI7S83*x_ z#Op;txPpZyeK4%?wiHbY!FhMB`?11X+ZKdzb(TlbtzIcSC1IlZKBL&3tB)^CX&@sA z8gfpsy%YyO|D%d`zVV|fV+k8^4M4WrTZqmGwH6SoKJhvYv@sQMY35^%b!|*`I0Z~} z%^t*0u{=41J3bfAHC0_dSMeHoUg+@q`H<4kG+UT!OE-L;B*uMI&N4_bZpvM*G%;@f*GIdx-|v3^zwP0% z54AdcKJWMId>%Y*y|VJnfm@91w}}nEG72or1X+1``Ns_qZ&c8-2Q0R=^WYxVKqHaRMAiCs^yBuh9Akp1 z_2ZHw(2yU9=TEGriu`u+OLTRaR2w~xt8BYFod>hTURV|NTe|n+g4|Ea z8R_4nXbJBhPPcuogq-Jfp2@U$K{tRUneCS&a0_wqL&@9A1nE^Bi-RCQ-uF(rO@f zs7d1Y08=iWoux5};I;0W4WcvC;X7*OLHmU@*ex!7m!xcV#2bmc?k(L%P!|d0c=M9S z6dw$#*DTcB+IWKB@d;*xq2?5+>yL42tE!7n%+1Zc1BrWvb^lSmr9+R$>jP?|jknW2 z97w6yK5j|p#!=gfoh!KBeA&#VGE4eLmb8I2+jPtQ^iDaRP)C?S#k zXb3!*VBEz<4lwCC#qO!sEy4A-GfhLzG@&iC^EsU9&Vx6-ex7ML zRP$)B>YPO|Mq;ozK@p4P6zWj=;NBM-F3SzlZs~4TRzth-iplq)U%Kp_KaMiJ8pNHA zNh_fOsCRD?qfS@>w8hM#h0y5@3p;|n`;`o_+39V;97?w>=KVnBlNTtI1+B7Tuk8Vn zQVh@X<=6$23Ct1foB0ag$3&A0XAeaQpuDZ#r)nnNbNfbY2E29lII-JMOmxt5;fA8M z!Dl@TALLU?G|99*Qi%t*s%O#5r~Wi0VRX`P{|MLLaCKnPC!move z>MW5=@y)tJXcdlaociqE=Q_ZNE@|eXmwBwb1lj3Fxk#uv)U4hI9a4)QRP~MiXMUXB zJ@++m)U75q}Q z6WolP2#=>6Q{s8!5fUO2Q?j>;jvNhTZ>gd#lRfAS_`G{0PE^F>jH z!M`GfyupE4klzO31FZ`$lLZ9@mvR6`RLaRsE;jnnEae5ha1Ky-V5Tk|bkyT(sO`!t_v%B*lsbJP;20I4lp&6~YhNJ9NXd%LXKDb! 
zP802+*`p5oMb}Nsa5M1_;og0k@$V3<1w`xr(PmSi^mGpj3NjH!PiI9Om#hSpwHylV z^XFz);PYsYfBJO%7ST-ph}Xr=-d^t(0I=R&RIvE{&o<{MqG=P8wyPeL-@nhq{`TL2 z^`oT##*Z`TuT?iv*9GpA8!INV@Mg6|5p~3Jjg_!C5CRxk6zhSnhH9Uz=T7%He|6)dMI#qti5N zZk*!eM9;>)wdGMI@JrSo9yx4MbiJ;nr6r9AcbD*`_oy&Eqw)^G_BfP350g6Ze`)V70L zkOdYc+#pO*7xY;5(mz^}U)j?UXl@1(-*bY{tBMT}8IYuds$&fNJs*NZ?m-mT3?oD- zu!T$NOhymLy7ef6s_9FlGTeR+1^^1m)#V)W@-KS{U}F3G*oe^mhT#x!KBwJqwcz>; z#ea9)ML|*aXj6CrXL9=fy_r?H8XtWL+z4biT3V#=WgT4fXlTq(#WzWw0XC7N?m0n~ zFTQxMS)z)cjWH@Tgq^;@Q>e5Jk_^XZ|2C)NaMhR&8w4{-A8=9+A}vJk*|bVhNoY(k=h=#vy{r4#2EV!kbr^KtvER%HlQK!}!PCQq zHxd}R1D`at*uUE1s4J53as!Z`sE8Op{;pio|AM?8hHo|f&O0hB2~4AI-5Hi}XtpIS z0J;gh29**Qg2H~?q#^X7L=nW4G>40d+$?88*v}cuC`wBgEPeU%CHzu%90(q_Z*M8Z zb;jEVE$BMogW#;qt)I0Rtn(K=&4JgohH1e{ub$tc|e( zs>z0gtqX^6)I{6-JKUR|oVxVsT?!(v9sg!28&DaRS5ed7}iLLhAl8#n$u_%;V11m^1%#~ir`;UZfofCa2d zG_Ys4{~rIM$|bm{jZ1PjAH#bM=4>L;){z+exlakSMLz71EGsrgj0e}(GXV^joJ%9q zHT0vk&g|#*1}}$mmHOw>x)vH=6m`eY_Zf`EoQu?;zpzv|x7 zfIRMZyCeCe+!p(vA@Y$LGME&)W7BdJNx!d#L2~%6MUlkb_pzWwseOR)$bhh7j&(k3 z*Oy!yt#FnRp=&5kFUG_+gSFD8Cr@GSBepB-hFg%H(tWY28#f=}g}G%seKgBo@MQ5w=x1+vCA({a^`@uG^8F&&kt#rRL*##i zjAFkPY(QKdVSMxduhtvQLB_Iyw6HXzp=@PHm{fX0h8$*#UUaqdO2Oc#0ewDeNF3XS ziA9n@OneY+(I4yVb*Nb`7$c@7%8&>W*<_1<;E*>OWe|^-&@wkqY_gI#DbL(Y-vt#& zU9UIN{7DIzXMZz}e=W5~8s5kv5)BiNqyJaeyGo$!+Qcz){WXhYpeIaZltZmQbdqr~97*k1CJ|JuxrBHSccEF|W*Z zF}zK+=Joq+%Py(_H+N@wax$(Rz$SU4?f_w&hO%~fFjOfa@v8hYGvrQ1+rw4YkomYq zXUkO$0?D_36k11PJ8luDq-%GF)pnt9e?GELb0AQCv!|n@w$Po*fz&w&```cqu13i=;0G)8=0+_yApbXb;GWM(WCm(;sVXeel|9s&SO=d-h%mf+6ldfoIf(UtT%+B6(~|s3_w4Hfwg>PdRhi(5lor@tpRpFZD_|2^ZpCX`_ksVA57?N#_n@6imF{*?dTPyv^~U(**P3L1VC|2>U-};fbZFc;?v{d z46A|-=%JNT4mFS}A6Wq@>mGk%7KK)#wVC5cd#;7E^S9k-*%0E~;cXIh6_h({sq3k1 zo(YUzWSldckxWqxBid_mH6ukV**2H(1{@MjL>>>7pf>>N-|2NWDkz}-y^#{vWz_3I zaU3J5j!z0Yds;&E_-SGSskmsM^Qj_sBAO6)fbgi?#&Z%Z?_}Nb& z0yoJ#j{z70RTxXI>Yy~I{NHwllCTU&=FWIqavc6t9tz#N-$icTLcaTfxS)>`3>$pQ z&cFIP<}H#A>f`vhV}u>B0TU17e}%d!ry&9VoVK#3uN_Wu3gZM15X 
z;D!A=9AUh264@-B#twDQu>P(_H_ZkfGl0TX-W7py^7gP$kuT{B+H&UBClV<%iyq;vRioRsZR<508u!v* zpWWe`S=#z;XG#L)6Mq!q*K5W+QoloMKabz%U;vGG~O9jK2tj0Jb-2MYIJsVhU1MX?ZXU zZ=#x~M@dWi3U0c2evqC|iz=Dgl)EGfOWXGC$#EB+beRCK5fm}pT+d&g@UkKIAIs)l zaMbM?U|w^|9M-chLoFQsK18xAT^W98o_*^cZLu8d>~biF0iEKm@-#|?;JcM%+GWsH zEZ+0_0)dY+rS)_8<~ui)NQHb3le^=ubOBu=;@9)7 z0I($(+Srd}Wak!?>0elYKfm@r7LzI!-WfZR`!rN;!sJs2Ttp=W#%p_+M{qct zr>c^TA?!DZKDE^Hnguq{r4j&E^0S(iQd*Y_Z}i*Z6WG21OX!3|!*C;PU@K6ZH{+Z(%0(a&i{a+O^k2ZObxz#Ef<{r2fb5E$RrK`;6FNrw@kZZxLjg4*)4 zS=V=`b0x-~QOj_-yBL=xNaufNW@a8N7?$ndF*#*Da&3FdGvaP7ti2{jwoO{_K%bju z!({H*>>uMcuoQZ=bu?`P^3O{^LiZ)*uncelw-&S+mFM2VRUA*hp^im!yj&VR>e^`ADuQ4QovNLR7G9<)yTw8d0A_J#0$D z;Aw4k8HX@GFzc6G5l+8ryKhFOKS*^krTK!rM}Gp$5r`CiV%?m-eF+l6x|ghmPPCo4K4S zkvC-B&Qc9?&fj!1p(yHuwzFF{c)nsy0p+et9W@NXr0E9??o>X+EYi_C;@&3*xQ{`5 zjRUhy4i$+(oxPw0k8!-F9t5gGU5E_an&#*2d%vg6-|X!_M|^#6)PDvmQsbE3aZ>q! zi?n1NeKiX+GPw^tN4Ag_25wUnU_r@@9TGdq&NSF-T$@3zL?*j~==-V<=Ayd;p*spe zcP~SK?jPr!?4vvc!D%{b3d#*ZaJ6=qFCdz{rj8oazji6Rhz}y|v4^^32}4(e$K(f7 zgs3MA71IKzu%&_Z<-;YZdN@A^xKVS^&Z#c7ZWsL@cpIe+jms8h#kX8zhK+OQy0=~U zq4In}j^hMiPcr1jZ*Q!aHWxw*9P}@Oii{twn-9~^P4ipC(!geVQH${z>U#TE;f#I& zhn?rTJAH(~C^eG1s3ETz5RP=x!ra5Ao`OKY; zXPjK#Hp4d4i$xvpg0koV2YHqc>#Wy$K++sMNRCH7&|eR0XPynJMGBPJ z7_eD94@`stmc)$VL9AB7sQ3O&8`>Xl`O_^m&RGz(_DHT3#9TwUgLu^Y zbC0oCv>xmi9GG^*Q5Uc~9!?!dah;LBrh^^94kFeqMj}2P9+91e0!M3)wO~6lss5|2 zFuYp>9mCPH9KiW^gI#!bUe$wi$@XB&vX9luZPEF!j}gd)ZK?UfctscrCPo^AF?_%k zt1JLh{Um<%h3FVW49DV9xjOLe=j`a(u9fn*V3($lGvx4%x- z*}p9DG*9{HS;k&ZuDkvh2m&!p6BJq?F};hK(GY}b7aqm{5i$+*{p~ckNlI^9fn+xMxK7Kc=1BB6Y zGe?--=WhO9XzG3Ot^`SvVXY(ttpU;A-OK0NwQGhc%p?e+rp&<|WjQeHx=;do_s||3 z{V2AEV1GV14S${WUb}%>__*VOCVQO5OYH^?eRVRd8f{)Vd{nW+@KsE&3HFih5X~SQ zr0u#|YBI_NTl0sPV(a}$V(^mQjX^J#cmk^wedw(v^Zjyve}7ZA`z)Z(B#V~Kyg7|b zS%i6c>FYxrgkbed>7T{$Wd#M9dI9VG*Z2P9rZh4=GrT*zJAW z-d<=8>h%p^8>Z_O^-UDLn2j09?R>STq)8%vdT>9#_HPo(E`+P1M}kT zGy@$ffbY`@y|eUzY-kfW!$Kooe|ikF-5zM#8l{G|av%pX9r46*5v*eR$Lai){R~n; zKYg5_vByCD@N*jLL%DR@*AMoY^2*q^=FQ&vCX^0}J(6^WATEsyS0PE-kpC_Y#vy62 
zuyf;RnZf42p_hdMzjLyW(Dqz7rFB4kDTFASPb=o`a@)3I`qu(g9o^-6#yB9%GYx5b zqWT6~UuAP&jnKWw2+4o&MU7And<~iXeAj`F$cr*Hukz^ed^(DBn$PZlI>U>tP}c~S zZ-8OfzrYUVDUJ9%X7_|qS%X#&KAWK8N`paaT8?S=t?@;ej8OeInN!Q07?&L9bFY*!wkiztF)1dxdJumN1glmW5Ex4Y+) zY(Edg2m2d0=3mQ>P=cS*7T;r?@m?p?-SW{Q>jq%aV|nyOhqdB z_mNC2KO4~>u)i0`zVGNLp37HpWdqugFP7nA8gDu}0)mjc8eu+gkLXp_ap%QdiyQCW zzJ2T6Se*3j`8%UE?d4)G;qXp(1U2yKa+r@0P)V(a`uMJv_f&kEm@tWRzM)?{anE$B zx1zhUr-JUi7Kt9{Ge3ZSj{drtK$dH@>&j7CI|A}BOMtj&7Ula;Lk%>dINbpl2vKq8 zf`XmfFEP4rR=DZM(X#bzAYceqabi|^7ok?3#O6P_`>GEqGJcnfhFnAyM-q!D9KwG| zESmb=LDNgwRWI9EEd8Hg1^ARMUJ*k-_;f)TC=*&h^i90%C2%Qd!8@gBeTW25k$w)^ z^MhNfss{LW-iybY7=}hwuhc4K!}y<${`xRIp6CUHbDHDv1qApS;P#~GK52S8${Z^} zjpGc{AM{z>H97E6YZMhGBT3Ke4TM>jHgA1d6~kNPeut;Y3rAm2HoS*FK68fPy&qLv z(9LngP2#+8_<}ENTphYR0LlgU0HKc6LJF;XEPVk){-;^0zT7}LEbZlALK7xyrtrF& z%GL40vm$s`w)bx6q)H6EiYou5nnQbQmU*R5>>sb=yMVjqj^(l&QPWLl7;T30(x=+<4gAyH(FwNTzEm>|q`wCh-KUoNS=S0M#e*(6C zoSzf-pqx5`a^-sr!V1MJw;5ZgWxyOyVeFoIyO&W`Br`Clh@;X!Z+ibQRm~J!!I6}A zwfMt85`2Ac&BYd}d-dZWnw#a)tL6_jUT)J!d-r%u@JfH1;~4|;0lj>}(6r?`#I~j7 zV_zlK_w>wsCaCv49M5`xUs*F5N>k5X&#ntZPzawo(91jh_UrHs!|?h!kccI+PdqEFRiL7 z4^2E-j_wRrsHbY*wiv^TPK(n2n0Lid&n7EAP^@;_a2rs7)(TL4jRUy-qXiehxN&Gl z3dJ=Ys3KEUE7q!a_*#wuu%Q${2l$EPwYv(po7YfE_%G`C^#Ph zlZcS^`Q_+F*QI?gRaFkS#EiPPV||Q8LZmv%52HLnpoz#1hq z@AjYH$rA*Jdvt%{wXJX>DhsA8CSWz{GNZNY2;D=QE5n+SUHS)6{-O9N9!G5;%nZv~ zvbUT#Trg}Gy>K6#eN^ixG%;ax`{v@$HB>{u?rP=g&x&yfFB6GJxcT@R0lRhD+B$7> z)!XtO2aL&<>G`&2O{{9mHb?MHYcbEVV;Ef;5-080 zqbHH*bdCAnlRgh+5_lWx>#!m+v8>Z*oL@y|8NzKp2;eU7dpU!^1PR;5mR3R06GxuI zlwPilCJZ`>%4=wDrXL0!@by^)zLCc`rQEsgsPd2rVNx}d#{iRF@Ov};;Sha6X!ZOy zQ^vBR>5+_MM*Gm<3@@^Y6M(CNNgF?FB^}$l=xATq>l&L^#VQwJ^tjGc@dS2l{OCC? 
z&H=0t%ZAXaPSFc*_iXC#Fzbvq29ka(`;2Ba;As5c9lNcTlH*W>v zmx9BzI;z;_n;%Bo_M}JwQ_xh)cIPhg!5y3ZqqH;vN8lhJQU3K%BlQ}17tvdap(*NR zej7f+NpcFujOe#Sp(>^;p)1`^_r$f>-~0GRyWL31pEwn2OsogZHcl<$OUIDDj|VBh zG!NY0^fT)Gh@V1;+{A*k%8@8fJitE(Np~!Yx}gs1@Ad9yH17-(wUlN9pDXG9d+!?S zzi%9QTx8jJ&$_wnOsu;ma%sOyV9vfe$E6UG8%V~rmiJ)bwHnW4`6sxllS_aj>^zy$Z^L`z!lq662iEJZ zLrC2feC)Su)bM*Fk%O|qn7nL!@CO!T0r~wS50UWCE1V0ya2!CJkB+SZwoxBS}kgA0A5?tPv>b109T0AY^%z$Wnx z0tLlJ2Qk|O-ksL4fTmJqS{tY??$2mur?GcDMjz(2858lR^M z-G*_7c;)f%*KrqZ&+&&ojqrE~VByU9XmL#}O@i)T8$JJ&kdMHqKqt7UT;g$!7aw7T z28Dlr38<^BVMk$}1>?WCJq4&R$t;S?biAyigV<&OGhH7)d#hs>g`+Z$&-?~@k$(Rd z$A%wy;pXHzx$|)neWU^78EOe=Un2-lWZeVvVuGb~AvZ4MO2tmi!^s1`TAJ2I*9Gj) z8X}u;e!U?x@$_>3RaaF(*7q3^XQ47Vf7jnXF>7DHgz4@+WV0MhkM-!6`s@aHTxOK_ z!KNp$CZ`zCC|}~P#De{aQuynkL3gb0mf=23h=5u9Mf+>(uJvNoD|gp-@)H_4yfc6d zJhKS61#CbpOkqc?}$O_Wa|5KhYSt z=wfs14%B7hSR!@8D69XXb~^~}RZy2*&|NYK;Y*$~J;hEr$WqF+SMqu1gipd~eE1hj z>sVe9J5(4j&qW}>$V3>~YhRw6DXy3nLxlXhAma?KnirpcRB;Ncs*mx0QGTM{N}`{C_wF8(CRfls)SmqO*^%vyx#?k7 zD=&ALoALrEHm7GddDbmp%t~g8Nd&;M`m({9A;(-u568VEDmcyU$>qgIti_RgtOl<& z->UQe(SO|I7l#luGJU5pVX{QGbgH#w9H|5m*4bTRk6gf*O95u&^QQ|f>pVUCo-x*B z*|HmB;g?6ijaLQl10G4Lz^2bSC(tlm&~5MJ+F{|(R&4((wTJiOdnpjN8;z7Zra|!j z1+{Xd>54$)V&nK1&3twzNE^4`kXAv#z!0uO2-Qj(_ps^p)qZj}TKUfTw%Nb%&Cp$v1k?z*2l7tT!&htp1jo zQ%`9jj0mu=Isgq#1~eZjB$+s&g6*X)Hvr-$QKf&Uluf_8`qOstJWoysc~r66E;m=` zDio0Fo+nm)aNC<^mXw~QsQ0*B;lmN?gN0#@csCb51F-x0GSr9DFJI@wRqT?XT1m)7 z^qp&Kcr_{6Oy>_>5jG``2V<-aG&dvSERu+7$~SfK@oN6IRb3e2{vU+GWeQBD?x4Ts z-S!XM@8WBU}*ZFcs~4NgDX;kd1lMQwCpyR*zc`?D!-l zNkPI%rtmIa(aTOY(|>>Cl4>JlJet`-8YrGRd5!DtyOx$*ba08Z2VeNi=xrrfU@=2U zrHJo)K?SeX1?V9zL7Agj&SwP&`y|ziN3t}$V?b=uC1A6g`_*S{m zruZeg(8sq{G}9|ur10vVh%5}gsAc8@8?1sBkS(Z})Jc^eQTFF2?E7>kOAn!rzcTV< zXy}MCOe}<*)Ng^qhUTlfR%8%SG7s{nzqII_I>HRLN&=NR5nX`t#VHSUV)G+NiDKwA z{75V`=7k6O(0W0meTGFmR}xKt?72@LE08d~bPdqV0l)fmy?;-qq_aP|;N~W7 z@XpC0>Qyj^Qk8T}>8h7!-$*y0=nW1cc-Hu%Pxn8$j=PpM)EXdB+s_h*C$IP%FYRES zZwMIM1`+Pw9H8i~Vk4qeTh><9UPTO8So)^BwNRI7z$=*^k+ST+Le}1sBv4If4-+sZ 
zXlLoC>!cQJTYzNdtBmzm!9iNZe!I$uX<%_D+JF)q=Iw5gAn*ztqPw?8dcR7c*nhi* zi}htRU2}GB<@@Gp^6h?6{YQIW=ToB1^orPTtU7Q%8|oMn$cR>dvxLu?K0z&=9mcc* zCBk;vk|kiNl7&sC#N`QB-zBQe+N&Wv1P#PXJeB);wZgwn9{@%MNt+|AA(>Rd91a^M z|5T*Le!${vmDkxDU~}Hu+S=m!^|$eV=;xZQXxsuu98!GRxk+$K_Omm+s`u2a>$J}{8^N~sm>hYgyCl_8AxaurEIi9(;CL1 z(rTqtF~wMi_*1(cb^|yHz<1{1wG5HG)hR0Scxhd%gq-oCc;iqLQ0$+P|#$H zo-J3q@h9u=N}uzxw0Z@s4YAm``6qE74!mg#2m%`Hr)Wu3pAa`*Exr;thHS6MD|5;pjA-=$xhRu_TFLs4dXk7~G%)dl_ zdZhTcTRE4vMkHYDo2}@Mu=9fg_ezf&5!!dYaAA&ajeKo2cjogkcFviI+XvO3S_A2) z>>=BX@(?ocnW_Fxb`uGIwyqkDy8>fd=0GCdaC(*xM?Jt2{y;IivXhJMNOl>Ze|z$x zmP@Sfljn1ERK11WwobRpCAaKMRq_MRl%EJl-@9*Oc(3CF)?b(PF3rEN0RJ~lb|i;9y(AJ)AADJ&To0@0Z&2B2 zt_nE(woO9uAMRxHx20R`X6iz)9+QUWvw*1opU?cnqw@9OINNGex-qZkSj{QpsIh}iusM2P zADhGgsRBTWAEr1p81)2qNVpQ^gypcrV$EwX%Of;hl*4D8i5s>O2} z@ZjdOF1QJxw$)7aW3f{k7X@116y)VOrRirHtjO5{RXN_wq%JB(j9~%9u7)g%@txnc zb-6`WORsa8oo-^PREB`a_I46@bbXM%9ei z`b5F?`GOr{=4-8Nr^R~n#pLVivj<#`OD0B%-lKf)u{}u zoqr)+ED&NF@qnvORZj(=)+DT25wh6F5%ZlAOuw=dw_nJ&IX4Yskr=MXNKk=Sl}DC} zCy`au99%WrTq}?0Pq(ih5E4zK3byV=3Os6=HmqRoIZmrV&c98BZJ8&?FsDS?kj<5` z{ZCykCyXAZj?zP;CZY~qd`AI8w50%>en-W7mi`b<13izS;^VAf3bYDE%gZq!1F)iH z58|h>zhsBk-)>-mggs%6XPoo+*42d)9C1UTu5y$K4bfrV%Jyk(U?SQb4EM&P-Mu3c zV*yEHRaW})H#J4<5JW_=-V4xr0lAQ^dNK&p?L7$ESNBQ2yc8E^A=qzjkYv<)^@yRZ zYbN70AM4Zuk(BwIXFo$mZ;bf$4`R!oX!KL=O&3wqg}#OBiSLen}{0PCYYV3NDW3$n3-mvf5#5PJCq^xtnL z_FGGk`nR#Nu4b~*#Nm~k@q-(5;KY%;5D@?63`p6=I@_Mt9jS4BDHmxJUReA2{=Q9r z{6TmMbU`JwSd=|4AD{O|Etgk)zxZ_`pwmu6rz{|&{`bYCAQ#hUHsj&d?;vQ7+rNYz z7d0JqVYEPT0U-O@((_(>Hr;GU_JPUSo+Ps6U5R!DI8`}8{1X5l3?>RYGdnnenGvl^ zTX{tfC5+Y77O!0v|2}dmXcAVw6&r-%U=mhyFJ))|9p#T@35ZUG$NCwJU13T!b@4fW zZo<2(H(X{_MB_A6m#aKL@FBh^*=Sqc9q|Sk&@KLkD392Z*Vs9Gzay3HUTf#RZwP1c z=?A8s&yL2zyB+7HL7MSpodoLj&eFslQM5piYXZsfzMAY9B<^yU8XdM++(<2pBgWN_ zSfJdgpuHx}kqyz+1`ro5bq5bk;_(%az&`sDAb+*Cj?BGty$MAdg11mZXUH}(R&oOC z$1}J~`p<%6u!W7y7-yRuhIPZ9`}|1Xjcl?01+GA7Ex2*8b+YDt!a-p^rE?il2E;?q z+4UUI1M@j;^y0Xl_V@-(q+La3dE+!z(myBs>yi-phVf7t2qujWW?CZV%O4b935bTc 
z!;+*CSv?=MhoN^3+R=hrLI%!QdxR`A_9|p4bkgZ$M)X25WDw($!oMKXQlDF2ocUc{cBA}g}hs|*~cF*hw z8`18+@YhTRbX!!7#^|+Vjk;Lfj3{1QQz(B(T0~b3FzRopMp=%MjIhe#8Iz#CCD#s~ z9?Ez{%J0kov`a9&#is!$S{Lc&)E@Bow z5&P|9^t5phMuhf!>P(2QtEsl7(-bN!3i@O{ASGbvx~K(qNrv#eB7ko`&94Z3ROwN{ zhUC)v3=TvA=quiUY&1LlKoeNRAVkn#?ahtD_qwIFeU|?d61pN{542JQk~%CqU?-WF z-Cz4BUNsN1*SG!35C^U6nlM?DFnQZl>%-3SBOoiH6I`QLm)ALgN*FCxVmoNo;or6i z^87pGYTuIsRe~ut#Z9zjdWNX#i_yT!wbMmEpt&U2;P$SXM--dW*1N2l(WE5HMK;IzH0idQRx9i=9Kh>_vECqeh~DaNGTq3OwzJ`D>c5iNc7$p)&q7W2#l{3a0K}R-{RZ4R-ry_bH4zBtnAjWEY$9Cl7mq7ZmxI9xE;t0_qwyn&)gwYK zbH@^iM}oTo)@h|}C3E10XvcN8kY+0o=%Ngo{=*wLZsZ-HQ)o-jh zc_Ml=!w(L=e?4l1qdvDKkR|{`?a$t#y&ZeGyVzJZ)_1Wq}9=jhVQu*g6o>Xm2vO z0SnH!W1W$OQN7O+=tt=^X>-4<3Je&V>xi2y;RNz{Hqiu?=h@1YXH6E0)V+ou;kBiR zphdEa6}DD|M$GSPY%>xe@3me0UJ!>@^FuDC3wTcCQc$Tt&A1w`qs}0?g9cuAfbZuE z#>O$W)kwVeHmw_%9 zLwu_MdC#{SL}Eo9wepMnGy^70u_7GIPH@^%k!t^ao%9rUyn-bbS?c`nJzdZf4t;cH zc{i6+wFcQoc5OiZJmHkd&*Pa6HZE)vY(IbDPmC1|@DMK0Q8(|;ukHUF1}>2T*o;jYFP`t%o zL(V~i>uxhLxfB~HjjJEMRT~}J`g}^IYd@l)B#&A_{2X)AGBpJ0`$s|I%NlL8ofrJ? 
z=vB5eY2$^th*A&8p|2>M#OAK!9!@3I1ada_!uSiEjvw4;f?}PW$v${nB@j?iEf-lr zrU9V9gDc?RQ-*#yEqFT6O)`%odIcEyY{HXEcSTtJAPT&?hntpGFjCl7TF8D`qCL?C422Xc-zplG?^)8p-SF) zF5TBq*yZMa4|niZ5>5h5itQ-(h?^I1L9S~_6rY_aZ;`!=>Zs$5M&nGIQZi2;pahX} zG@iLBaqN3N4F%gVK{;B?qg%?<>$bqIC}Kx z&{tN#tRSEjr+Awxqa{)FgLk*>q%-B4onSg+SUk1A6=1i6%i#IrqE{~=A@MaI>>B0I zSuOITyM$MX#}PP>(94l2Q#~Ouq+5sLlPAf-N5TNY?hSqXm z$tAZw-}m*`Pk0=$xLYz2MRAPR1!r19gj4b$ZBUyqIrr{l3us{1LHcYy6}=D7?(MXHYvy+^iu!YkHFwy_j#;9 zMjAx(kA%2pknHS{MWPO(1Zg|ix=9q)Z#%%xrn=zf-)@i1`Z7cNun~p>^r>@g>wn1F zJu*e$ciE9OCI8(7fV?XL3z*b>z2*xLjl5p}kKq^E3PP(vlO+Krs2@uVsRsr}e?=`w zN;246=q4Q!uH6CK50=$+XP;MnZG9(X-!yErv#dt3oKVM}Pob2u?rU+u&|jZq@7WQ5 zUgg-#e~Ky%RAbJP7EIN$pd?%uRca~(^1z`5JzabBR}c_ngx4Zah``HDp~0Q-;FX*J zzusL^gs`*y!w+V8U(SlHL465V7v&yEncET^By$n8UlWC$t2^v-gclM= z`fJf>p6H2^9{KN@dFN-&U9o>R9YyNj#q@GCqef3XK3r&XX8u~{LN#JupmqMdYSW3c;=OR(p|CLwt}`}rzi&a|E@Cs_2OAI_)GeM35I zkD^ziKhwCzU3j?RkMAD;`CI?0DF5ml;#Gtg7!P>g_lXJTtjmB1)m`Ea-M0LZp`vmv zfpbnG{Kt=UwN-65wXC&g^TAW1e!LNxx1f$SLn+-(`vbZ!I2fj-y%yUVab27d3jRJQFRvf;)IAtWW zrT^%`lQipVLyn$dIJ~D>{Ym?tv$aj(Slt5xXTMsnL3I*lHiXQ0;AS>aZ&g1 zoSy5Vvn8w684rVh;r$s_Yz#NB2#jk7c0i#y*K<<8n!k07IcioXuBq}^dpNV*Psxlu zhmi@2GLOSQ&-aN-j^EREze1#EluY<>%@D_uya?T9%aKIAu_BcAm|$%*TkSPla=$88 z%KI0A*d-f?+5&(e^z2E~{X9zbvieprUXTlb83n{w5PMt~235qdtlKkNf-q5$Q=rJa zK++I4XPyD}Tf6Ecg$7phU)JjSNH5fM^%mC3v%r$c<;3+P8G_F}SXr5wE;A|XoY2w1 zo>W%0Fl5>}!}Oc{{nG|Jt{ z^r9EkYhWbu`KUioJrl*Z2C6ZFG2H0Qq6}Mw28;k;c$sN2X9O7(n*FxmVKj|lhNGSf zo{v2OTQD8~deJSy7qj!JhIc(FJ3ul?Z zlHDnt;+m8a&S94T8&Kt{myb5WeQ@GUSn-6n7#%T??anXqQVzumGg;wV}H}< z>+KK!Bk)OjZ=-bgTY3G}rH zk9FoR3+BoYpub9h9gy~R+ItLC@A~viikT+XzxL_~m6SIMG^0;+y!^v)mu*BU`U zNYd}7|GapA&6MznoJ)yZJ4BZbTwMsC>h^OqdKu89O|djqHnz1ZAkS_5ndE&D`$ZS+ z_$rJgmeQbTKO;X~a_aGQL#aey^_?SW)CQb2csMDn_wvkm4n;AMSgUV7_URLM$1{tn z+v@yqn$^HdsPqkf!Bcrio!1}z{_K2_b z2oyV8Z+>74u9a8wqyZ{-Xn|Z*+Q;Rj6*CRTrp`S<=A@o7_Pm(|{sH~88~d0Cgsl7{ zHOTHmyM3e*uN(7~`7LBNT zqd-DJBvL&kO_J_|&3E`#GoizEw-WHrj>9dgJTuu5vXLa9^i1i=xt;3o#}2JUcg9x? 
zuda!s6tuAXr?E;vF9mSy*&-PUMLRiUK@uM+h6VlVA9?EgC*n&mEyOH2Dw4RxKq;zO zd@BPLmPJ2*FcNCq)8Le2T<&8?Hfk${FqtOq1RPkO;hxrRL&Ay`-Y0;#)ic2j;9>JP zN(CsR-6KVDgw4rcv*x-xRruDW4a@J~n|-iOX>Fl-W0iO-wzZiWN~{+K2@c);J9<)= zR0f(Q=ozr%9^b|*ZI|KJhpmP`C8YBt_S?A8jF?H$7YtH9oxplE(=He~T(C5GJua z`rW{aKk-zrOQ+$8hdH8npcqJerLy?4U|orq&%OH4j{y4PlaikB@10{-AZmf6Irqrjn5MJ~Z|6^Xr>8Ts`%u zvT}_b2#X4y*l#;=RNn$Mn{EE!!6%Q3H%%VofvF%ZaA!&@pz@f5e{XsJUJ@e4-BZac zm--i=svBG0ZHd4W{_CR`5jUG^62n7jEuQcPZLt*V?D#E%73BJmwzptulf2%Qgb|RTFlmoTtapTj7zWM!>B(e=jdX63X%$U78agUlA@1 zUK%%GRryKR^tIAB~L$vxYLbQyQl$Vb?ykqlIkylFAs{iJ zNLFwV4>{0v2=Iz^ht3{L{b$5@>wrA^qKLhqW96sD4pzHS#6E79y+}XqE6q}9+U?So zpOd@vHA4zY2euxKd>N$)y)NoU?LCObZxI_u=05YLK((os72CuTuKN@vUgEo#i zsb#UkmqIkrt8f*NRFc_rv$AkB$cR(_RZIntVzJ}sXi`{&u6aoO)^A&IR7suy6@vFe zVp(O`6b%#<1^~m|-BD6{3VVc{RSID#&P#DERHgO`tsp#Mi->wBSgbLKV;6@Pd>Xl& zen&;d#QVxI&blvLPr;-XhMs`DGFBiSX*42n0Q)~SBixhsqJ%M9sg%YOtY!Xq4c2?!bZ24I?|Jj%O zO(D}-#>g&7WZrmYPi*67;LIrL9n>u9?*aKGo6*6>G)^LAj51zh&a z{`n%_f`1bFtIdEX&p}V;w_m|oapl4A8~Q`_l*!!sDR42;FMjB#yBuVnWzX?o>h|i# z8;1bmlViZL9e?k1amFj3mKjeEkKaKiT_;^cSVXG>^7})Zi#CV^mZwQdW#;`CRNsI_ z+z10W<`)%k8nvnK6Sjb$HSxnkuFNQ=lj2idR?+$A&B?PM5F66rb>7?{AMN_RmUYeo zXXc6H$BxbGEX1TmED|pE=wSkVXmk5h%s$_Crg}M+S$Wv8`hy3``KxB?PdNhqA7gI< z4rTlHkGH5O#n_THLu4si_O%)bMfQCuLMEihPTGt;Av;B~HDnq4Qd!d=vP9v@GDOH8 z!vDO{^Ynbb@9+P<$Kh~fV&=ZD`?}8Syw3BpjNP#AkHC>=+AIT(HxT#nET4>04FgNM z&qiNQ|3Gx?UdO@Xh_NS&)LJdu-4Np*4;qG$o#(`f8^tPq zzf%=wqMD|aV<50s<)P68IBrBtCE>1HlY&BoB|4n*C$(NE?f|#tjSp+&dO44Bqjzt z=opTPT9D||m3{mcs~CA8PdQ>aF~9+eJ7f#v*R&PxexdC~e24{ICv!|b z8dWFZT2w)b{B9>c==lQYH9VA_@;{Y1>F%%6)i|Xx5sK?# zwtP}tQ#MgJ3RGN_s5z_U2CrqX=k4+waH#?nnShJmX~E>YrPk_wl@8{0j?cXa3iur*Ip*Up)CcSLE~h|Y};)z`-uF2!4$zP9#5 z0haaXyBPHE9QB`6Rv9sVP5tQq7m}=6G*JLDNgbn++aN63Ez1Z}`En|l+j~#{;6d-7 zm^lE4PQP!+Fwft(I4eD22AKHN6i_q88?!O~Asegs6loE4sAUy=haz*e*EKxIz82S` znTx-f@h7 z7Y$vhpdpa^i8#DHR++17$BLdf2-jCUr|s6q$9NC;9yd#bFjLfJrky?Q`~?_Cz6qo5 z#zVl|Bg%sUVrqHP`w?}>VvFpO2{Y>|o%T)Q7Y`UeuJv^C8*mo7+q#IvfK}4o@c)5t 
zc8Bv4*8B;MMTXYDsPt2N|46gd1p_f;9eVAS%v(-c(TnIb+16;sglFE2dt1pe;T=}kqh!-WG7 zXEmYd48qra>5Iz7j1M&+TMbP9YoP44k&srC^=cWv=ku541TgU9|Ju4WG}tu`bf7|* z1hb`C+ue~5?Hq+&hG?chs$SeD+pOK7%p@K0?!s|Dd4R3kA}5V6ZblROp?30BjLe>2 z;xq!)u#@s?heG?xTfh*-n)EEP>{0AcN=%&7DN%+MXG9uRno|Qu^ItW8i$#ccmWw1d zQAE!ch}FO|j6p!s(==_u6w|6~Ae z-}WKKSNCGbr*Iipi^v=>^Dt;K7V#RhYvaqjE>}Y zRwgP#PIx$sToPc|_)>P>Nd9p;;pDMjcf4*=lg%{k33x+TEsKBmGr#90{Bn~1dU(H_ zzZ?MYtk>ddy&h)jBW70Ecw-0z`7rNdHXtj0oP`L>1Ssi-Dye8 zD$F~4(&Q&%EvoKzgcEH8PCa(qq)0v?s78m{)w8cS%Ohi~;2*Q|zgMa&vQo?Ef_jkI zq!>X4ShI8h%SPfzLUS85po9NQ0u^%V8*Lv1RqGhYweV{24WSnnI?P=DTTpQ)^%ys@ z7iz=t_Ata`!CckMR-rj;N_YT*@gL&rew-yq-3BtuR7C^9-1M5Dqnk4}Iq97N@BqS- zbZBE~F+QZNJqT5@*Ez| z)x>6^dZTF0w$Y2IJ9veK#SWdRG*&j>k=P!rF#pX9833O{!ha|z{+$dUirB8BhYKJX z`2E^`cqm523|kcH|7Ahzs@h*6DYZ3zIrkS}pKf)wlGl(FrpgRasp3$YWOR>usRP6NN;qYcqZazVNm-!

    *iCs^9MPsfU+=ct`#>MiU&N+g#;Wf$< z;nVs>rh;f+E7E6%6GqN!V4LNS%>yYf$*1Y*>5|C)eisie(m}BroGG+TOWnA}Q9OZN zV{USF-6{3fFQ;GfqU9!1PgVmO@6J|$%D_?$q|M1-3Tq$Ep2b}`6{*D^v#g5&(%U@z zXQ~io^%eZu%0zG(VqBL$lB}Ft8!Xlk%#|Atq}S-{Srv4yZT1US~UA%jI_U)~O^y!f8zs zWj@kParIoRDU@aJw^ngMV`(Cw5npFpUc-3n%E;ZWUh43`p67ui$j4Jb3Y~hxi#r9J zm0Pe@@|wo@1yDnMVz9v+ES$4K(JKIYOAP`Sq+k4@ud$t`eb7X*^9*N>KOC=Z5%bK8 zsn`-uSYB4pfU9`;Vdoz)@eU6Dq~$8iBhL{+h2gp&g(jB(#Fi=_~1CXoJ2y*1NKE;utQnyoS7|5qW1F~>aB3URTE{EAjxxWO#ms4eEoV^A2sZ*S>?bQ zfXn_e>uJ3n0T8g*)=`PB_dNlJJv=s^m|K;PDaY-8hHRR)d;nljewz`yKZoC~sBi>N(~w36#7_`l4N?u(zLkgu1= z4uS4w>+QKfb9Q=5UabA&+_D!jS_Bf12_(8lcAPUspkO&_yxf26N53b-Rb(>c-@hXL zPZIwKIn#HYIJm|1vh5qJ`9n>7GZh$naeWa* zPlo$o5o63xPz&U_J`a-XJb(!_c1C2P!agSeroMX)eA?^53LYsZp581kNBbpO2{USlb<7eg0!^6eDw<5Kh_1Q- z`4&~$vr!5W39sJ;G^R|Rt&b@_CS_XTHj@gywlAYG*;A<8mzwZK-a3DYsNd09f9Z0iPF3MFPKaI@Ozh>b$?RWa2K^FNk-!ECc^}?msq`1X*f`3L$T) z!!7XyR}F?sGQQ~v-@n9tLv%U8=c-wNbF-}TdmqWr|L!*(pGeSy>@e4BVQEPLL}WJt z*9>l?5*7#ey0wz(oH|d!ej%!s)~=7Ww{U$+ft?)W+Rw}UXkrgZTv0)+ilX18%V^*% zM)@3z_h-hrGie=1h+dX*yAilK@ImAB?6Khnf4C|>{T zxb-JdK0fA9@6fW=GHOt)R3&#y%SJm&S+_JHS$F~C{Q?Q zni`&;k9(1rV)?Jb%jw<6}v?uN_hcmCcOZ#&Z__~ROl2fD7^SHgj}$_9*B4me3Zx2#FdnekJynZ3m0TJ zX*znA3fJq(Fk)5i-tkBbNgDPI5GGa9129)V5SOj@sH;nK?ro$h3%hXrg^U+q^CXF? 
z-_skc8)|ICG_MFznhT#g<(ynD-{S(=)C~HjjTTEM zMrom5uIDFuNRs0<$nc()vn*dw!I)c;i~)G)YXnuY&hd2UOeMK?rXTA{;aB2#rSB4g>l7~+~CSDO6EZiVB4pAV3T+hP(<-IaJCs*vruUZ)v`;% zvAiX=T3GA5*j7t>U0?+0h8}vwB27>fvFI_l$|`Y4ROV~Xrve`Isu6bI$NJ4gtwGNB z>Srum%s?@qrcb!kN&`r6LDSkt`+I$Ku<(o>dvl3$@*zYbaUb_Vj{jk`@*%4=%h@vj z@6}qcl@71Jr1gAWN&WI@1F8J~@5EtDA~Ya?5>iYLR0eWiQX=OCr1wQs)Zw(o_c6{;oaAF0iu?Q* z=)JaoBK>?G@^vD>7Ki(ix!$&D0UhnrnL0O_M>RtF;xCCoGsc%TTu?W537VsxsO1P8 zU5xtyQAgmz)$^T#<63gM4r58*Ttzq3=MG-&ceEmf)zw#YGTu^^4b3p%m|a@-#0)YBWuyr}a653B9F>SucY=RETl0gA4kh zE!Lw?iYHqSKi&t(M_(S@ChG7YJfXXzl=~x*o8G)Gv#VFP=?40)QM0abDM-kY?cyW^ zr!IL9Y&^tO9RC)3G-X<7j!HG{F|AcB*9;Ea&H@_aq_ZoZKPZxqc*hdcI_j!J2@wvU znx=v0Gf8ys-@e?5ogCn+A2DcHH!TJ0R}?*m=Ea6@dlLW@NnZ0~fr4JF-lMc6#(e{- ziQO65>+3!|Oi~%?={7F%$r^O8m#ch1-X0Zh&SV+bc?Ey=(rp=()XsN~Akmi#3|xir z*z)OB{h&rA;m>`#uH)aR;-?Vq_|gcS=s#8G-ywSJByup&mlyt{yM7+|^@%QUz@aq+ z+iIyI7v9%@Z1FpG^+CBKZ=dRESG^*R#j%%Ooq>ATN%eeJo?ae8eC+Zew2>JD<1e7! zbu;XLpl~jP(4}s1<7%$~@z)G!URM#c{O;fq?hKVZKRBN>%4!51A>$ccwU|qJ{Eq$d zwE=vQTGKY9Aj_Eqo8V<DiS;qGxbu7xky?UG zeHYe4V!i@!X0%=gc7RKA@!u2AzcyMuF|ZWDCuCw1>~7joJiXcg(8SbkmTIlc`CX`G zUAd^^cKB?MX*k1rg96?li~dMz7%1R*Loq-K&DS|$Q}!JgzfU4n)12L@x_4%_x#*YBUck{t}4CiCmIT~D@z z145lf9QiSkmTugbGO;2ZSJH~24|@|p^OC@h`)ML>RA*!QC1?92Z-UKP=m#SDJ)DnT z-8yI#1^F7qPp?XVC>>#MrtCe?fl|O<%_jJ?j?K+XeG>iEe3abGBwKLG6+tQft2g_5 zN57-Egs9qj-VBKS8%ugeQ4F4bp@TOx@WMe?8*l9UXHl!{p(36=m!B5Qc?z@+<>>Pdq2-rczJ=;@77j1 zlNRZ*M|40ZNLo~ZAF2eq9@6c652z~U6dx@<@d_}jX)zkA`&Dvqe6?H(@1TTHzw)B4hKPv7iMS4c{;|?v9;5#S)L_)?;J_9ehr-hn^~n!$q0XT`{&?lgqT> z-H=F8-!~C^>d)>YITI@wCaNxwx1E>XiZXhlG=$TS1}{zwraG*l7m!$P06&W7d7m#^ z+)dpklO2$DZVwNXH?TfM7sT20-&ljoPq*k?tjkZZ;<<%6QBLRAw-hYgn%jAJsT_eT z^u_8E{~JsC)RVl!!(2RK`d#}Whxm6fy(i{In!fE5n)4eOSlW2<&Z;W0b#!i$fqxL` zpKX4{7$OfbbOS=YWTDvlE2PRa@vjP8H+9Cb{l_)v2E=3ph6uBE!LfPOnJ?z3Mh~fJ zn|=l7FKg03T}bTm@KJJy?1q5NAT~gAUE!;H|KSu}=|SLMu~WC*O6iW>%xSetUl7)X zP>JT8PxwQGqr1jLztZ`pb%0?{tmZ)KE%yk*;?M%IuSdYV%BwBMv|PMyQfYV=N$b51?%TG@k{PmF0Jn+en!M_X4( zTl070;Bu)jQ0DM&Blk}080M=;^~( 
zx=kO%+vw*&s>+UZQXT|j|Ec+q;&1zc9@|qLzRJwd)&n%*&5-oCztzF1jfWIN8`7Y4 zZYt}ld8_Z)D6PXHb7I2gu8fxIFGtw_m*)*t(b!Uf>J7Je83XQXd(k&>gxeQwS`KOc zz0qIT=lAVPL3_&pZ~DEfCz3^rgfsOlKDsMh%rreB`t5n!x8QwMeB-iha^ zvbk-RgdHIyg=BpcoA~yt#=YzcsP`C6bknw#WRW@077}i&RK|C09nx5`cWA}7hO)|# z9DDLd!tLfh8g1~PD!J@9(b0z30qs&fydAE<3F1wjWnCBExIw&pwaPaHX9AM%SHNc_ zwfnJGLmf9^Ry+N8YFvC?^MWR4T1VQ{T%J)w$$2hb1|I3WuFyFEb%V30x9|YqvR;Ej zfN$2QyLzK$w%mD4GKd5iW4rY*k_73}Rh8uInQ`fBKJ7_w5NUS2S*3?0-n@xv^|3H1 zDn}#m>1&E$lRYsMgON9rX=9fcKgCRdVvU11Tpu{3aW=Uph= zr(DOK4rghFE=yT@@vE2n4hM{VU|R2Dvi$Pxj&8ixcnI-VUJRl}ks_1I1nP zg7$2xnG!;`Dnwe=>SJp7y(6_i2|0IAetb>-uhn%`v_}=LDRB1-2(ocI!;sMTT@0?d%q-++lj#NU+5zDU5ujTd^OkBRX z&yna;tDOQHq15z-Vc$25Q?mtcqaCN4uahBtIMrqtgkaIFtwuxtJI^T%zD zSUTWVKrrwTP90}ML(tZK_AYyzz>^|I^qK?iXhTrlU{RqD47JK&rmnm~ z^uE)t9@tR>bvt?d%9h%`*NVx=>%ns%^_f6p32mKW{@&wJ>jUL-aAhsaDYr_>dna8qX=Q~x$St0yf^@^c;c@v>|labHgKmr&eW@Q}w;Sn5BuB zvwxWx;CeR;bEj891q|D#k&CfX8dd^L$W}}XGb>1ab>kXsuB;(N%p6Vg=3Xp5`7|bU z=FPQWWhzD)Q;hsuVi3!a5T!=;Vp3L*!Q&Nzri8Sz2pRkFgik87NdLP4 z$@L~lgtE7D&ap{S7r&rhM(KoNX z900)7bg&t--06uR%zV?B*wPSCV1vf{oT2pI@a4s?PX^H2cZ_(aKP?EyAlpr{WF)`rU(4zD^FPm7B z()XRdp%J9-7B;;)RsCRJXpa9X_=@OL9Xd31-HGzxL45{^suS1Ny1oI-myxIB2o~yN zk8%2$jQS|f<|U9+@<23Tt0`Llg1SN&crWI+Rb53zz2A_07Ey4SbPpcwz10DuiF>%Q zB?_<>Y06F&ME4n;^p%(8cL26Abh@%kTzBELGKZ5MIN{;=W>NF-;Dbo_veu(Khi(a; zVZH`xle$q-V(m_R*R{!9S{4MMPK7F~j&%an#ar*aeUv`$u8+}e{&_aJ1p&E?j#7(m zH)ww6e(>WM!iaG`=L4vafW2~%Z@cj-)|9dJb&N3G`xHc-{-{^YpM zs-3CNyKvQ7UUWZrx3H1D>BKi(Z~AKLx$pFkrJIy0!N#{0)ghi`$u z)m|7$x%?Xo5T!Gd5Suz-%bk8F5Au#O#P|uXHvE#_Q?iz@{@T`*>Caly9YqP+n0MNN zWwBU(5@4M8j~819N2*dsy>X;Q@krk2WD8aOWaSOzF6>A(r-BkDR}HuFd5VnR@qI65FjbwD^kq_7~vHc>ZLPj;BR*Mv5?}X0X1!iGJONYx65^Ss zJt;M}xrg1*mz|mozzo$gg=$lic`9coQ&Lj0T;hvbj}dZwPfRB}{n!0{!#C@Q&_$XA zRXx`EOL0w6Jp%*2jTsy4O6E68-5?OR8^v>p#58^7nan1(>H4V{t#txgIq}; zJ8fVZ1xAZBp+0moy||A$*9S63i7`d2{a|w9RDo@n+@f0x)W}`M#vF)XA-><5jrK#N zV@WHk{x@=a2Bh1~z|E=+8cP2$1;>u>A}n5M9+nZ+S^U|p)oxTC_-W|SxRdMqU14E> 
zbcheEOObhM_fo51RSvM2i83?{R=y*B=T!-o`u4`IZ_lvdV`fI(ltz(sh%rp>1zYwV z`H0}pa#iXEmfEOPt#KaWFJcCWLjW2pFYA=bbU#!&+P&lzo_Vb3)RG|rW(CNB3jcof zC4=WF&AwEZ*f;;GSD2o*;Px$?+J8q>mIA1dk~_5}y~N|abu=_u>+c8d5K{)RR-EcG z7Qb*Aql5oaB2(92x#OM4oaVKm2bX`=K700TKzA=3$76Br>Q%{?GE>`OS`H}!$-uu9YTnGMR%TOD361&^o+3>OKd?xVnI8t!pj_7}KS= z(_r6`Gn*hM$(QfMyx3sIt?PBd;FZNX)S%c@e@v1|BnO+gcAi>)^2C3(!gOR058tOg z3?R?#E7K;hImsgBj$nyI;ob{9f-AZx$&QihdqbKAfmFkv%*Kyy(9dkt3J}R`B7RG) zaLf@o$m1~{q`TqvP`F;s*nw4grZMJc>JgTy9zZ4c+YY>2KS!54j&d*R8-1|)1t+m2 z^|h<77aSS2=Jy13mBsz2U0yE2eJN~U^ZX)yghNU6d;1EPT*FMXT5~X{IP;<=89poZ z0{_zQ{}ib(KBB6bSKdySPbgE}aDPa+ef$~&V0Obm4-5Ot3^XQrl3N{}yR{GX$K5!7 zNk;4DbT~Y31==f6FD7N4k69lnI6t-t^(2ecF{xj0NSY zivY>AR7=xJqq%Tu*lBvL_wq>`^edkhAUR>#uV%YE{~D8tu;8w=U4I}|AV*`~>H654 znk?G&bA|OsFKB(`T*uOlVNSlVu+qN4$|&LYtqKWu~G3yxlN;UQQAcY*tLOcx)hJ=uaa!QaeJt#5;R5@ z9C{1i>DHdlio7Io2vz#YNJcNDc4^K3aV;ZymI*sK+Rl>xE9N37Yk;60GvAt4lc9dl zk|&h8y5*iRJ=FEl8R+{sR}uB#3`gclqs{{%Qy)pdg@6{HCKT2<(f$qUva2g#M#(Jh z@TKG!(;b;*EMV|Yb50!hxfwUTnjX;D9iQ-N4~S$&tk~f}%7IF3JUUhZV24V)@&js^ z>6fI4*bO=IyMoGHFKK;CwHM5$a!Ru}FwXv??gJAO6Xl=?Dx@;jVf#tjF4=i75ocnL z_g=Ny3%XK|VylnLeZ6n7s!=bva{D9#^N?`G{bnU^=Tis->o#=;feOn_rE>Oa*hckU zkmxLPv$C=(mgMY>*{n&Z8E5uf*Mr0QQ6FD7H#f}@*S{i{gjk*2)Ow$pz>OM01+gcP zM=~UD)E#7W|2mX&(~~u#--U1=$;$&-2sMmG6E2uS0VcJ1MZUC-{nocTn;HmOnT62C zP^Z35s7atkxG0-61{e4Eg)3pR(NB{B$%C&6{a0>Ymbl|SWQLdFY{(fFZYcTY-seWS zXQ$koE_92`T{}(vGNf-B3{}A1_5<0oTsWnj#$2w*)w0umyZ5IIF!}YEd9Fpc^sL)j z5?lGbmFw8jO9FQ+)C-{Z8Do9f3808*$LiWK!TDxN&ypAvV9Q0*O%*V9KLej#k_W-l zTim`k(mAlpK|8HOId=IXg=O$(Vzt$~#}7b+c!DWo{AQvs$t5CYs3b?7s{jryrq`ek z_VXyYkBIJ2^Q&Yc5JcGueAZT!aT$>eLEyz@z3x?+$E}OA)v9m_fIgw1Ry^(F;3~oL zs{VzyL`SHz4XLyiGFxqNO>}gL+De}yp~Z6xXfH(`hRubua@p zN{RSg-Z2%=slMx_Yv?O%Fhu8&nY&{Db(G#gUWgzI@Z9|!Zd0rcH!5tjl!h}d!e;Ho z_;6?aHEtsc8S+N1AJF|OpnO`s1J@6PMytt3n1^%kYirGzF~dJjS^%!KIm>@ z*Q7ZXj~C=DG@Sd-)0i~!#1oU=_|`#_b7@#anidGc0_f=+y^*`I10TD)T{{7j(?Yi& z=>2%2tlqN}h7+jLMvxKVroGRWle+~TC$<73Y|6uqncg!&-bnK+spND!#WmH;pqS-x 
z=_{28di8Ol);No`I=&>FR4%(g)u6Nk64ENC?gk@*Dg`}fnojkoyKk^#lMQJOT-eTc zn8PTw9}i3JEb#L!^mv;E%>u;}=R&@8Z+1($7Xp$RJ8`Y*#k#7d^+d63bE&Tyj1PAM zfhEeZ>CArpW1}5RvQv=i;1`mrUJzPD=0JWhdnnzq)XC%mdUhE~GELT@j^Y)A5o72?ah4(nQf?L{yzuuDM9Z}&p?nh7xQ z=wxK_G=RS{sM!WI?o6E~rxUfo44+0begF1tSw%iM`I2xtTXo1MP^$;AO*)#D1dx;u zFHc%zbDS4?+3(SHnRDhf7@2K4Edgu~V9jsKy4YFpY6t!R??d{nvy0lp*^cyh=pkU$ zW{)I?XpLL6&Moq!tP4?^rUhI9nfRf*dk*XcQij}Doop^9qluA`kKbF>GMa>ocx5Br zd5MGlRVGv0b@6gv?ua7=sPJiHtm(p|j{Ow$z7%J|_k{@aBhfjrRSn8jk!}vwBnch$ zLDhWZlnYuDK{RH)!LiX2wID*zPDw@Oo1;}PH{}SWN(X4O@lfgx=0SAGhKgNrY1jUa zP57lzUN$%7B}9K>nhUKzD4QvF9PL=tRi?_R-Rm^~4gdpc)Q5Du-Nn;R7d{gmSn5+l z@l;3)9IF6+mCMY2QK!$p=~cjVdR=M3kwS@DON{M15HACh^K8V&(PL^)1c0K|!xQmN zwtm@ybVWY7IHZD_SKoEn*5n5VI$o~T69fs=DLcM3>#DD?qi-h5wB1Iq#MV5}yOqJz zQh1~^dsmgVIusfr9LQPVKg9CcC zVMvme9i$>h7}?lPp3**Hh+Rv(A2?MlR4<#__7c=NZLB5yhhp6`WpGVvY(!#?NRy!Q z#@e&zyF*{x$ve==s<$}XmS?6VZ}DAzNoE-)wEQOQEcI4EuZE^qxoHl_@r0clG%UIC z)l#-K!jAqcXWmV4H_!%xI;Dx&bjho4K&&n#EI#z`nQ*mijhC3N8QGnpg(^GSN&>yf zJidMJIv<$xx}<;?z@20Is0Ip^9DIf4y!XZh)bhC933D^R?n}(Cc4d6HF)u3!o{$#zco-Kz#J5K2 z{=K6&1ljENoAB-AMG`6-(OcIb2o-PDr`^rUoWBjiCVmFvWqljXzQ0E3RoD89a3^L{ zHH1G?02^kyq$aT5+CP3QkWOwLWnTCgvZy}R@$qBod!EZ6F-nCtatwAF^vk(6)gg$I z!r>OLkg5S;+G|u^Y0RC0(7fc2#J73_t2{0de4KR8mK_BtaP?i+Z?}x;C&Jcc!0~s> z{hxnqn|%G!N%?40?n*Hbs%(s>As|`6GyTJimN_myCUsUIREZg!1p7?K!Y=*LD#22& zn{)PH{jfR!Cqo&(`(UIcFxZ^YGC> zAp<(0wwHbh#cKfX*1%EkJw4YK$LH7l)hBQ3%9o74EV*+vKlr`-jeR~+UzJ^~9jK4#e$iY*ajdEH8_&yUWZZ|1 zvALPzMOt=BQvE-B&rjS=6z0aayXt<2PZxR;kFC(E@HEe_NEi^T?~cZOxe6YBgI|==|R5h82mm{FgnFA9B4BwwK!nHF1f2F^I&6(= zoVqv;sZxg9*@GTDjFw-{xi*d5^bc(83vqbMld=!^5ZZ%p?}9#xLK)PH&UD^Dxx;Dk z%$`WM1i)JHqL)sEuR*G=F1Qqb%5sz^ZKZySV+%Z16d-9D%^M)7GG9YX-vjj?5!yJd z@f+6by7T8sRo~5}bb~^AIj$o2O=+6milj!xa~qWJ>BSTm zO$@r5=@U~YU!$L1<2Z_@Z|f}l>90*4b7t4i>xpM#ZtU4*f8OH42M)Bp;$LkKkHif= z0_VIdPj3Ks5#eiyk%0=>Ci!*^Gfa^xsA8^;;o-TEGycw2MF`wC_$g}ukq%HPF;T%l zkZy|Y>Wwek%f;PJ4by_@?HZW&oa3VXN@cvCW@AVdu`VzaQi-dRefbx!sHGL56bP;2A9en4sE*VYm(6+7Rc@C8T 
z0~kT%eywdf+U9fe@YKM_Kmug$`#H+@VDd=!PpDgn6g{zAGQIlPJg9M(wGS%_AKd3U zv5+xF;Bo@~9f(@8QlK;4WRYT=^Wm-XYrn4&rTpb!cgw=FCbD%zK{!7>dGR<@9eEU& z5Iq&(4I#~Xu|IN1zaowgCk8w9c0R~R)qVci5j;+M1$l5=f~giHa)B)l-gG=}4B_DE zaRH3xD0#x#eG-NhJ%~ZK9!*)EQmnckeT8-f%2mW?&p2H7AN^k;-nJXgQqLMd#l50nir;~G@UM18?U$d9r>O(3KGoZ#Zs*c8 zAH6gi9bzJHenS74#lVuI6a9Jav!E%oiJ9iiI~GWdvwa|JZA!>1?3z9jGTBEVesU3z z7QQ6Jq!WO+KNY{UFp4^CkgOPGpq36EmqTJXQqS(;ULrlXla zHJ||)L#KbY2WFt>v^3 zxyg6&s2&Mu$ci*NjnM!>f@;WYYpiaDRv{1b(I$Wr4iT?qIwOwfS+k|f! zk9O$tOLAt$&L_G^zrJfSqDUOPK^6^R_25=|0cE2=eZ~SGk z$tl)Wfl!(G+_3L3EGx^9f7NMV*rk!G1M3r^`&$HLAv_tdsCZJ+yG73v+JC5KddK!u zy4h!xMneVZ1k~tcn8wM6auD;^?&mbLf1_*uu6B=UA?-BtE2c^mC@Iig^VP^cdMR{; z&XS1#D7m}KD2zfPEBMygOdStzE&9C!O9K#*)8;EVBi>aAR7uH?e26s;78cC>se+=r zbb34hi;(S*g4CIa+KPeLP9C5tk@=*vdNeyXRj{n3GBZB{I;j5o3+t-;6_V#{fPoY zX7JM*B(J(makd1R`#W}hRDIgg#6~4DmkA}WiwEkVNrM@7yh!HxM40rDd*zv0G=eo# zqY1e757z8A5Y71nERJ4&g3*K^H#sFY886xfj$9&rZTxBCozw|lfCc+iD5-UySq~#$ z4GK2ddT?-NnKsV3In()R0S?2f`Z?Q)Kz}!*wu@t0*9sM;P^4@6F#6^h#)n}bA0Z{0 zOB4QHi?p`alE{-D?^*s+L;8a4yz8KZAvV#LJC8Swr96*M;A$!$Mn*9*w)n)Dp}&}J zfvB6$BY;>zJ#jtGDxUi<=nDiOy1szTUHT#G$SYc1*i_&WBTSOe+}M)P={M%A@`S3e z^YwOVhiDe`A!NJ6AzWyeiJh>M=hgqK69Hq#ibRe>YK(8UuehCL#3jx_vO?V^2V{F=q>DxPHR$s%irnuZ-Wv2xW?e6jKXkV? 
z+EfpV`D#G5=L5n}*Ta}$&-ySv^Z-G^7qy@nr8eWaT}F*%0Fm|{jI;85)oPzG`CU1_ z?(sa(THB_V^1Nx*U672!aTBl6$bT-w2L80YdxKM&0>4m<8|(N~dlDF`5*~Pyz9CPH zvq2)Y11kI$o-lOim@4W=$G6A0*j(5V2i^QZssO4{b*Gd^vby$-&MkZO-Fvf%lKF?m z_dhOaMVU?A{4a@QbV_`iP53Dx;&zyQT0s5^dcAb{X_!;~u^Z92m%^m#0Ndx`nZJQ( z>gW_6YRyKXagETaq|p%|;6?-WMtqQ}s80hP%2cbcHTiGNfX#W1%=QmR9m_WP7jIh9 zYa(_WXA3}M;5bu6ofsec{3xJnrTbbuh`x(;^|tEXika`vIh5Dnj-ks<%IjjhWpVYT zbuFS#1ArPwEV*J2Fe4Q}fMf{-p5;3(Y^YDdxsC@FLVJxmJUjp94wQRF5D;YtV=fOH8H7;>^|>YKm5V~C@Ma+V5Ee<#OQ|(e3^E6C zvE~rpPc#RIY-+^_X(N}<>i&P(sF0POjf8>DZ*{}Sy8riA*&T?e_E8XR7jEoERkkqR zGTps+FkTPk`NbyKDIUZL)d^shqC+HUSrVgo5vq1_oy2-jBgp3XX3w`lahqBlCz5l- zvLZ5M^?9VUKOAkeuS_L8{VP<6GB-=B^7-W#i5+jY)OluNVwZmbgNCL$C$cOuHWz0; zQ02YKM#xgLDKH$Es{mb_JoHTwQ9;$qVnseswBklj#96F7g90yqcdq${ukMPTCtEcb zP*w&wwe3M}#v0M%z}G+_cf!E(4|*sP<3WZ4Lcjkp%CCQ`9Xg9aq$-oHjA8k&!nNXI zB*0ZZHdsF~B8n+AmkSTTfv0Mx<>%m#E)OvjYutyWU6w=w!OIR0u=I59PptOA%kJOH zXgy5ijY57tE|C*W2I=pi{Bn@?KP4d|@;K}>i1nD{8BjhRob}#tN6%J%s+7SDqc>}b z?&y3?OxJW?1KS&#o4`=WqQ5BNk0+PhUQg-!Fk2i1Xwz*{3t7dK@{165d@w zsuIQF8x86fZ-`Q84|-H70F9VL6)NmM7p>b~B}Y`F`kx4`AFGd9I=P*uAHmEIYvwh% zh6rx}7yynhYBZYjpt+)Q3&bj8z;O?R*L;MGu3hQQF%9d1;-dn#xjJceo043Vvr7d8 zWG-fFX@w1B=4+~&HWIs^|14Jr^Q!_{^9rsyl8z4zAP9C6Rb{qg~w7FU(E z!mg991L_%+b5Egg4tE>0ZXEFp0Ysa%Sh?Z z6gvjho(k+|14V+!u6lWY(%`XdMiY^s)V6!uNA4lx>Z zW#k_XD`U*;@oT@V8e*<*Y!(fGGv@8Z+A*ZR2`^!%KMsPt2toc^b(gNmW$PMZ7)Qzo zI$sdQ8u51I9Mp*vClRh4gVWm#+w#`Aio-yHpwLwSSn5YuQl&&M>o35_%dRa05S83* z5To*?5dH#VotLyMflVc>bD$Ebrvd7|_ztA{;unIpglrH-#_M4F3wC{U!haC8wzm3H z%1^u00NM~Y6ljL*SzBKpC}GIso;}>$2mXF_`~ReO5&E-Xp_Q3*FsWd@sZgv(2$rOH zFy(u7t@25($8pI2!WdsU9a~1m@V`yD!ow9{V4={A~_JpqKV=1m(U+=+9 z?X0^p)2f;&$YVd7+n`))-6XyWMGpX;rU&~L2$Sd!){CDY{oVyTq(s^2$lkE0tO3vC z)~=*;-b&vaboZ;!K9JIUOdAgyAu0b;!1);WfkGDph8HaQM}V4y~Xk0+A z$C(nkPFmAT%*#4nMa07{?i1P!;&ht<BiUB)JK_h zOlEKfb@KO?r}+QRFf^Lhh4dm!>CyX`%bc${SXtHP$rd)b*pj|LcrYY*SpzKn;8U^a-%E)A^>bvm069%(vtT1N0SpAx;T zl{|ZEcIm$760>E(Uwb#9k#Pt~_Tw1S6nUOSE*-=#9Y 
z+lZ{XjMYxz|33cQQIz)E9<)wa>?cu)EJ4=2{0h+UXb5V|MGpq&f|iN&5k92U1*Uqh ziD5h`K4=d*lsza3#`>Bbr<-!H5cRfpJ_z{WX?5JUovm{%ymb4kLk5hiyp~f;%Ws?3 z5_2;-WI^E)T!%qPCNLt_)#$h8(V7@AluNGUmAFXPCp_0^8M Yvj#o55~=xZ1C$} zSPmd8`eV2{>ltCYf@k)nqBR%w-UGGV4hZ0M3BZx0#o=jNsN0H}UjCSwLO!ni+Q%2Z z+{=2Zat~ssgT8n|h^UDe)+cyIHSqv_vtv*!@!1;*09b5hO)e?`XLot|pHWQFAGI}< zht!U5pN(KikJ;|3nl?X)o_eraQcu(eS*K+y?r3fk|a7^ivkI$x7N-mfYq%=eG3(eZ?*$kQo=i{S(Uc|-`J|zHm;uIk9B89E&G1& zbHBr-6#HX?C%?>QZ`9cbr^`h1<6==RmkeTcQI}5Z87kEtpeNI)X!EXoZAzu+jjAxM zh|+yCv1gi@icyKWt}RMeCn1%evpn{F(ASu35g*pM?6~H4@5YJaIW0Lu#=fVovS@gr zv&z#&lYQ1TbGsdRA0N;2TAtnG+V4L4)rkcg?UoW*R%28S=2!~dc7z*z0ng2>)_aP; za_iVdmS>Snp+j!kbV>@e;fV#T#h+Itvb=`-u1xex3+$@XgBu?m}AOoVSQV<~o$Q7i7){^oJv==2M+cWiP+&+Z59*hPNy z&W>GjClvFQH5}RBaKaCpuWWszyrbCfP5)Sqv_Y+t-Dx=}StZXJ0KT4BLN7Z>;S5^w zsmz}G`ual|I(!FUOEf&f#Lb9#Q$<-FwmJ+bn^GZ5KK5 z-i~){BhMnfYbJ?_av+}=De&Q^-<2ivUplI5Rz<$w3Wkpfu_71pTK2$jU*ypq%OnZh zH`LZWSUe>=zsQ&|X`T1Sm2Td}@kg%a53ItL2RgvvAg{|ulob|5#qq#?u?-Z>&Ryir zXMnY;d_2v8`1577R$V$R)`F4@khnV^%Qbywhi?Ve+{j4z(d;pj+c9|NlB((XcIANh zH#+c1id0*dlIkxJ{bBo3o{qw$jFq0?wD@CykvG^$+gP4^CcohqJ2`#qMN@<&F7mHdxOc}(^~ZQk~w;#*~v z_&2AJ>j>Mvjs}UCAHT079l4IoNwmR#UI*Rgbtt>C?8h%Kp$yyk0G@U(twT&TQv=Gl zLsPi8=%60e)-W4&GvesgYe3Sd%N5Rv|%*fTs&sAJsM+%@Ps{X zPmr^0uYv8=V4+AhJC*(Ddz;+8)7Uj}l)IW4rmMFg<@kz;nM@An9Yt3P#DT{R6sR`1 z(7w>|u#>Mq9&acNN}FcOwSAm_49n@G6aKinA9GICCtP}UqIvT(_F}dc+wPmx2He}@ z;QRo_#We3A8QjsoX0W1~U&SgPm#1^**O|hu!^6kCj;B3&c)6q~8_mPGeJe4po0E0d zA2Wo8%sTvg9=~*!SL7>|3&@+!KT_!k`=FCxiH!bWRmm*p+T-=BS* z@_Vuzg*A5EF^Zk3GRBq19=U4YwF>L+)ksm=it>K_46tB}m%_%j1H|v(zhZuLx~j=b zK1MlTc_Udt54Sz6BNVX8lf+=X8j8{)m#Ijx>sI*pHp=+tVKm~8c;R;aPC`uMJ)N%{ zZO`}>S;ok3nIC|K(Ea)S;PxQ>{W*&8Is4zSdB1umu=Dq#qej83mD&5RXE(eyYg@$e z=!dZJ=@n%2fty+Kh8X^Tz8UzCHQ&7k9NS|{`49;@lcN8+6ZdM_@k``DdgXDp6?k|W zxyZ3AeihUFu-0ZsD5mYdolk_n^)5~mApd>ETN}>P*6lE1!`yQ7_iV7(gUp7BKV}2T z;ON9YTxr5%KK5Ig?>`~K;|h@ucCS>K#Q+Gin}cH^gzR3y-qumubMg0jtmNO?z54r% z(aOKa@iDUIT-o5Ge$P3Q!PDYkT$}m+bHBc={ABZO=3555ZPphmMMn 
zZO$WCa-wB^!^s_4Ud|0tNB6_;K8@I3NzcVMXZhw^emp?Dny)NVtsT>vyi`th`m%jkSF3^(LZm#`BBjaA?)}5 z8e{3FN99eNDfB!z5a55IDKvI@^AYCnnRQSlj^6Vz+_;hC@zW_ym_)Wn`};P>l>PJ+ z;#~#@zCvUh3b=gnKQ8SmL}p~u{myz>9vNN<$UYRbd0mwB3cGqkR5M)#;S_ccBsF^5 znEBDFWGm*I31rMa38Y2n9_fVdc_I_J4;W0(@UWfYl|DSVd1WN^&9US5EYY>8lSr0O zB{}dg9@)OKy`~mrR>A7OnEyYP@xcdQdeCNCG{qFp5W}?L8Ip^%N_u6Z% z{Zv2bg4FE{NKXG_M;=D@$lwgi<#dmg1|#{{b0Dju7y0pK=Ylry?7o6hcD*NAA!agi zg)!=R#4a!gn?zu-WFLQ@YO0EJC1nxcrk>m&lHzHC>A%0X$yFIm1sUEFY_5P89B(Aq#xD6~9M4Xlv_j2E@iXwW4$#>&Bj)*%H8~JDP8o+}=;az#aZ^ojUkBMjyJu zQ-n5?^0=BJw?18=p2EYy#|d_JarHeJQuN?Mc!z)%zCDB0tCcmp)Aq=%yFEaBvY;ZK zXS-fa2>S}c@QbUAtWn1UZ&V&5iF?CF6D%t)tnSHYd>?vQhv2d7^x5qV|6-%v%v6ft zW}ugy$b7c+DA}H6@k%X3`r5RT$p>d@jP~D zfUKZ7U%&kUZfxpN_0^h=aFqqPH-O^D4E{&4fSG@9GL#?aS1!mVA)F=;+hY}%3YHzIh~C-meqg3ClSm!G2PT6y{? z)I>I;_=c``=OoHMh&=?X?1{>#FE;ghh@MfN2lQ(-dcIv?rl3sNxYNb{xzpb;@7E{) zXuMN2=;iY_%Pe*+Cza28Wv{GxpCa0n-&>zD06CY}APGsv2ulpN*L(Tyej?f+GwY`fzfQ?NG&ytQ8oqD_C^w(VQc{t(oc+=;1V z==o~tY%W5WYrqVA*8c)CHbJ878Fy2jX*-@Eg+lQv*=Lj1jA&)n)%7FQhZHfF1NcH= zBAbN6u~jWDg56W0)b=r|r$_})ovvdBTmewuj;Ty%309slD+7U6l8-A_+aSMQZO3ks zb`vh112tu?>fVtE@{fSq3_2>en4_VHWQN3zPxcjKMaVzZbnSC#@fsFV=YepQb~;L< zto_GiShjtA8+5$0F#-I;`mXGD2aIZa9Wa763g@&sU`Fuld(n70UF=Gt-q)EM z#;Bvm?@4z-)|96p)PL}#OKoIe;KtGDIeBgNx&NRS8x9{a9(?n81%Yquc;Lw3*gyqG zkG?!|lfPj@Qk&uAz2u{PF)Y_-SGw3HIq{b_5~ppRYt+ND<)x3-be^|O?OT?868+RO;Xv{LPj?*rM8*m7s4!_hWL+5i?(TXXTdbVNX(*q7HGz! 
zl-@}sn?OL`$T^i6cE*G(T51io+Y9NjA6W?XJz(oWm!(_o-r^cxtmwv*);7v##jG1| ztAaJ#S7BN1F3B1|c6sgNYE_iQ1TGU$EvuOITKGGeiA#dq-)Y!KH9m+@Z~moZOMPS> zMCcC4*ZEv35>7z6uj*&N@np2mZakR^Hkqzs8Ks47zAMjRTx&mp7-V(iQUVvQqwU%* zZ{%AxK7q2**Bm=<%v0+YWV1d|CM?){I;{IMo%~Y3(i$$I5JaEsidCv;?#GKqTM|!4 zT74$}5VWg&lyYK+HtLl%&lerjLQw9y-$4yJd1?r=Mml6&Jc480vay5h9k$k;gK~uLAEPq(P@JDGhwFXTE2Ff z-|EB^y+Vzp(__7Z!a>iW`10hTt6tH0yWq{!D{&DyeTRl$-dp+GOm^>h@LMfwkDv#g z)8~p$rG8>ZRa%vtUb4wttG?LOce;RKk?~c3RlxRt7jOV5APMGoi&<3zzm^p4>#oPq zGZ2mG5u=qD7hOEriCXkT+aRb7IiIjjch`S@IsEDkL#`s`+lV0(6O;HeovbOW{FSmW zl_0PgH#!z*dj(@}?^ug|$i&E4ST!&(pe-}lh|8i5W>0;cb-vQ|#jW*BNr%{Ftz8#> zSdcoYUdRK)A(8HLoi~5lZM`3x8ga7y7NL~GFX3>^lmWY(Z#B5pPMs>U2gTKkuw~UE zH|8qmJ9kjrVh15zd^Q7z)3!IS&F7Cg(1z<7u}mFf&HC=@+RvB9tn~A)Yt45Lb}$@8 zViOyZn(v}owtBFYIC`!j8I06?u-d;4VQm&v0O3m|Q@x{oYcq`38H+(nafC}JGJ4EJ zueMyMuLZ|MJ$>D*)0wR}mWU8ckerOA$+5Ek#=-(ROvNLcWUh=Q^9A-xeXuG?HokMQ zw|g&(!cQnAa^Yg>L!qmwfh~M+&qEn|Y%~oVK!v%Jw2nMsN?uzpg)BtS;wg;U znaBqQQ4@|Z@161b!l5(Sq_PVb7V+>|n@ANC7>g$h2?r4(WxuAImdceRl|q63&XKqw zGynd@3-wJnmuEg_Y&~8YfZC!Dq6Rtnebngt!Wqc)^*#LfN$gv+S?bhES15=O}rt+SA)cpIb3!DP(=X zx!BUmlyiOSC&5JdqFG?kDeKjq_Vd5M1Z6A&lML1iq51#go~KplzOt6UF}64G?nSM; z{-U=JM!adVk z^-d+U;evdsF_#CZ1S(g#$z@Y3E8sYiL!5t(%7HNl{=|7!Y-(29*c`9CynMmlpEt|$ zNK|ye>zt@hGkF=ri1${jub+BDNyLzteVEv-K)^;V%-Xdq}2914o36^bCIDd%#%xqv{zjdPaG> zI032F_lhBNz;w_@tUMeu;4D~lY3HeoCIJoM-Q3vw!WLXSH%eEj>NbI_NGqchrNADh z_`ZE{)m2Q7*TJLg=sqgmJQ7P{n3z1q1Z7Pi&M$jWNiJ3qXaaJ^VH|1|DA2U-Tzgn^ zM41+y|0XV#jUo{8>v)X59j#2}#k$OafWh=&Hrs5Ur=-y_@B;Y+hr`^i2MNS?Nu8%# z%>4N(WEK?hWSDv;rdR4tE2F8KzyDC8Z1tdwE*LPO7_!tq{NDjAY| z7LE=hi7B%3;wOTYX?w;I+dY%C(F&zdGcA-f!^4HEQjdEI90s|C&fK-rClI}5l*8r@ z6~v5vNBs;I3R)ppF-v8s(OeaCPiob8ch2>4I?OiQp(%eEAtqqG6$`gSw;Bx zrX%!q`xd6M!j^M!*?e4^%X=7t{{41uq`0=dr3$W4T&6 zUjl~(hWhmpi1Z4FztBTwb3rdi|B>xVc6PSRoHIGmY2U@l$mbBw~rD!#`5x&|tN^4KFIeq2vhcN3SyJ3QzX<1G3FQ?Idf0?gOJMOzcHo!dzE! zrHfqJv_fU1H~rCjmjS0pL7w-iDHg9knlg|^_VNZqUrz}|`$<&CP{fR~*c5ngq}H}E z(!o>Is`JCIwNJ2YW!1L~kvFLg(Aik;KYlq0m{S+L@v;+RwDT8dS!=MJtmN`LXthK! 
zNrp~M9%+s1NC!(B&(Sunwd)w2u>2kP2~B0%iEysR(~q5cueoStsd9V!2L)v*Ibhcn zQ=(qN^gS;yu_|l3G7zlNRyGl2rm&>${()gy-&g$kuB=8f7ZJsGv@L zDyLpGCh+}GWh-tfeEc@S?ctCURX!>1Cfn;E20fjQG;Mg)Cj ztDINVaT!06w!b9fmMJfP<*G)`#OKeScaF_1v+kd_Xd6cgl#xf8 zR9P-@`KZ8t3m+d{eMS5PX9Ab=-JA*Vu6l^{o=dN+`wL~_-W`s&{$S^IZfYy4Dvwg*ZYbaPW@s1Bv)0T*Dm(H)&Gpo(weKD) z=5isw1f@d7V`2_1>|G^q7-pj0ZBIUGk1DGaTY%hDm?NpR|J^xxPOX~eBsqFy9D@g- z2VKlF-j$J6MspBM?mWyw{oaB>TseT1VL^^*T?a~@V%lwI>)4y{Wfy2=8G&G;x zXt({5C9@&T4~IB!IPfRJRj9z+2q!2hxmK0I<;5nVh?z7Zl)hyU=r9Fx>JuN=)Zvfd z*{G7ha4WEXdmBxBWdDr3!dV01TJ1%}qxoU@&^ zHNSiy6u=WL60_@g`~(%cVWrfk+mN8mS~YEU`0!y51r3TQXciWrZ(_Mwv?98=f9y19@%GMZ8Wwnpd3m!%vrGI#zO++%VpNzOm1B1KhuaTwhq4oJL3n}GO zf+a8k6b~*AwzA?vnny4yswRA39Q$pHfUvq5t{LFIkRN2bD7@+)t6}Xw$2uGkEnvaiKuN2e0ECNeD7M9{>iW6xBy!bVeR`7&e?ss{VeK?o5aCD`j zS6%BBGswMwp57*)1+r3B4J;t@#}?Wy04w^ycgRe@9qbgxk>b>cIQoulz!W#{e0g z7!t?^NN!#!g0yUa`q^X)(`q64u%-GsWvVr!o^eB2lW5WwXxsG*VEizyo36@jRNzf& zX@`N$M}lS9`@pUNw0|?vQ^%Z#pSK}X0EY-?mNKp!+NDX=`}# z#&BbwFav+r%#%9t30z{?OFBSj0Z1r_@d!>U^pQWGh4p$i7@v^AXSpdYGU{bvI6=S@4|WKxnQiL3tDi7UXZODz!dZSyQ~qMa(8DFl3gM!fW3zf90Y{= z0$21#o7pyBx_4(`3%m2lHRbO)#oAXJyXmPGV8q)-#qEUPs^*@6U@w^&Zr{pG-tfTb z6QS7p-z^-c7HO1sFxj_IY?2UE<=c^gr0>C2-r_V(`X-=dvT}*qDTr?{8uPB2MFD_I zFS*GpbH&XJGZ}*}xUuUp17o6A1^T{K=TGb94bSlD)qV=?HzurK!mL@Cj9W11#i{7` z_%e7*rNzBi)-ocXPX+e70GOtV3~h9Af3){U15z)bjnuzgE(##d`vUxGQFH^Q)l=O7 z13#%~v&*uwAeosO8p$r;j4p-e-d>pd)UFNZVoxi>Q>eF)F7vYijEj}z$6Hn~1A>ft zMf=b5)NtD)jKnmR(R4z+UXqHazN@RNTV=;mln--PTD%<1Q5F^*q4+k5oI6a%j-lVq zNGu5Ibq99^=cJ-C6$1Nc$zU(WfaC2>BhQd z$;gDDLlk~e!V{W;GIOxRzD29=gmO|xrtJPmLKK9o*AAadr}te1rv64oZ9 z;`!{X-C|kNaRip5Rnwib1#`~m$fr-w%M5)IJ51{2gcO=QQVH|vr3l0%NveO z*T^bE_W(F`i+U-3q!LD-c?Sjs1#x!it7#&38f!J8$*JSYch$opYn+#fFfNu8hxZ86 zi6_qcaQV%D21$GjX5bhjT?$xr4)j`BLEPWK5{&18$O|H4or@3UL`0AGue$1B>W#BG z*A4ugl!YfW4($T-cn1NRTl4$DXrvbODx=AZzPml$H#>vu|1@TXw*v`^c>OA>ihf_X z?lP@%!UmnF7>b(N$zE#FtxemQ2@!t6uL)_r|B;(*^DKoeKOBGkIgy1r*w_C^z)?L8 zfnL`A_rIKiLehjl{`USoW&n_Ea1Ra9cfn6MDbtqXkDk@o{#9wj9rcqAJg&XJ6G`Q? 
z!QRB9DKhUWz^ZOkHNF8#0v>^nGn`BG4>i4@0MkS;f_B1s#7Cn zmOQLqG9)wSI$X4iObxHx2H6}6MY zvKxNla3a^T&Wu~%f-qMSUH-LoA275Y!CyGE3c#Fq>VgG-rGx}>;WZFR?m~Lwh~ui7 zehQfKh7jj(roM)HqhU^>fjwYckDzTq&@S$CU->(~+xpYIVHdmHlksxBs)HK0-?H1U zLQPXPC`cg6$uHTaGk~xqO+CF({~lULvj*UQDQif5E8l-6gt+$V`xm~fE)wqmz)d-+ z^%Uyo0yHn(#MKPM7-iG5@3Y;wP3Y;D05@s6s%!!6Z=SBn%hUyvB{GAF<*OeIPdSlIWi2Q=)vz z?kRB|vTQkE%3;$*^GHw|h%&ZkfgL|I8?f>wuhAR*`#j?&JZ2yqdupM>`pAuL(R!@< z*Z@D{0Z&hASe6pPA2sbT_4YKE+3L&G2uto<6@n!2ovZjA+q6$Yw4*oM^ZfUXvbpxN z8E_cU#dcAiMr!ws&#w}FGM+k9xOo!oolrH2qKf+dO_LrlMT>@d!P0rz4i;*+GHu}~ zb6q=#OtQdZE2OCxJi&d&O?6@>ONh`KfNKww@bM{Cx)7EUkV_w527N8oqgB~Ajh2tV zUhaJV{=p-07QoWe+sA?|uQCC6tBNtrtr#ADIpq{r!;q+EN6W~>baS9GsDBY%gLyL1 zmEVU47g+}(`JSEQ#i3Fv{kkxNg$I8LvbsaTRY{q#dj3n6KLJZLw@|+Nl~S*E-DXS& z#hfMRDqhES3%6dxi_I7CZYLNk0wkAbP`He zCX3ha!eFkOG)B%@7)tyE(!K2k!{IQM%$xu@_l|x8f?K%CNA^Po?3bUy%Da@0B@x#Q zNetLj82h#Q^9(7pod_6X=(hFERZFPEQHSCsmGQ0Ymuu#fv2Swxrp zv`g-AkOy0TFDdyrEV9S=iS$3iDpDrtdX@mIwKz&l@;RG&g-vcEoz%VbraB41- z@r@RU^fcNnAb7FBh>c9=4`pX7mx#pq1#GGe>Kn_u-u}0fnE28Vnq*N67qwC2Kmmmh zA?-|hy<0y}>c9ivWsI_eIM-@00DywOa{r^Bf2YOfW`ukQT^P+(|mIlkt-H z)UkXhkh~94QLSHRuF|}ESQ+?{(gMe2@nmgkr;E;kB7s!E*IktJ{A|?vLM30oeik3KlGc>=nu&!Ix)l|2 zphH~tA6$lXzKqY1zo&P>P2+jL)Pca<^eEzdHJ6_`9X0d@ji@z4va5>I?ekv#u+~fb zCL?}5zYP{jAY4U5-*)*iUnRTL#2i?$pCZpy;ard&Vr`5TzEnf)P*4oV5>Tp;Q#{MsU9^;p(XGf8*~&1U>3iC*4PQ#J z(1|EfP2OlD{&7Zb)$b}=+F+e^LC=jfA0h&e5d(hIlZc;6j=C)pV?cu71{Np0=)Svpb~8N|_r zC&)qz;AwKs)!rY+dU{2ABzMoV9!}j1n?c9 z_Kk7tTGEd4=|(_4i{leoP>TxWx$vsEfuAySd%dGgll=$3BNa@-=BwZr(|W(Z*pJ8J zT;+JUWy`n}FoNHJ_wf3QFr1cttu1?my003kQ$t2MFa0RYXV85OwSj0Eaefs@{_6Vm zQLmeD<(DzN5ltRhY3MqkU!{IqRc!v=Vu^^6J9p>E#xU7;h2wNbtu1=?(l$@+8tPw3xA!w52hGGS3Ut(9V71vJku9on^ihhTT) zT#n9^aAnEs(3v{J3=`rqKmp)1NTr7D>6>^YKqU2Ypad7#Y?-V2d=rK}e$votF>hAD zEguHb3RJnnTR<)%I07jmuU~a;=#)#GgDFA1_-!dnL@mFG=fHJJ@%zl+2xG*c*D ze0#0Q*_zMIWMBd4^^2@oRYr+R`X$8Y`8BcEwT{h|8)%hxFxcp)v{yP(P$I3P?-LUL zfUVL&`>ypQRtfoALLurYdZ^lEIA||$Q8@Mkd&C)wl5&kB{_1=NYK#m=j~;~yW<5IL5=W_K$_=9T 
zA8c1byRQ3j`DwkA8^n(^Cp`k9lslM#u#494m{TrTuDJN~MX8B&@49g4sUe{>M-;*m zNLreGd0jrh4K0aHjc1>LmJvqT_I6QrS2`@tgj4TW&K>iU=br)-#3Q)WA|K?J>?c=U z()#M4kMus{MYMfYe8s#@4)&w`AS0-erIS>ap7}ft8}%T{u@sJF7E3mB?@LpD`#qx0 zL;|FnPzp8=ij+_U0aeqEV6+qfp!}bAuFKr$%s>B;Grx!H7AdsIM^^Fsgd5c zcCM!c&Qh6*N$Tk`n40|>a8SSM=#dbK=KjJo<}JM!H6!eQNUq^e#N&y$&xWffu&GB7 zI2gi9*R|GKw6v6%*k~=w8B}@gqxENA2aDRFKg?oG_)DT8{k>I*WtyK{9t2-qv(CJc zK%qgi;=6}-$z>2hp7by!&B)X*p_)BSWZ{C78VIarfd^y{JeUIP@xN*KkNRrrVN=Be z`0&j)CUf(lb0G`UNrfx5G+unGHhr^6J+c}SCtLe+P{o^@66Q{01}aj8goS8S`HNw6 zI5&z{PS-Ym0jlBXI^3`=_e|9m7XUg*8)%GEisv7o#NOSGUlR8U2RufO)WQ7&YEG!8 zEU(X$e{V&(+ED3UFRs*A0Dq^r-N9|$TL}6FKQT#Py&^0WjxO?`@;wAE4d=lXWOO=7 zGK!@gw=xT)0kIy}&6rUml$j>a@Nk7VJOpkrF~GJMUG7Bm+?H)=YT{z0l9iWkT|+=} zi=jWL_s*(7UR9|vm~ONPGUvaIlnpHu1Fo!JZc8arZ5{8%iX9WGv$mg9vDyp|zwSnt z=wJNJv){;40n)7pravKcoX=>M(jUH?E#V#*0i%9CP`#JWpvQtjO`rEX-M1i>9wv6W zr;_1%&FC16iN)FI6#+P(!R05}PQ0?~p)}QoW-2-OA4=ZrOX(OY_aE@_CIvP=9%0wd zGO8bMSdI2>?e1Z;XQhJAjLg8IeZr7YxqY7TmOAfOnS0I{GB(Q2z+4@8sJEQALsL32 zY+0^IEj^88Yh&Lg;UXxkANo8E;j5?v`^#KI017tSUHAY#81xuB4!rdL8((N8B57G9&N)v4%sZU)bj9PWy@^natY$A^Ol2 z3fpm)e7=W$sWy4PcEIM2g*7pTVq~xEPGoi1PMs4IbAACMpJN@TN^GH_al z=)$20Yy-CNx%5)fGL~ADU^PL9r%`7UbDsLg%-2$Tkb5bC-~Of+l9zc<%8I)dhxXGovPWl5fmCOPFY{ZJ_HLfAMTS+_e7@k2lvE+8|B~DucOq3=i$1 zzkzR1$_kMh5Lt0E+J$UWp&h$VyL6|8Ih!y%%*5^Gg?+UHKL5wAU4M}0Sm=!xvWjNp zp0o5>zC+HW$_|40j%~lA3)?w{9WzVPA8%3QV&P4HuMvb69eT*7+Z`S+B9ZCm?t>CP z-ncDv7{y!oRZouq-}&Y@k+oUS^!LGxhQYJnWvl#wz=uV4QvIGkb`mQEymX)Ndxrw5S3|2hO4xx%$*cqB>60@Uzoqs0zxiu)jul~l#h zaVM>zP>_6=%jg@GpM5gN)<^|sH!T0rADlN3Guj17e#SgO-l|sR-h&{`9nHsnM+3g? 
zG-zkj3JUtISAIS1UZlk+?9B0tmb!fx_6Sl>nE&xOC8?o`i#I*_3HcGj=& za!XFS=pUShM>;_^!s5`^fA-DB=CMVjRoeONyEast8{gO$Ua@%>Ez0%hG9am2kWgsS z<}z5WJ3@2X5kIk%3%lo^C$G(c{4Hh_TQan*Pdtl|ywKSSyL`7^sl~xkYaXU!fq%VI zJ&)PgI$u0?znpw~cjJLxIdGoaP2w^pQ1*8qmVU;$8wqQIPq9Hm{Bzqm z(L#rwWjgBjZ*VCid+OC6cha)_y-^mo*U!%&Z#?|!-ikMYsab7p_(vmU`vu^L%Qb%e z8`}Rr>=L`|A2`BUj^Az1Zg&IEd-82!e0~)1Ur#U$0Y?}=*8Y}H)Rx1S{XVh%kH;5} zJiZ2@u8QrofvN}cCXAb;7KL4*z z{_GVzhrd&c(r!aD^EIhV2P}h6pW7s!b*o)CO!?~^)utmWGW`dIYBqIS8+zGRPm}IH zj@m}>`1dc;w;?X>ziuaY3EH8j(GA7bxZ|f8s z);l7L0?=i`P)$Ks|7oY<1yAOpc8c;*g@Le@ zk#d+9lI=70O^cT<9FhPE+a>6CWSI=Xf+BJb`<^YF~_6^M$Kq_8cO z{&nr_F(IIjgHSQ|FG>9PKqNNx0N@d~UeC;8JwORyqXF$5XKZ z1gwUIuy#)`g$4UmuQQM;i|GEyr-bn34cJhZ-Pmj%s!)q!PZ4D>^&qYJIlurP9ULp% zsi$R5=4^B>b?KfO>BPYB7#gsbzJ8$uVSDB5Xu(P0Fi)q!ot8*Sxrvv07KR)52Ykj1 z0r&aujL$EAJZEea2q26Ll+ZCpeOYQUv(8nz=EY`rnu-sstYP~7$45t5ssj^V4jYx# zjTB`SZsIzx88t!0Z#rbd4+RJ$*~wwnzGC1`0g#vmrv?r_H1Iq_eY1QmC@Rj81z)UK z`cQZtM9$Rl>Kzr3t!(rCr6!`KuKvWO5Rl!>U~@6p3P~IO*M}yZk0gE*;&MeR%XCiuhy!R z4G31>q46J*4NQK9htJb}u6+dZjvKY;sUsCI^m(=Ge5Ek-Xf-e2>tWGl+%r-8oamnG zQ-V%x0Iv!D2(PAkN13i7%f5o2#@rHxGf^&go$Vu`_|3JNiASf=J75sGrfZRwgGMU0>;Bd6-*U|0E4~4R17;p(rFU%38KE z2#f$4*{7DcXqKkXAaP4ljjvw`WqpHxjXxBPV?4d#4PajuP>e1!U;X)#Xupp>^JAS( z7MzkdH8ocq2F2E(Z{!WKg0b3@x&VwXAkf3ZK&8(HI|KdwB>>!8S2(JGezMW1rH}FwqL?k(xN*Lekhf{~()@B2@%#x1!EYmv z|9iyV+BddA@CH4(^eq?_Fj(SObE^>4#S`3s6YjDqxH>2Tb@#%YgoTZBH)#5*0bX=o za0iB^D9{R1It%vikt^0lI+TE`e6RNxX@Jv^?Z0@Fv3+drmbJCDM)<>}^OJCHJ}24_ zHR9G)Ff32nED*F0#x;!u+E<-v*1=Cm+N29~PAb-O^DAb(W`u4}^GwOQVwHtIBVBN; z3<=}{zIfReopB)53URS8H66d_T-uva5NaxPs8WXVZp%7E%CP*ZKN7i}sK*NjGERCC zpFlcq&GXym;lDqN#s)~4KchTJEnX7FudB2Xi*6a)#%*q*26Qe5HaEoG68hj^b?9tn@p zzdAZLfOy_@l)}*jUx@e?8vx(DI5;^Mu>Plu9P%zbi-3%Q%Od}eYjqC>uGO76O=o`V zlK;%s|MRN7g%lKMD~5-8B=P28ZiEzVndO}rGqI)qN$i7@-aXeo0qn*G&|s$3Wx-69 zBJIm~^8HlyRgKF}lgQy~>XlnO?Ij^t{lR7b-4`q^l3F8u{BPi)Z?gjEjYwb?eo{%D z5->Dl0$Lk0c>=Y5C>UrsWs(uofq{W60GTzUtd!J$U!4IW@iO?T+ld9*hoAu%EzPc~ zxs5P6T1uO^pX)n1F5V;J+l$-=|Ca9C!1L+;xbUHl>*dW3p@ll?nb%vD|20SQlsr;h 
ztkhfY^*WZL8K(&n184&Y|)YHkw z@d-yxt5aHhe)Uxo^%KPT)To>`+{$x+qWELufCCM8ZZ=?Ut+3svOf+!#3GTBmSO?4N z8D@sf*X()!u%P^*io!N&}PH0(lwmvXTXX#F+<-WHX=gvEGI9R!1#vRzX8qkXk53px9fHU`mIRA5Y z5y+_V2)$#{jWCyj8IVa8LI-yXwU857cicq^fAPbAt~$r#g-01DtJv0vkGG2-JoB$& z_dkk>&18HS?^)t90IlV!c^JEkLg~uM_B-`nfquf-)7VXBMoMNWiDAZlA&%sX!Eo>| zOV%>XoCeWD3d;+3n{UgYR-RIL4M%F%z6gepOfeKT74#gL3IV#%JNk@s!By@g3&U79 zppLOZtJeYeq3+_bWu)Z=RE|VjC0c$xEumP5pWiwNYK^a#kN7<4IU3HU^H}j#6v0Z5 zRqLIfoEFkxM^riHeKx24^Y)hTD%q? ze&#hvG;T_eo0)$@bl7(^ZU}vjvZ6d`2(b^N*J`U@g)8p@06ZS1DN2`bLV>^}(coee zeDZL^Od!|<2#u4@P9eZMd zpLk{(mNYg|8JYij(Rt1+skO!|voE$Vmnl9`$*6YaEKFC8(Nk0_T53@xCRV7}jITw1 z9_$9QO3n@7wd>D#j}*B(Uw{4oSUHAUu>8(?z@I4(QW@oCY?AB*C*sN#EdE#uOciHg zlxeC%HccXCAf-l#C-pdG#Y%xUw551DV3^Biat>)3s2jVe+Z@YKGjAs@dkgsU>6+Su zmHIviYDR#Y-XEoh@e|ZCO+A_ux%HXE`8N20_nzIL#97e5x`~!nj)*1G+%Poj{_L%W zvQ{OPU{gUUVB2=U@aO0i*BW0UNeM!gdGw2iDcx=hJ?*A2B110to)_B|%BO@peHAc-9`iKItD-hLggaVMfwW zGE@G`oKW#(fQi+Q2#A&HZc?llUTI*-HVhBb0py?5$ZGG=)aKW2B+#_tPI7$D>|+q< zUUdG>oeZq}ugrS>a)Z`c#|d*fYFe9|AToyo>L^Z&Yx(qJ^0AR60#WHvf^pVrNNs$Pdf$bvu`5q9S9wUa0i-1aq2Hc=n?vfa20}fUAW~%D1B0U)5|waO=J^S1;+rN zf{(|Fb+M^cJ$CKd<)4k>-PB)WaahldqHVw>025rhU#8`@P)JL+Xc`{q>J5iOCO`|b zlarHETaao&v`h9BvC2t)zM<=U=7~n$jg<-gkCj30xo+DhNPFSGXNbtPM~N8alxC0o zmY+_H7^leX`6byI56$#xbX@FUml)6FzP(w3YqP@qg0!%rbAm(k8AM%mtO0xE$rv5z z0{Xj9xNPRKlW7|AEA;C(Cms;zrGU_uKLGkMaIcUEozx?^VL+bw)La8vNRvS0SW8ip z;5l{q6tRa|-`@tItq!i_3fcpaxrc?h3pEHsu_Ogru6Oz_9&HoW+EK@%rt|Yo4k?aR zrl|`@#9Ni+M9YE*mg(wo9>x*T%#cgDUu}JNfK|eKD_4*lgre@u+%ebD1+c)q>$RE7 z!*u-{8^=W207YENjD(o*zEs5W>x^IMQ(zpp^|2k^DiDV>LOs#oDn^5NC@O>;YP4wCc-aasxD%1MN z6~hxFFh3UY7xeuccnwPy(yN90MX;p~N}7EXE~@Irc@Ej-C!vvEu%4x!P6c;=`$%?k zyl)3XaW8+yK(YDu^%Cheq+s||0LRqp3-T9h$VG#aV zCxgW(}_oGR|5F5kQ=@HB1W?N-4F+!uyS;${ws#L z3TRrPJcR~XHXEc|S(i9^k_l1weA%7}z6?;bVg{&TFF<%Mw-_>Qr1Nw~ROKbwC;#E+ zFkAy#BIR&puIXmqL5%IO8map)Jc0c}-p(@0|Gwy=aM_6&iS9k|*IhzG9!V!54EL0! 
z553F1UE3kR9PSm39hXyd1NNqH?hkZ%1XIjOCV~;nG{P&O)*(4wDQ-KLuZVu*ZVwV~ zkOolWC)STpN36c8`lNO+(%F3N{$R|ew($sG^TdX|0Zu_E+|e3g@@hVT1Ye6^RJ5?r z$?O-SpzL@Li9rAquud0JY7GM(1hwPv=>cgd;T!nA){Gqfm~5jKnA7b5_h+LCrG}l% zskyz>OUCL<8(O+mWfHJa)8})F)BN#z1TsVkUNg?YOH}CDFQ7olZ1~*b;9LEnCDxnH zs_@0m2-jCD2AhTep;OSs%~lyLX=TUxEf`-C38vk))b^bprIm+^)Z!NV@`}u)Ejy$!R2K8nC+*G>|=Bbh;svka=;4Fs1wN z7RV6yBSl!G3F)t^^O1tbcWxfP$GjlAf`dEm=qL~$wy8j-m@Sn~#{><&ySg~Ma^lU7 z|5BWJ#?e$4^C}ty{mgGs3xmzZQZ+r36Cy!FjsCze6Cmn;?JmKA(_CJl!3fgB(-8Ql zu=y|^1JUgghh6M|88r{5xDObVBp%=X&#Lk1{(KFlWl%-yg{how=^tdj0}d!hf^C|~ z8OSX-jQ5>hh{{>_@sW^fn|IdpPk+$704iOC{!MqlZ3>7LZqMb;H{;5cFgh&>JvRgR zEE;P&j|$scpT9W;&!r z8!q^a;f*DObi=)`^^}GZEm1o&dT+n;kDH$L=ZqA%Oqm{6u-G!A{-MX9s5jaw)bGrz z+sR|(x?=k*LBY13HZ-~o-O{%#JK@|v+}GDkYQ+QUB%(ydG0ha| zz-XFml2BcAe;SKB=x{q!M4B_8CvqJ?Xk&@9L$)qiAY;ug7zCwriBB-`ex2*e?{{KAHu@Zl;<-Cl!# z4Y~#MRT{Y8Xsv?jN>6~$C1CPcc~Idt1ItI_Y#8~RB;4zmcsED3%)sC30!3~h8$~_P z%L{kE__X?SLL^X100f|3ml=8PvpL2Q=O;wzJ`fkyV9JOuRCt;l(puyUkHZ9FG5ENI zo0e(xn)cGY zW%;1lvf(26=H7w4qF3PqUln?GmT5+A$dcc@fv$b=@mLerFJHI)*OYWPN;)0iMPP}G zZ}1fUm|S{est)#eyhQBCw|s&;6wIYi$tS24nm=tvkUm9xt2(%hxVlQ1SoXHyG+Krr zL(YXiQlK28$;ck2;sOIPl65$hqHI@AoH*eDfNz{=K|uz~!d+U+bKdqVnQ5fX-9PjV z0-V5MNM>$3u$ELJYMyuO0=bXkiK3_NfHm&zLrnY{FHh zOgkF;_Deu~2)13?v(B;hk-^5>RudQ+KQ-22DER2nEM zevq9y&)z8oJ^!c5^q2skD~N+Crr1N^;4#f8FeBkTQ1K%`bZ{MHD}510Yhl1%SlzM9 zWgtHqfAv0kko$_;=0>*OKj3=Fp;WN9HMS-E&SqM9M#xiW-O%!se*=(;)t#q548C*S z>csuI;Wu6buGOueHBCq#Ex_kMEs)ww8H?tntk8@)Yu##=?n7EOg7H-uSW;(-tn#d< z;a`RmS*lKUFg*qY{f!2BHHXhbItzJ9faU~T z9w90pC*p^O7$Q#pyr@^(MPnd`8)Nijm;?nvx-xBSOq=1^imL0FfgRMEp^1Izuw3)6 z5YqgW@u4i=YOfHxa1~yvidT2p6cW}-vOo_MpG}g?X z4JOcqEJcx=Nvqnot`=z>yy*vok^|fCqy4&2yIg3E6qGg}t$wnx=}!+GN6S$3W=rJF zw*7fUj89~mKjlCF>kAE^!GDV{o|K;I?fEWFXI&ckU3{t^lbmw`0+!F{)w*uA*uA+eG9y*Si!#(ueNpZpfo_Ja5r?E-7JRZ!T!JaiDE#+Ud?p*D8bwgp=aPP>kM=5;@uiv=#fFzL^*)a~N=VB#f3Y~1gT`}jDwe0Q zpN85=t%xtFRiNUWEsu8~2ty)Ll9MZ&zvr-sWJSRJuZ5q8xNeu3Ln;HBXsFBp1yBlM zFri+cC`UI4rrtVt2;xc1;-aD>{KBhVKCUecGbBX`rZ(t=U`~B!nJ<5iBbY}=Mp|Cr 
zM~egQ%ol4=VPJOy#zhRKrl#g1LFVz_Cq7b4T1D|G`Xw0S#Sa4xNiy$|8R%;PT~Vlk0kw80iBqzoFn=r00tjRogTT<=q*83Xuxg?!np2ziy2}Jh@ zAFZ%6DPf)Bf`WDzbDuqV-y63u_tVY8DD+v>X@9<;s|~qt9n95#fZWF|UgvRUIk@@h z4GF~5gSA7TPMK@r*aEaa7u*~=LUiu>M^zW*b3J@>|1jIC%5&tP{PREf^;G-T4Ap0DNUy5n&@S^NrB;4neFJ;PaK6DlC6TWW`XkP0{sdc_8QWJMBS47? z7}GSm;nV&EhP$(t4ilc_nSPP7GfU4_-g@m_Hy^5x)nsJD1=osAt*ottsVFJMSu$Qh zJY^Eku$wtF8~(i~3XLQFyiV=p>+{0PQL7Y2MX+dDyfJ75K5$WdOguD6k1{b8AMaTN zkp0O-U%5vj82vE1d}{)xM2CrV%?y~V$FZTSZ+MF)>QAt@DY!!gf&-vNEZR+~RNLqd+?4 z5;tiv1xSITZ=qDv=78d7-GkOahGF~V($kn&?zT$nw9!PzJwK_Gx1V-(bR%CaQkQY60OfhS_k?tIKITVJsI0V$ zNilbJoPp649tr7Eq4LMX02FV8Hmz=J9f#oU?I&uT#987DBZ#7 zHSfT#7U^ijLCA)*oGBsK;8MA9(kMOvUV7 z5wXPT^~iZG-xI*KFkIm;mXnNKW9D$zTr1yR z3cl=Ebhc8>jta5(di~qMOTnX|{1L*XJGR5rxJln(%pQ_$hI=-`0Er%YI#~b;;MCu$ zAD-Vae+3NKB`{z<&?$G0SNuXg1Yp$7T`oBy&@pz1-$NNa%nf6{2A@WJJD$e*b=0+3 z;B0Vc@`bj@2be0Dqme3{2gz{^W?Q&?i^ZTo$!Mg&2gp$XGW$eQhaL>Tlc-BwhfeR| z_exd7wuFW7_m1E`~Mf~AYAp+TSBCKZDT68{1J+xvR&~BNL~!wM{P0w3CrJ; zjYBlHUMWfDQO-O*PxhdfCBTnzJoc@|$Hy=6?d^s25hxK$_Dq7=yJ9AT8|B^GulTKy zE!VlTH4bRz%FWwAg%&oFDm<$-oKZJq4H2cbMTf-cR=3cSwr91#WRRgtj*gBhIhsr0 zs#ohO{Xi?c)u_Ga|H01YL?iHe9^qK9>sXmki$ChyvobfZsVjIvAVCW_q4E??#p1k# zEKDoawt7)&hdEXd-#GppOyIpxmPY!{2HLvMtaWvD)HdC=u@)Sx_h?98CgoxT=N|#D zhA1fDpt3xu!SQAq*QgASrGX-U%8=T&{L~=mtP)%FTCuqcOI^-Gp8j6jb`T6j-etj$ zOVE_2Wkcboa&cskGjdGBms^k{;h^Q<>XWgIj)jpQ8|zWSmHw+5tdi==q-**(rA8m} zt5RbkK?`^up^O;{ZXxc)dwN?@nxZ4*K{7>$AO1Dzi%cLh9NNL%m(euJDuG*osIki) zWo1yif#L>+RbrsvK6dRq$6}e4LfaA++IvQda+CRRxMX8c<1j@;A0k7!FvOUWXGd0vn4}oQwGK`SMcb11;m+9Hum0(A{4|Rbor`9lTIO zAgWP+0gXf7pa`%yd){I0j1HYs%UOvW^|_G@&|oqwELgjNZbd*-QmpVed%=&^`>6y2 zbMH_)Oj58~HQyv?ooAu2=G$hhrb(}OK0L+xSEpT*%+O?pt1sj1+j(r4VbB?+RyBt#gCM0tYZzIdZy^r~MXn5aBi2*lOEb#VDyQ`Lm{%(@y~E^N&)d9R=EF+n9g z{gMNXcGaL;M}RM>O)NqTI*19~Gn^t(;= zxwThNCI0b+E?^`G2#8#TwD?IfyLUc*{P;)9CB_CmI7(TeSA^XJM5k&m-J-n{^{CLq zYEF^1wBj6J0H4G6kBbxT->TeL$~Ug)1(#lNkait^>#@tT-nMwcFCsEh0P@HJOSK;{ zYrPO|L_yhC{)LDSlfJR5*8B2!rrh`a*<_ZLs=Uk5q)?KK;Oqi 
zxX6NVt*niJ(^&=_9|L%dJ%@hr7|r96IbkLZyJep>kQ01JbGO|^OxybRlDvF4XRGta zXoLTG3g((5==q@iv%=iNdFQ-e(q`(~)iQrKd5!I_Q_&e`i%;vdynI<7+R!F-v&G^M z92d?{nRKkME;8lyg5s87kG+>#D)QFyOs`<&w+~5BB=5U~W&|||{MC?RU*M0sO-Y{h zf#p^5Z8DtJ^S`fa4{(WcIA(W3)mySV*eyxdQ^IA2VIZpRE^JaUy_HhY`7hM78jg}L< zpyS&sbutb>g$leAEANhB0^WM-rO0kf8VlljrB_t5lQMYvv8{uU-Y9n5{IOvZtD4yW zRc(26l@S0H1hh0Y^HyZFj2zUk{mgQ0IoBiewiqSels&ZE9B$U zKIWk@FauypLymEu39h$;QV%M}<-D1J_dmN#Lo55**J~+^J5&%MVAZ9f%g$Xlj}lPm zBtnW)b^~-PUbm6#%DA-Ow-+(SJ05Kfkr}7A^%~>%QEZ{S%bKeeE%sM%X1jiRkNAS= z@Qm}9eYFby7I(3J`bxEhdOEzzZkgS<%B*1DB>sKSWH=%th&;0I4#2FGu=;Z|^gO7V zR|jw#i}`h9(}7|9t=!|ntDj(RiSE2ILR(2MigbUF{LyQ2uj&4hYa$%g8;&`5+d+*|Fg_rLE;%))iTWsWAx0e%%7T2a;$3YBr7-55hR zy5SgR7?n_4aSW^SxK=G=f;h~z0R9Zvl1#8kKjZ84Y-Blgz~VD5=G}zb-{9Bw)Qh^_ z4M3dUDMa0b@AE{W7l*r81%K0V$J#J3m=MrSi^QA-?2AhYJ z_(vOv!7Ql_S881LMK2-z0w3yn})U8mV+wW~ry%KR!)N#T{kY zUI43yx%i0ioSuB`uXEW#LS&~T^QBE>rvGT7_+PXk2KL7C-Fzef0mcAtI0tQLncWe% zl)!oamD#uDt~USF6AMJn#m<5n~BQ7XJ*;{H`JxkECY zBHr;d!K-(d)Knn4ERZahQAnAW90rhN>dP{7Y8WlqXjFyeQpoUi<^8K>fLG`VaK0wz zF0A`=;?a~p{=OW)-xsuyo{Y=n{DkuJ!I*~#VlZ(!huP!|$Ljjj?tes9p%tCw|R;yDD&go}~i}9z-nSb6zGyD82HkRq9Fc{!Z zuHbKytd>3$BJL~dV^l#$Xsv>bllL{bX&1>R=Gi+`-yNT@ht!PF+;Ab2{##~(J`;3! 
zn7ctD-Bz?Eq=t(SB*=cvU^-Eb2hkDASW-ve>9 zu>aM6kr@%+4D|+TRCO24EI#~X8u)%5cOP$(-|rJWl;I@8N7#t=1d9Tt%hv9MNA31P6OmbtSdpt$2cYRE3wnuZL4$kKNBMw?FqP{WRpqU-|pW z-8YZo@F58x?pV!52AsY-@SDElL{(7RW0@ygI9Y|y5^a$00b8%&oZ(VkdrQWf3Ei7M z%t1S?$gjL;jNKy3n&nuaGLh^z^-3ShUd}tX@!<^uR)0Q@82fuMspN0huL`9w9whJCDS=0 z?C87)iQESpwB6P)zdzL}<2hKj!d)wj-6=0CK-ZugQQ!&1sAYD#Bz>)rj2ydwBZCkQ zv4D}$DWSOyIxE_=ncc&>*`A%ty=K*-F074*uGX00LniKD51y{8O8oNY=$gJFs`KQCWAX6 zKXZ>#yNVKr6#aZ3IBM?C6d@efFm3S63S)Ig9K;;x4Q+DQZ=^KaSY0;69nwv z)=VD(y5!69NnWsCr*y=)IL;byngBSV6S4$QZGKUedIl8?^82lE6LCkO389k ztIT~n6$IelINVc19am)+jsd>2Uy~dvjAhpAno0nPpJ1CSmkJVuvMwa}j_gXBWV9Ua zy47)e- z*}9>umlf5U6#=<(M;?%DX}Lc&J(2+gX^O*|A*s`F#7JrQ2*;CB$?>pyW$^FB-rm5j z!K9nwUbefD&@3Bt<;wRbCM>nr9911Qq-cFFyMKap(ZeMsj@Iv*U@5|`Wp}l`3(7Jb zoJ%^Q7M}}1v4Ol~jnk#ZRVplX&Lb(qwj$v~o(lo`3`XUS zokL?9Fis_`{TiRTBnR207ylpwtGm_>0EcJCc3D$hJJe1ABfpeE+G5iw=R@GV7Dk z_ZS8#Dz%>R&W)EC}z(oLYWmUsV0|#~%<&NHU z_ZpLfv5%K-oX#~=S$xYn0cf${sL3;kO_*g)Q|%mplnEf&H=s9xHa4T zn-)|?EbK_bwBE1ct>EABkd|o6?_EG~F)4IzESAgX^DB(~xk5knObfUUQ`H;KfwuV) z=zzX7HiJcrFa6A`L5Y0(nJhgwTeA9S?a)DoIc&K6&#P7qLdmp z3-J3^XbDTOIUKQ);y*gfDEh8{YNBf9{pzbrnW0@=P&bW*+{?(sb3>|21=k(|YvA73 zpImQ70Am|WSx8BCA!fwmC14$-UN2Qh-m0$@xsnH1*sIYmqPDcL=nuIHfcpw4Qtl+7 zAa1b7QPpR4EZegQ&E?1r6021n!{i?X9X|zp_SGApV!X4b{$@5A^-;3D1rfq>{(9&R zwF793O>tvQWi*2q1o;bcZ38i>iWtK^fc_6(uP_#n#}U>)YIKyW%{8g_Sg7s8VCT1g z>WXm!4qtX4j4ta4tbXwT^CLP`R$RQQDMGldMfAg|^}KU@DY)E1M(=u_(MTASL|_U? 
z$h8Z1RrrF>uoaL&SI*qEH=1giuMF3-phhxt0>>bmXNbm%?m802y7$MK9pFV?*PVXy z(OY?3+OoMJw!q5y6JshLRpN<6HvYWe-EgCn?Pl(Sd>{(%CW5Z_Z@~SVWw=La- zpqG7c(r|`btM1B`hcs3Rz)6x)BvrbMIh9fxIeIYS`~pW96UjB0pqK-#O9!Vj7obI#|p1tUsej*KUuE}QgfKenS zIbCg0bmC=J2=OSUXD^4Z|?pE6r2Lhh16Y_4sK|ojZ57>0^=38j9rX01yupwvpn&u zfii)pTIGV29gZ>`e48q;SLIsIRLlf9ga4#L+w$n>(QuiK(xR?JUN*^;#P=~DJB|UG z^&4cms7rvKGmYj>kZRnwGT{#|H>7e}Rn9j}J?m!ls0{t5#qc0OZrOILpJoG4!wrim zwv!7Vgb#C^9dZ;J!UAc@9RMB|kAKWiczhFQmSYM7%@)eR*bp&TyfL`KdBa$D)FnN% zp;tGIA&A0YpJu8h3>Se5S=my>55o-PAHxkLtLOM#>) z12jw%`UsVe1uhPop5cCW-pn6R=*@{;bAmDSVPlt#hHI|tSsFjT_@+aJ#qFOrKp)JL zsmIk=?FGp;L5DA=g3!w*O5-*kSAZM%Gt5Wg=HT|l4~!U#)vXJyXV1mRhXSS{M;Izh z7cyfhDN3q!?J-Sk7jelUcLV)E*^TNEU|>!w*-w}ied6{tq!hP*c!LQr7!9n6k$ ztfuT;D~-^F!5b?B;6&U2C&JQTt~@;&{9KRP(mJ>yf8^MJw2H&rZd!xKv1>oT*Q|y{ zxX7Vfay=0NlVDc0y11g~?<_$NO4s^?5g5vHN1UO2QDEm?1!4{(t@30244&o#d!(sw z!e{ZPGg#boq&wLD^00bohdG4=1JJK+#Y+LW-ck#D`)N(p>}3vj8Cod~UN2qgun1-q zr?x9lY?=_FGNF%QQaA$*$JFFVF)x6_3e7HY!Qtejn@!gVHDAwY-xa4 zYtD%YHLh-iKvQ>uy{Jg-)Hi*>ekJqgc#+usYj&dD3zaK>|GuX?1~|C#sIQT4&qR^6%<|G(KC<%$|etPQX z{_wiTA}txHK1D#5Hb{(#LVF;Ay@|p)dv!>N5~@Scg;rWN$Hu^9Ih=AnoBs5>R=HAx zjQQ>B$0O4~0Bo4!)a0zH&nTr@g}>F~U$8@`@p2@NWnqH+^vt1NIsbCg?1hXSp#WH& z(;E>0jgj|4VM-dHaV~%(RHw=QbpiX;ek6_BCU@f9!i7>}Q`WlAXMufETXpD^k}-Ms z0)WoZhL~xY2on6Tq-vP!rB+BHLd0JAVsmx8!(_db$Dy&5T?{vY!_OD(75_2;D3&y0 zzok%0s4lzp@eUPJUX*qntuYg+{i1IfzP<`|b%j$2$1vOOM)p72M-qG#7ntOLD*Ip6 zyn-O#2|LS)eBHFwhE!Epn&pUXU4dJQa zF9>!XIBz&}Tuzc$K3z9<2+04KOD%so-VL?Qf^k@SLny#0!ld@5ctk}mrWnGeRf9?B z2iSLKwa6~Ogr!c)o=)j5Tea?Vp#|fKryYYpGwpY{%>g~~p2V`BW#PelFej%^&@6zl z+k_;VjWPX=3Mb8-Nx6GoyZY~FYvFMBdp`&XWx=eL4K#=QRJEkkutT_;gUA?gh^NKr zvbV|9DwX=(yq-5JgZA~v!!GdXng_w!YLYvm<&NEcxQXrs zs&sV+v_t;TSapPOR8v;$^0tjh_!pFOfb3REOQn`kOUbdo@ri2iFx5|m~2ey0o*7gfH3=DuvLfa`i3pTeKj-Op;Gt{;E^R$FdxG8oTP$a zXfGsI2LME)p0NlD?ZY<#$r%?g2Wm40XNs_9`|s>L(gcKiXTdQwmY`-B_XOuKe*9a% zY3cclQ93-DF#sWV6Uv_F!qnw85{s}=2kvNP?(PYWHKg@`?H~L;AZAy zdx+UWNfJPdG7K>9$xV=KS|blb{R|oYG=p?}a2Ov9mdZd#a9T+q=YqCs3XHV4Fuhs6 
zf_OoB@_6zgn9QI{r))HH32Lu7HKHEgez>?g<`VSpXhDEk-C$6y^Bm{esHb%smaB7W zX#@h($<5=D)l;dWZCz0R-T;1lP6KbCf>whz8ZRNOjc}0*0K$2_5Hdg0eeII*sEYL| zI)$T6Lw!Ci*PK!>hJQOfzy0xUZ_6tf-`3qLwm#~r**mXW+}m4n7hiM+-L#3r z1HAoE94@@so)w8*6XsYPae5(@0oCGG3&yf$ei7(&g|S!iXTCEpeGsCuAuY2-j~wA( zIMOI9O{G|DCQ1$2m`Abh3Wr5O!TO9H5A>Fcel#TYwQ7?hVT0QspMG2M)1enUFey8P zZmL%o59Kw@=98;dfJQZTUlfah&`mIZG6@uVpETzYZ77=nkF-bT7e z?jx`myA<$(kLnNRqAjP723fOfywql>DdE5#Tk9yrBo7FH_NrzN?Af!&3HZ9#ov@K` z1X4NNp}tU8d+(NCqBlDgfDtsH%cjaS2!=SRL3YnS0!H3VCFxRnU#<=<2?Rx`ViN~E zR9~Pn8a6HkIC;N)lm#v(}|VYw2fQ{|~< zZ=Q+wN~D1%NBvW-HR3S#v~$8_$R1;udhZ@Z%p5dfTE>L)KnqXxgzxa?Eu8lx13YK1 zq3tBClj-D#f(3hL0|Ul?<`^92)^Jt%)(Nwb(#U*?QweBmtf_;CF|b8eAWXm!645>$ zoNHOGI}KRTUj9tec%4ybG+lRYmbTw+%APn(ONF&WzSIG1 zau!V{TdA+~LvF0xvrULfvPT}8sz)>Yq035-??AC&&PVC01>l4)_f1{hgf`ptC-1;N zyqysxyIB93e!abAVbYCdGuMDC#)=!3;1JL_?Dh7T+{a|sZ!B>e4lu+3b2#1mvoibP zJY)&dF$%!LLe{h zHhRonQvC`yGHHm1WB$R?1sLMIHZF$2uF~I8d`Yz@l3l>0?28UA>2zKm$bW8!HrMZc z-wK4^%*q`TF!q3y>u?yZ!8|iJq*8fY)v`H!TTHH7dB`gu4Bt_|+O?fEf(zg(+1P%w zCvD{~0C|ChagwvyX zd)Z`5L``sgS%H_KR!?c-suFn&V>eP2KOR=F%7`F#!F^ z#OiK#KHQa{JodDorC|8v<|WH!#k1>cvH(&pfS$cduok^h5fO7P?h8O@-Rv= zpt>qB6^gV1{x1VGbHPf3aapoXFFanXZivX6B5CMcpnm;{72OC;ia`vyv7cGTq^9*Q zC#xV7@wr8^&C>^dLgkxtkf~ud4ck8Mi8~K7fLT%bwBIA?zdh62me-0p|RzP zo(vREl%vP8bsEZ_1=1}!dvGrRe@*>PtEbhMS;CRW2|DRSOe83-0mE&t*u8I);6uxo z!n`E#V1QZQ|A;)Gpm?cXCt&4`D*e2C?d|Pz66`6<%Ev#_gMlB%><6CwReCwp($tV_;9LoacIc;fEz43jOft_qBiZZl~v8=24?n- zHR!nsQ5w%~Sj6@(oYL{EUb6udFJI)?h_#iin=%d(-?@=MM2-6@23O^a)+P^TckSX% zcKr@vb=4t`CpNP~lBaZH<1`+1%^w7GlDvwNHR)w>zKWpD9~nLi!vSXt6~uO*n3-@n z+uLDFgCfD@`1b;;;(yVjFR-)LF7zWW(|8VFxK4};rc#lq%$L5``m`m*|Mf^MLT=&a zP}5F(<{`DoBex=Y^&J9kgeIr)J+}i!k<|e}Goxh76&)(2#!zmn)*DUX!Xp}_;mx+t z?ZHq!8hMWW08u$9YCHz&7uqc zFq&9jU(S}6FT$qouluZ%I*C57>huh2Iyn07M;J^U)?7_IP;y5AkYcWM%4RMS@sKiR zDTbiCy1NM|b&}ut4r_HfEiaZ2MEZ1G!e9m6{iSlu9OiBSQ^08CYn9;9G#ofPS^r7# z?RrX)eorP_WyW_C{=YyngcAq{2{1Eu4$;NdVRk4rFlaT46?v*mN%hNcdc zQ2rSsABlS1;zvZH7Y6o?q~wt-<0g_oDJ$hSsB8tS<7t4$ 
z0NQ_zF5@3TPz`S%e*t)QaCWy_&wkn2gVk&yJ_O}r$spX9>u)` z$lwUzjh>t@<(jl1ecIjBQ*d>#^Wjh%EYY2#F$WNLTl=8tX8~}Sk39z}zt6Y6bQ#V8 zdj2*5?b>*b&-V;nP;z`f7!8T&&1Q;+&#-K*v4kWoHQQjy=gRWOhUiprVPPFJAMn#* z)NF^@zT*oL!M@iUXOPNzawJ1&=#Ic;@mR$RCX!EP@7GHUq zFA0OGzb`{Yt5Vp+%ppLA_S=Z{T)b*#4=$t8AYx#khwDmnEQsPtq*h%H-oJU2HE&iyDsm!sw$oJR6hbIDD z@tw8-#+fVW%a?0uRKkV#Go?{Ol_v^9%@^1}d+76X2QM6#X)UP0tYiXKB)j2@3HkMr z%$ZyDIT6Ak?D5bSsID~MnUzAmoq7|&aUpa1djd@PX=sBiT{dQAPqsTQqtaUL2wenO zv1`cTMQDt2$OCS?&N4H-p^xEv4LnZMZ$t}!c;z{iYaRvZyS#o}@yX8sy_}*v?#dOr zHi-wywC?_4XXzYP0I}&4Lx-+J0c)#ta58WRz)C3GG@JwgAUD9#=CT6J3P?`oiPt)2 z0lNgDNce+8i{>AgqcjDDShI%d;`oVIb1^o>`pG#VH*d1}Z!GsN9Cf8N(MxAn-|3&6 z*kg0->-c`?_`O~KScqyf*~W1TtpBKq4N#;Dc=&9p1P`D-g7959L%1Zv7AhZULE>b+ z>guxnxXkT`Ih4$Zt-K&o1&X@OPngGwD>+U1{+cZGs6Zv5@6O(BqZe~eT%c@M^on^FEre~`61k& zojUU2)XZ`luBn!1KPt4Mj&Uj3lNn6lh~Z2D+2JUFGW%!*0;n4rSU2NtRkUE4F{Gj>SX01jzb>mkS)Qxl-OhsYgQVhGHENtF6EUYC6o3 zoINpUJ40vHq%>MrEy)$a7vOp zhUpxI0= z9bxtai*dWIkqZeaHcy8j8@hlUhw=fSG=-0Rcl;Xp@&E<;Yw(RH zR7douNT&$7a-jd+HNFwR?ZJ@r*O)2&G|tEF7I>VWt4@PS%|Af9D$c70G6JcMMs6$_Vs}WkeW3-E@Q<%wOPGCRrKP2TiE`n?H3?vlN~CbCd{ti` zikqHfo1x=P2f0&|`FQ z;xV-v8WEXw=}^iYo<;c9YB7?DN1>S}vKC9g7+$$H+z7m?wJI(@I& zU;p-wP~2awlr(L<$3{P6SD{VM!AkX8>*Em@2JrM6!`&|fVV3I>@`4Lc*wsIHa44bj zq8=7ySPpUIrWP;8SZRL#E=p|5MNj)=3?!x!rKg=F8uShk(~ZdAt{@Y!L4LV>oloQi3NlHQ~~Epd3xRcdu`(A1H#t zk0jVcv=C~9NnS8MzU-rU;)Tps<@MEt>bd!$a%vm54*1yaNy)@@`UEQux2l=dNB2nw zLNP0LKuPp0IN)s$Ukmn-iV9Fvu0nJ3Df$CId-EW6Ex~i+RHBUiL|%?wj*lUvNm#)F zkL~b*rW`GA6tm$>$gO&YgW(IR4s#(F(1y!%IMDFz)8I1!F$0VVbOShb9pnmF0cTwe zm|ZQY4}ETT5ELiCowRz2;)la}Id?20o|j8iQV8fZ+<}RrT*mp{Z3-$}W9ge?SK5AD z6>z@2)d4lSdpuy>MaLueU=t; z2!<>jX!<(DQEcZi9wl1K()t1FaUma(kvaX2oCbqrQOQZo)9*AT8j+xybQy25J9VdF z*Ah6~4cEXlK7u*xHumj?W?USC_LhSXK~!)VK(HCbmzmmrirGM!dS_cn44A<5*!u@& zM@2+(4Ka=Ra#`{@DWH)Gv4Ok+&>fv8L`A)m5lU6;nXyfSch{#@d_byT44!Gf`8G(z ze)pJv*>d(Jf&wU?QfLQb3Ugd;)yG5#@Bg@1wNOsQy3qW#^}$7KzYI_c^3#5Xflmx~ z#F-qfz$nQyyLZ$5t_Jdv7gSUF4kMeEzrj3+6YZ_X2Y3e%Qxm}EdegiAOoB7_@XP@RW}Idecw$H&XQ 
zJzXrAJIRJ1`ztJyToch&`O(z0n5$65#;9|3V*qCBU=NR-Dto5}Y%}UXS(qOeXnj3) zW|RoPNV9p{_|rVetsP)IA>IcZeH9;cdFewbZJFrjI+zHu}iYE6tcT^zejON~zmM~)e6 zC(o-8etek2t9YPgG1y z%ny8z;-wASp7D1}5hVAOtPb;^=5l!r{suRgMfpcfMh+XE@o^PSx_eOyg!T>SU-};) znVZBQNMD?p!p`2mK;4tT>xhJ@0x#LVKc3{uMEtFcQNseR`#nx`_6zJVjaFDp%oE%- zJB^zk3rLJ@!%H9QFhRy~%6IsH6X}U7?vO2M8!WVmYLXC|fqKFl)ojU3O<=q05@UuY zsF_$@fgP{HfuYQePB~vA@SbGT4N8&3`}Yk+g@vDU56u(1qCNy%mDkz)*%Nhmz!>MA zB{`SbY4y7ShfWSSelK#~>NPgbj@AZ#|6I2jAi{Z?hP1cpbnSqW8GU15S8^apP zt1DHt=O#D3*X4_2gPC(xNdiwWKVm~=CBBtT`r1Hm6}tm^;TXq=i}+31N$4>u*3G^m zuJzWtsU$rCazc~ge))-e)%{G$=X#nFnkEK-_V}X5-)&@NU+(X|6APX|vDq3Jj^U_l3AF{~g;q=zH)X7T88HE+AZ-9s4RLi_eDPUl> zenpQy{FEio+aQ&O@5_$xoK={!_;Eq9Vf2~Z+PifdXxSk#6^Z(-ONw^(egD_UP-E=T zUjej{|v+M|Skye2WBiv9%5fX9#j8)ZnNJD$J?Tg9D?vhZbj#v2wK251- z%#d-Cp|~cB#yD@b9ajb2)i5iS@KlL{1{sU`&e)?Da%269Fy6q79EIyN6w8qj z5qZy_i7lIK2g#$U`y3nU=rD=$Nr)T!kmPbg92#?K)}Xq3bz>1sYJI6-)do(vtR zL+b+*oq)(U6lmS}ofSj7#Q#*PL07l;&#^K375h>m#n6Oz%+6B@Z5p7!7!CPeqt* ziAp3fbV#bZo^;MP78gqYx1%P6!*dg9c+5vhW3g-BI2N~McFO_dPccnNE2D?IULwAa z@N+G5+j&*Fb4t#~H~<-Ad{w1O0{Fr}fAphbY+g}#Du6B!Kzwk#X8s$k$#`jfd@?`v zErIwr8#eqygq=aSeB!De3$yDg}e$GuT^a5`L>GPY?V1STrQRc!DP zdiD$^fQS|PhR+PY*f)`C594QdE@xc>awa^y2y&GFGvqF$izg6<%tCMF)~qKODF`3&qf!(Ag6S`?54d{*tlL* zeU=v^qGgmV;v-&b+@&D*6e08pAKl{ecRyQqrGbHWN!Lz@(cpL-* z8rjSiX*!i{sQWl+aU%OoT2v7;)OprBAOXz^?Jfc0yT}RRQFC?oc&VM_deEfZUPybGYS@Rr{wn@xlN;cG zg3W2n=l@?(X26N^LlqNHAAp<0wKfA%3&=2{b*HTz=30YyQacZqdQ7&=Wf39yp4G=A z+vx6a0{BtM%9!^&?VXv|LM;35l%z=T0xmiNpMUlFHUHY~15n12BRgAOKMW|oPKD2E zn_necBzNM=l(=bexnbK`VHouFXOf$k5BH@9v}$5A+foIE7!;EUi%WMH{1An9j+4LU zxXd8#(k_y?TmT&|d?Gx!E20$W>dP*7)}Tff6b2E^f5T?*;J8Ido_{2?(1G=xdX5k5 za6f+H44 zs(vU=LD>v`_ny>wxHW+0U-E{00!PV{PepEPRn6Z)7F%cV{x3{ zYMr0iB}^PUOO8Y=EJ(g&+wqL)9>;$RGooD)#KtdFt9MJ*kuPzFcc`y1qe}ng z*UEv6RhA{JChS_7`b_@7pSW!L=kOXS`B%g`kV&Rex;C8Q!He#h5v}m;goO69(`5bM z=H@3fij*7M{xw$ar*PYYLb|ZJC@uby&5>xc!It~u_N%Snyc(&0(Z2VxgjL_c6}=py zFG|wczqgOvg3vJ?z@3hH;YXiTDw!x;bj)34CL(49$*~i_%3np-}YYx9{5YgMIpMM5|)aK-fTvd 
zmVRBWql}=x-`|aSr}2)`+Kvg2Ypda)v*qMx*nU6Ebt7@#-)=w+rc9XV(lT(n<(IR2 zQ(F>O5p@HcDhO$?zx!xNfJ~3AI2adEy zkDvXy;eY70HEl#2#vU4PUEeYOeED`1Xd2dZFZ~#dGW3v+F){V)C|ZVS!dfdPL0sW+PG=j-~y=G60xuet-2a44Fd zxH8=&zU*x1LwHz)POuO-O1P(9(@u{VaR|&=cis5k6aHc8@rO`HM>2+W6*)^xz?uWg90;6$cuk3Jz7_=Ms8Zc*#MtXX!&a6tC;hhvxi zKbzYgiA?oxgpGZ4fqu&mhlyvbV8tS_OQxn#o@CuMp8{xtw5SYO* z`d;L31n=o>aIvW+vWu6u6zo=OR@md;Ztw{NfCH`Z1+M`DJJpF>NygD$yC7EGL20GI z#Pyd0`b-=0Z~8p^Z;t7sE4(EtdlzfWC3I%;ukP4_dwC7qhmHSW*6Wp96@afe9rf=o{W zgDL)9ySTE#ytl=-i#U9315mW53T)kgMZTn~QSKv<}4sO@Ce<;brykBV7DZEsILNm#?{(eJE> zSRS9scC%b1XZqCQ`Iz65iBz#ba z3Lf8r$2v!`k25Jom$*&WkgY><*O)Iorp14Xu@YJM*3qW)_Ugn@)kfS#{{Hazt)+(x z{qonRhob3LY%zw44Mz_5$QhxXsZ-c?+lxO=F(rQAmrP{v*-W1MLoJ7f0r`nt_=_-+ zxnCEq6ijftib?$UJ>WNFboHNxJ}WJp8F-sP)7I1#si9t+=n=bu4dPRdL-_@{s^oQQXx*{2XWsowX{mx)C!}|9QXN zhoj%gjNJ-UN$#w=^#YB2oyEH)cjr)#gldI)*(m&*SxGFSx_PsS`w*>+m{ZjAVSWAm zYC>iJK?+dPyWqt1rB-Pco;1Vn;Gdf&B#pWoA}1~MS!VWYHPAnq$jQl7`gt1Kb8QK} z79w!isK~aX)4P15Y934N@uaB8E7Z%#hi6u&D(%2A!Uu00sDJ*fs4K*~ys}xN;MLwI zH*uSXyhSE*CHqAcsNj#f7ezD(t3rI0pKp#Lqp&Iwl>0m+x)_*Nxoz(g!j5t5q7b6D z6KfoZ&#b(EV)oOGXLZJQ7Ey~o+HIxq-{nb0-ekT!2QY zB4Kw${=608p6yzQ*FJCdZ#o}WWZomP^&H5(D2ttP?=x@6eb{^TnM&lYY#zd&q6MN? 
zEON?k>RcIBbIII?zh?RxiBzh##Am_2r}78=PH#!zuO|;>PiaM=OZVQ?b9kXJN6y|) zh4?8(eRHq!AyX+>>MtplohR~poGKqYtE=(ozD0Omq#%kVOikwe<%cDu1a*5;Fxq>E z74`t-XR?7$WZnOHzwIPr`*_s)IQ_;;f=&pW=*6i_+zZ5>GVitsD{8aPs0!>LKiQXI z%lS}_@Dd~>OwQw8tDPKOyTi)`@F)B^>0$*tr769`Y!N|RiR$3zN?zJu@2XYiIUgrdF_40 zc7V~!gs7)(gHil=Y-o>Ha6Y5rlLNRV)22nqF1$hOHSIXWCo*5A@aL6r-$b3Epzv2e zIV8SyLVNa|ru6TgLv_--pK}PD*@G8DkoNu;_CtH}211^{+H;Q_f4(M2b*qexM$8so z#xeV`a{Skl)##IT(tq%sYS#~W{btKbX40KD8q+Su?0LnOnOu92-~u!riS|C5UoPpq z80T;WgSY*+k429K^zAiz!={Sn{I9DcACPs6m{63mX?U3Ps^S-8ZP%lKQi&Pkh3d!q2?EEL$cXKhUF$g0V~*a!0z{d1I2Vuf=C8kE6Hj3&BYHLAdWfc-&)*(%&2T9i zd*F@R-YkGBwr8%6tTj{05lp~O*(Us><{9pt%6G~+x-D;+pr=QoU*xFFqMxZ?YK>ip zR*htr)i)uD@=~vLbG`HjopkuC-$PDkn#9cS< z!yo@Anqittw!CV1;rKu~{?%Qhy_57mMSB~HY}a_ew$a=$gV1D!69R*++Um3<7@E*S zHw`_-RJa+A85R`{Is05=GpU;f%Os+5{&xInh31=QK*i(q(wJrF@W0e>B zGOR3fO8@inJok~2{+-GWWh*}~PIo%JRUzs))hjl#`{DNqp6E1JyQ9Q_)SBZvyS+!1 z3BFq~GL<`fm%)Ac|GY5b84XHy^$~+R>*SVGPI2x+`aE2@;1yd>%81`7{EqB~c_!p( z)GjyJb@$*T0`HU??)Imx-!r{WLFqT-YQ^3e6V1$N z`ug&K{5Mam30i>B7*V&L$BH4h@#gw(wUE1?=Bv_^fQm zIX@ZaH-3K{nSX=c`DdK64aG(h75F9NLeaq7>Ls$fgDrfSEcge$#~(dODPC@uNHDhl z()>b4QI~`KIvy&kEnN|m&2`B6sV>r8NoIDiMw}XZh^sZzFP>)Ow1|4uVzP&D3YLjh zOs#Nbn4~*bw~=7SN=c-KJ?0)?TKp*)zx|`8oow+0mFv_V?L+W*KaOgzJgKuWwJFSN zl3QnHFiK+F@M%(R##iu!0zx3#HqC65O`IiX`hdhB| zv50E7#|%o=*X4*2KKQ4Su%f&7cFywg;R6broqry7-;;AoCn`$1AFoo4A)l-XdKj zJv#U3{6ka^WJZ6=!XN)Y8SWBGYVpSI^{+S5%Rq zP+(ThlH+BE3K2cF;f+ZtBH!k{`@i3e@KxH~plB%}(qYtJ{<%c5 zTFhn_3YD|Ig~kzFu|&-k(0C3z<%#Ob=0`dsy?6l4;i|$>x zi|&Jq{_}|7j3b1caThh~m7vtAlRKS<-_uO`dfF8VaEUg8b)#O?ZW*TE-kO52jI5adwZC8ff4T+!k@w+jn1C+-0npx%hI+|{DF%#L})$h+-sQ0W*_n2fd>8+ zvO=$#$nl9p<9snxILRwErU!IH*M>?xh<5t(E`n|O^XL!+%x_m;Plo9jx)^pOl!V^?-cF)uy(3#Zsx)_{#!gI6Q$qvI zbEQr!8viuv`UtJ8&Uu6VY)2FRzlA^}Q!%30mTvF+Q^&WH9H66F$r??cCaB-6hY&^? 
zA2_LhALBy*$U06J@2yM=M32c~=MB8lj;;}1>i<{_gb?laazFL!LpZ}DN!d;*E1@JRTan7jI5@B;5h-@;--s9MQ*L^$s zd_Ujk`91$R$GPwO{l4DUd|l%PL5SlbWx_Ds;(}?T=&33(*UVLq<#$NqrK7bFKBP^` zgEOe(e>nMnFc`xPU@$KZjzD05QYAoi6uu@uK+g<=dLy=46h`Jho!J7bzCy(x|3mH?cY)l^(>2M$k{@5gw3%cU-z6Tn z0vE9It}(%>qk}vwZzrd*7cV_#0(T7*X{Ca@+JQEUpeHii z(Ooa<{=BCxYf1?uC|YbOQcPSbn4OzmN5&+T=l>7gV|X9Vj)g6Y3T#zeI!K)tJL_Je z*foJ(zsxpvH>b1qZDOK!V#}C4dIlPbBn7x;o92c89cn^X73Yg6%KoG~^gyR7AyAZV zw%SBb)tfiO?DFV0zMCJOH^|A)YNK(~Rdud=Ay*!LayCg*+}hU`1N|4nRnpb06zSdn zI5p`nYIy@F2B)PnE`I;=c0^#pSTo1#VZZk#bi+Om%z7fY?|+i}tzcom9dP zY9;^wjmq1CUUNLe?1XB3qmG1_Sk|E_u`uND@DCsw3^R$!e7d!;VV)2Zqq~Vr?!zzC z&;@gW>Px*ILwk#9J2?5DoCnTpF(yV&CIx?>xP%q>O@kN2@KB(4HgP>WDF?4dNZOK6 zT_u0*fQo!GhO4^H1PE>@JACN>&vd0rpzK9t?`eZCf;j*j^;KA=|4(!Rp;+ zN_zYL7`jP&hMJEi&k0SEYP7EZhx%lR!~~IN>?&}6e4pj2D$t+|Xux24OEdn*{dEzBG|Z1!ucgO71isGSQDaYXI@?Xj>Gh^ZHH6 zt)Jn)Pz<{GzVk`DWEz1w74(ehv?j0D1c}`B_y*UI<`=0tbI^ZXT^tKFTM*M%(BQ7k zt7ZojuCk#91GBtKfcnHH%f6ePMejEeu39o{@gY~Ys{dU~7za~S&zI!>w+wj=dj_pY$ zvV#^k%)vRA**rKR(HXFi`SHBYw~omG(BFn z5s@qkZgs<^P_J(+;71-4uIZ(^J(2A2Z+8TrLRx&hb~|LOISkx+Urhx$htf^Zff1Cb z+-#Qun7gcbji@yHaUy`QnOTa)(uwN z&h=YJ*MEo;2S}*&jju#K^o+J<&Th8N=%(>C<2L(8$cC7}$*IbS8zlVz`!99eMJ7`< zHgR`rJw)NsP$40#sYu-40uV1iMe4&QIE6r~#@&~3!2QCeBvxc5yRi~mB5pVflZO7x zR=oZa{E`WgyV6^`Wm8{3G%yp+ZO1 zZAa`%bD~-?-Hq>b({sX0=X3l0k4y3!IQ@Rcy|Majb4oiN=jMTv3Po4{{+HH&yfX*_xl#(X$o)x=UY+4NW%E)7VY5vR8O$xwtOmaBP zf2dXcu8!~z%X7?6a+*5b2HZsYU<*Xg_`4t>S4|EspzKOP3v}<1OOdBwN)s&Ye21L6 z1`>X9><w-uPrQr>c! 
z{;nG6uux-$f*Ye;T;rF0F8_o=Bjjo>Ev|;L7M)1BbWLSO^!)ty40bVI#C&=^hsmJ` z)8ncg+N>2SDID)^cH6pS+jkHSDYYz^_&FzS6pUAjM!gp?#+un*ui8&JEF_JbZ?u5l z&m$Zwv%OAk&nPNwDN{?+my+nlwDAG~31;`Z!cxZs8m&kxnUq##OtpiJgJXaG%PTCs zgG4oB$wcHELf=u*pU#M(xQwLSRjw7Gn|X*CfzsN`+@8P8f4^b7y%$j;cq58IO65I@ z`{Z@m$tb9Aa!pX3E^wy4sq4CR$+iwSjOPgwKcs6@$>?`!8^sQLC)a_p2ialn5bR!m z@}x_~VY-N5RsybcWP`;V0ij@X!V36I{>z)R7)R=i}SsXx?<%J(7y#+8ieiSrS@eeF7m@PNAzyb_ql zLDaWLNu73bZVaD(gHSXM+7o(&CMGXeA9&qB=6^gSTdFa&V3Ji$;&)iT1EzMH>RTHv z^+jk%o#KqFIH?fRvD3$Nr;hEF<&iiK_(mrLT*^mFQQ%J@d%Aj=%VI|RO6&>a5B;j& z`H^de`9tQ`n7jNG$}6zi!Jtant@sr5 z`4K{~l34AJx6wMjMK_HHpdMVRCs%eB4tk_6BMS|IXGl!OuUz50erhN|&kDOWRWUcxqDffYPV$(?i5|rW`_Uq;V8~uuth#jY?o) zJ?)V+OvjK5oP{V|##zMDPk>425GMgy7jWmVoPulDx;z7>1NG z;lzu{yYCY>81Iaq^1|rtYI3up#5LQS4xiaNX5xW;LtFtWl&)N{NPc@r0$!kcz2GIe z>UCICvrZ|SLLj+8E0KufkVrwHi$&2#l_?VG&?5!n`~cvFSO#CBE<{wP5yJxhuRNh^ zJPHA{ik>|}TTBaR1WYpka4$(#5N+Ft)NuUNo4ErjGSLlWIDOSNx4yl;A4{ z6)3dPD1RkFe9RLV)9{32wV!6;P;gQ<%0$Au#wSHoFWJ*^39H~U6H_R0kFOYXZ~dOQ z9gW?o3O#U+f+_P4&o$KjP&pPdBy)x3Q-ViIJJ~U=ki}h5 z>598{UuXW8$QqsnhN9}XJ&CYJGvY62XE6Hu(>!`Lmb zFk}=08l`MCf1P29W=*ak-D?z3WCLv{Xw8DG)IpJ$1aM56ATyMcPvIx?Z6&G9vya$tKpL6*_tcV?-ceYg<(3( z9F2li_U6?8^9XoW=?*xQopBOg$Sv=3hd8+Bv3({OW<@YbBC*yJL}tRYe{U@e!%jv- z&oeJlfIU7KV==vO1SpzO#OonAkRlt(l+GN2~yHoP!KCt3Jhvl7nQ|P-UsY6Jm>{ zA*pG4oZ`LzNDeS^UbXtzzfWi$ZV`XRVHaFZd&CFS=-{Nj&gSJHn-&%LDoh#FsA~M1 z55$chC3_%ve`5X%=h_>V8-XaYLqY~>bjJ%#;GK7vj&NHCz@|%x*ff#n;>h43d@Q_| z&pNN~NzyqGjn@f~q=|B)vhz;BICqMz}!NT3ibpWO0^;|xs%;wo^3 zD3`vxc=)i__7%&3R()~Nn_$ZQW9EPw^)SfT<6cjjQ0~4;PZ_2QBpBS8ggQst%7;t% z0kFUm`T~^hfV54>KETdRj9w;IJ%s7NMHL>2BSg9ZA|_y|*UE+u?x#scK|72Z=ux!5 z6r?-BRoToe_{nVYzipffV&fj2Z9=ni0B&~33{Fx}0lIkvK{rWAxyg)Zerz2*cy{OT zQ2{rlPT24mdhUFE7okW*1SVz&*qpEHYM785ZC>%o%?EG|*NZPYWz|ls{{sJzjTtri}TQ4UJWXIX2YK{j;x5?h;k(I81jcJ;K-`*G6wJm<1rmY z1D5n>gC*I-A5gf4;%~@=v*~!f)Y+P27awA-f#gph_VU0K-bmV6KzRXSA3*wNrIQ2}tL3F?h2JB1KYkSPAk738h!Fh|KTJpe zrvyvKPfp6awM7x~bA_U%n8#SoO>;zOsQ?J{2?T~isHvVqq{3z(@QgQ`WiIzxKXVCW zgO7K)%+*C(!|QP$?%YRV8- 
zijq#b5emw8G3yGG59SU8Q3<)t1aSYO?^*o8&(L7eLLDzYTBQPmuI^oDgZERvKMV#6 z3XuSgVbka)#yDTOLkM4JxyjT|3srx=%ZD7_E!rBVfi9fkZp{ne)Ikxb)SahB!MQxu zt;LI3)+2Aiz3-*wnZ{`-%K7Eem)1*7MX@J)@|g(#(MogHYIrWUKB7(&yZp^erA_qG z;b}QiLJ!W$@n=SiSTK)wWWC(x+R`oMtN+lONq}qoqv1J5l-`tJoEZ7-UqOa=`O2e@ zEr=$$s$9yZ{!BsBV2j!CS11Sfx;UOY|IS<61f}-{khHfHB&4DHl~zfut{5Ss3cPpH zNT+86R3`;ti^lYfO4b+Iv=+RQ2=hPGY4~GdYDMd4&m$?7N_wz+JrQ@d z>e9J1=<(o$2w0qAO`f%Dt@9w_jUWw8OgFJv-IE*_I8-foG*03(d*wp+;MUfLvd=vN`VQv)mL>+V_1JiLX zBcT`)vA2;*wc0K#i!IB1nX7?5v~V+06!4FW;P@{klySgFFUmBR-^YDg>YNtF3+L+J zar-{eb9u}d>%Otv*Jctt;Z$jn(|tM$+v8+%|H(x5Q+s8Aoag;LXk^88T^CyNw6c5MR?`=L z-t_s9-Er)GcDt$3On;?O`Q9_sY!;c9+dyBGgTpAPz8bt)G+GVXrPZczV`YrKz`5U# z-&YJLK(CBFgvDH8pXUj4W~S%2ZZHPHW?ylYcv41=27 z`uwcny`v(?XIAb=~hJw$&Bk(~LH(P%_X{)a7rCeZ$a;BU2=^VOj zs%brrbl%Unbhi_=LVezBZ=YX0ZoN8@vM8f zzc_SSFmH^eZ5+Fw)p9NCqra_4eSD{7!3lvInMb5&p9GJ88?s@a$={se+-zLVGe7;( z$Yby68oQA+*NJ<59-M22#_gPIzsOA{(G9?Z%A&_iQjsEx!{z`kW@K+%(Q25OO@e;gXNAr&COSrrt(;zih93s^h_`3Ttv0q zIcA%_%_H*6;RbF_OW%EDo6Y43Hox%X-e-zu0wX=hJtWme3+2}OX!^>igvKA2Yia9= zr5&Jg{_atx88aJ3>rRspXh!%lGc+(+FtyKpIo$og>GoTp6@ops4KmT)&?NusQGHjW z>o|BCeJ8A(F6tCI+8D$v**g5ZZpN3r{#dkr*z;q*oJhqS5Jk2^wpt>4BD>SUO>ME= zEY@#>BeJ=z08vn=a=^&KF}digpr`b4*Q^^2Bkl(jz+O^U-&{U?>tjKZJ@P{hBly`K zn$%RHzWdDpxv9BGj{HWA`j0{L;HLH>FODfp{~`*;d-{vI+sP~os-v`fjZ1m;6LC-6QqBmz4t^Dyz!NkC zjtNWg#zLBz<+-k-zY*O(uKV;i_##s0x?1}9<(8GreEioJJJaZSBVD_khNAm4V^^;F zO3BYl^-qe}Fh_bFY#e@iQIHbMPV_G&YBlKzbM@NHvGZLrf5J=FZkrJzVCC7vkHWW;2DgZ32z@F}L$mRWX;U@I^esC`ndXS~%E*qtFe7ujIvpc3g? 
zVf|&bhfrLxEUIZ4uIDfk)hRShS1xW7MK0se<1|#S%U-ol$|L);(_iiPFM$3QlvS}` zO=%%$v|BtwgTO|xosWCk{wlsa6w#ehj(HHHt?Gmv^pU5Sp2TC6l1%3FnO@LQ=znn0 zSAEr;P|zDq!5TZ*Tjc1%GArUc9YZMOUz_%k8^4+}6od076cdVUnSRGQbeMAs&VPAO zaT?s*jeBe7>|AGZ*{b2yMfB=&vo$>B_w?sp2xws);+ICXOJ3-l&Ms-S+_QeHWHNuO z43Sz|kJIk!t?ukFnU?8v$DMeI#ESH<IjKecVKgXcTQ4vpg;I zt&Y`lf^QK$jiEl~vf9}@=@dPT`x9waPr0+QOeBPE=OuHg%!*hqA(uWXqvQ|SUz@{w z*C(;j-KY8mc|158WhcAfSWKkppT9UtKQwoNS+9Ay#A)a(wd#d{gaL3%D1}@{E|O1t zU70()jI;b4s7)-V2FF4NYz-86eILQ%zfIMq|Qa&U`#+6SzDh7p6o)NQRWt*el$jvQBEV3(PNsh)eRhpp@iwn`7oPH^H^zNzwUetVV0Rc%lE?QbOgjO}IbH zKF6_}y)SSo6_Nzde&gkm)q;{Sr)Y>A?aFo)1x@NP82b)fvU1j} zzWv9S`!TX-+k?(0^d$#FYR-r#oH&c|I%awGw90287+oYo=2broOw<)K+yrJ*7aZht0JpRsUAB2qK+<=z zc`%7Yom}+|XdQ4@m(1>Q9#i|pJE_p*TEWiQAE9gCq9tQt9C7>bIPFeW5%H6_R=-I4 z-f8bMo-9$368-z=L>#}nR{Sb^uazvB<~kE#@a@#$jIkhYkKAO1lGA4I?XJCQd&|<+ z2GRFWv~!(U>$j0sReWw|HI@GKqxyQ@q97fsD@sS6myCa^{WDQYx4HTzT%R=v-kedW zr(>|O+~Dpe4g=I$H#eySbGZsvyQk`P4Wfi>n4cvGSB$HC8dssmHaa%Pb1P*>b>lC z-_II_UOIgsmxCWcm{9=fssZ42D<&yEAVwerNNQ{(-k!-RGYrjn*&z74W@i@ae!vAg z0p?e@_`WI7mn1G41qQsJJ0*dy0LNh5cIpx49pw59)Q^qRVT*=yj<|Ezh@5Y_=M%*8 z6l}H~r_EWnv}o}li9ZqPn;WZGha#mt{)LnFX)kl;fb;E@zrCOKS}hv*|hWvd(E}vBS^I;d5J0Zf70h z%TCuQ%AS-4cXWzO(RfGYKi!aA$)N4FD~f$dB{&6`3exji?w&YqY>>ItwGW`fPNI{7 z-%mkpEqX|2?ZzWKij2@IK|iB!xBXML=|2EH5`v5>@HF{bE!4ut#71I}~Kl8HNBnGkAT`zQ; zi3)E`xgN?=?5oeLXLs3HPY%$%a&Di4C#SMlQkC|!S3Jsa_wHZ2c&zpKc;j)yEXbAH zy9vh5h9%mOa_l3z=pZ9h0AcDR#mymB$nXjT@a~}Eb8#~**7$-WvPC|*=A6)8y*rD(nm-bAsomrT!McFuyXMsdiD^I}G9 zcl#@2XnvY||D_=L9HE}YhS@Wh?Hr-;9=BVX9&P`!NIKov3-%~qBj1dA$z(G^r~`Is zK2eUnUV83Zh3#TDwiodr&Fb%okkRj?aSF;UIG3p;xw7kq>DTILW@E0~U!L;$*;jn8 zf7)E^20}@fS_IPfZzST|0pqfh&b8;dX4A23ke0r)bOSVB#k~54kX@9$v(d`SW107 zr`L^p()8n(!!LQ60;#^581mH8m9rRZ%!ZenzO=|LcBGa;9G)2LN~0nt`^MzdhTzAJ z+zdtP^T1I#e$_IQHqqfLUbOvV%2N1>&5c(yu18!Jk_|4UY-WL|6o#BjljYT-#s2iA zTFrp~KIRr^e6RpuP4))4p(xujk?Mo=h!|Fj#>_mnMxbKEG58p3(;AVR zTnn*4Ih?;t+Bbvs7J~(9QrD$&&PiRV5nrRA_JVg6CpVX4H{08_hS++S6#6CB-S`J1 
z<)kKaO_mbfR}v+p{O4H<#C@_m?j}o2_(_#iEQi`1bYGi^j0W;kj{>w6N%bmZ{Hj6*ngjx^-1 z1m_|5(F~Z}ExmM3=EiETNfB!Vj>B&F1(fAw&&F7Z7Z>|&6Q?h$gseCw`hgaAS^gx8?jo<~q?%fCagE+yv zm?><H~4Vhk}VUX9roBH zJ{8x8|8f?N-xj(kz=VD3`zkbY;7rN#CEo(wTN3?htTSuvwgYcpTB>x>9m<|7=v}uo z&T8yS2vr-!eW7vrwKF2TU}cVdKim0 z*QXk!1+kt7lHp zghd~~bT0_KCQ01hFfqZ#CqUoTpK5g=l`dTOfpY(d2-$L(5 z#Lbq^+-hL`c^9|0<_i0a^LiP7M#BVShttG81GXM;wr{akT@)%hbiQjGTYP+Bd zsI|laLP_u3WciMdz^XptLEZT1v9uD|3NA2nR%XHa1G1WmF6&mCi&j#bInbw0N>}92 zFk9E3@^OAvgJ&UES!76@a$S9wUY4$!!^p?S%#05W0_ZdMwuiJD=1-&nugP}A2 zzO%El{q>ckXoUSWtRSLs4le0Ia5}I_d{D-_WDjzGBh0Z>EE>LnYSn@QA42{MHHPlP zA@X3~OF%kSCY0!QGTKyo7;&sS1V^|==W15M8^S^yNAv?WdgtGs;oD7Ndd%d120GD7a)Ppk!EihGuh`rf;@ZNZn<ozj- zZ*Neqiwh0_-xjrvZy)hS0~-s!38gvzqiEx0C{xV=Yh47wxs+d*s{RmT1GR&Rh|kMR zsJ^hFTn*_14kVa-^}4EZ#ir4%W9GMzyM*-O$KXo>i+~880ejIKN$5CB&fa`oxOdFA zz`vq$Uc~z8&=R0JED-!L&&#~gZaWkcmibV2GLDvcfFRj#dC*2XC`GYEPbeSzTj~8A zgalLo`5W}Bu_zZ5&Qnp=HTMeLdGO=XnFZ=YNMcciI$fcCn;MmMoN zk!BD=#>_c;t(#;o%XKcJXIO~oAxIVARNcO1K7{P1KkK4z!1r(=gPu^qiyBBoQkf_Mr4D_TeC<&VaofWZ0jQlE8CUF#rE!_nz1jK_Y7mpAkI42Zyc`>zjAz&fMQ>)HP8Y}hL1mko~JKRVf4lnzuyBxX?YTj?5lJ*-8q7T~6 zFgso^w-v9OrlaBzOqPMe`&4Hr&3%0=y+L-BxA=K8p{)v>sG>y0E8qEdKQb0RGO+|H zGj>P~zLcnR+g$A>P%l;NsA4Wzs+9Ph2E7(eBq5QcHvB>DXM@}(^1yj1PXvgfUX5qQ z_f;SZQ-|X~U~BA8p*S(kBNpK=k<69ft3>y;YIkZO=f;3PVj>9mmK9ljysm2u(KB00 z2^sws!G`LcTQE3-t+KeSF}{Z9y#?~scXdo#`Pz1{AEsMMe_TcO;R{p*BFUPV%&=OA z+9H&-xJefnlvZ6&zKfPYyo%|(`X&{)!M%!0kvCEHF&{18|nC*0%p-0Tn22!a&&f#eI zN0|j1;9H=Oz>b8K2~hQXtESZJi-Kh2sYah973!zM%*Vmq;6-ckoOa+qVC5| z1wMFm6z!L)L~7vbb?#7ZJ6eV$_+b1<6xuAxM_02Nb{Gm$ZyAb(mXeVn62owvDY!au zy*ds>-k zr0gqo-H!8PAlzSD@-x1@ItDk{E*fUn=f&(ms*r(JUBC;c7Nq%~2c8XPSyBRF9q11b zQgbI6#Oj$TC?gL2!XcOOO6 zM~r0B^y$Av<=P8 z&4v9%q+%ZpyPh4MjA*=R^W28C^ifdM{bbhzOhC0q7`C|mF zd*CYbQ6_e6kbkI!Z56lF=fkg`7OQw8J*ii^-6U8BEzKHwA##clD|+Bs+u>^*J9!}q zv>G*bJ{-~fs^eBo4Z2r8Lqz(Ko_f^2;^7tQV|B9i;scMZo0R$A1l_xW&t+@)dDBYf zd!E|g-nOK0fCG!FZdw6Zp_KpZ-%|e4BcO2^QZgt9d=zbNn%pv*XZ^_J 
zPZRPzG`vpLxxA_x&sNzFZDft*956;_LLfnIEG5JHG%sQV%K@>IPAqmPVAr32&CDn0 z+j~1)ugp9#`uWI;NW}h~$CRtxH|zI_s5VXw=VY9fp1Rq)a?Mxj<}HW;B^!_XP6K^6 z_aapRLs}(}@AY=y7;>+7Zm)O%S=Q^00IkSQ3sT!$cI#Rme5dYux4~haiagqFWuy

    N9vIhqt-h9~EU z7Cn>N&s=e-UKR`Xw}!IT4vp-eGn6mmU_}gzywX9vQC3P>eS;tm6a>=PNZb*w@^9aCb!j_Ne>)A z)8xIf72Rizx%!cWY~h1-2mfKMk#~tPtVq>j15VMJOZW3hhIYQl}gXyKO{HEO+$&` z#J*D5_tQV`_8US1Mk>hAe=C)85zx!eq$l%VAG4p|bGO0`IrhjE`)yq8Il6+!HTVb_!a^QzD_^`>c{e>D?TKAHSzJN zuY?5GB)g>BjWObhNG=L7sJ9jm<>O0n0!G0Uvjd!)xpTZ5$*K9ravGq* zlyjfta9lcsHPwl2d@-C@Q{RA=fwI*|6NqA|Q3ML+ES@x&gGvty{%E_V!da1Fx}BWn zin_TQkg#x4lY=_9AzEyh(w=wo$wHHoklWUVX22C=k)`QMA-9iav4Z)-ucxzfiY#20 z$z_JmB3>^gQaW_whiC~0))MtP>mgb#mU69_@h=FKUjCw9uZyKNDFpN+_Q%E@0wgSC zZP^RLj7H&tr|5|Ww+>8=Pt+#M$=xjD*H(5sG~SVDa9deU-liu8Y=Dfr+Z3Qs))4Dq-4wrV9GEOt>uThgBs(jPqHNL)Nv!Tt9lpAq*^P@JCFQI<$ov6C zJNh8u^mDSH2;rt3vE?M4lvrV0zvR-v7t4eN{b4tY^uGj+~!_#3+e<*UBCb7%VETH4trjj z^;9tKwJe;eLi6$!yGETnI7BD%Ayri_)~ZVnB*{h-Fq*^M-UnaX>S9U&_VIxNO=??; z*48W-)K|ZBArbLlqxFt53F&toC|FWoBb*J=X)W1YEy4cz3_--nkZ!13Vyu@OOR29v z07Bu!KTnIz@tsQ^SF{VZJwECAp!P(L-M9&-3GK1^9HiQ?!n%@+B$nlcznuJx+0l(P zi~NzdJ8deXFG8^fr@fo0aT|Y-Nt)h8Qv&)nN``BtOiUTX9qC)gHXvq62muardw^R3{VT9NvujnYKidMwM21$pwf$Q zhd5)bIqDLDl9qy7&`lr49Cc8|`x*sLW}|ga8s~INIha$-uG3mhBZhrT3XwlB*WlU2 ziEE`vWGVK4gT(s~B;IBdnb_*HM_M|RZhm`yH9Apm@cdR&2TTJZD3lO#{HQ}gIl?s0 zp@Ea0GJAa9pWU)$DEbfEJBn;?Zz9rX1lw~e+4N(d|DM*gH)A)VfieHl!h&4!RREdv z=hWGIH_PSPw}7|O@EyftJt#YL(Dp!ES4^xDsP=Pd9D$-xHkiA^Dj2vO$f8@rQ$iuP z>W*%-={}B!YuM<}*7td&Ow*C*d=W&muu{PE&Rga%tmaupGE>DLVj7|f#sSLIhelV) zDWod>k4SmzMz0~lW3V`%iOjcKfY>>@L6UKdbyP%V9%-V_Lfol|?_*mQgiGTLiF#>L zr|%fzvX1hEWLp>9e7!xinUfnPb{b|wvV&R*7a{a-x?zEC`3*bPp>!9T26j_BdqV&0 z-ti@$T1M16draChT;)4)g?JTSnToB4AIF)3=!>ygcW*NRZ3GV#4xB^pgPhCO4L6W$ zpiCRiNfYf1(och_s8R4Lz$XOht>Gn@cJOn)o;L2-ZYzSGw+_;rgo}>Hs%U>{{Y{(- z+W-z)h7^WDjq7qn=R1;r(FDog!Iqw4_26O6>Rc0U7EgWS6xaXwA#Kqxy1IiGc#_^leMCLzb0V?m{U>#tHtpa35e53dz}eHK ztGyEB`4Ed-MV!X|e%pD{Fvg#?OmMub=|`^nV_Q>*A-$l>WZzx?bx$Q=b0uQVbTR%^uQB0WC9@^i)d~wvDRek zd+6Qwlu-c;8Yu_9LP7mWklP_;TV(vz?jpTy(B$>w)&{ZaXcO7V%FE917+-BCK%iZ-od#Im~rLoK9ZpKzcI!si;}!r@w#c5B`9b zNS8XoKdT1Dj}fViZ=#@Z2{TPPg|>+zlsDV~s^y<4|HQM%f+r=lSdSrv-U^4ghDA=^ 
zU_>=^Al&cnskZ#B0SJKh9upqE#4P1mo_#*)j_gq&{kwW4gUHnAMX_y;I>yg6eL0W2g&W}+HMB>EEBuGs1}0v)jx|_8x|VTz=Eo%uKvD%09F#Uce1UuC^d&;|#^@ z?w79owbAlQLa12%XXP+*)!n1Nyj4&5=p}9o-7|py?&F{(zzed=+MlK*w>tU%KKc_F zxm%a7ptFneVZ3wEVLwXeh^+~5anL&Ja{%28vS(;B42)D&-Lp-RJ^bG-b2w)cS0`6j zFT9%h@ zV-c~{l!^HQC-E2kgq5-+LLK`ZX+M)FoUFesU!G$&Bpq@3Af-3!ofT-kbePT3@iG|oPRpD#xr82aeGb+MO?VB!YKYFpi%r62 z85510bsG{J$8)X^ugc$X7t8Fdwg4xy^?8frgeVNezOHz!VEY(Qo-xz`C0zdWDl*t< z6k4JivV0!JDayR{(MN=`loYnXL@{i)&0x`Fe+Pqin&E>BjlUwcNrx2M+uh^fv6rK* zs9R^%7y8^T(Lj9ljAl>Y^G|Z!B|2Z-Eib`T*{bnnYGAScUV2cx$xHd3txDO)h%ilm%hiMWol9~efi(@ z3*4!#x}d1AL6@zpy;a|~E;|i3ajzWd=SkE|lbtTLo^Q0;dsBAlb53?=AP=)p&#x|p zcpLW_=bn-a<_6MxaA6kbv%6`46IXkeWugfo;wtDFAr^+O-aWkBs~{iYgNTI zOzvk8en_^<&!6J;)3B)X4`{=p_&jc%C3!-<|9OhmPm!{WF;@BPs& zx=>h~YULGK=c)I;!D)y+3O9DmmEaH{x1%Rdo2Ie23Bg27302RYI#;C=_k4Wj1_m>ky}Z1ObqlUtijuS+ectro&o{pho!RoX zlKjK@r`t|VZ0Ij8elSQA+{Z3CNbmJR)T+njR&9*Tqt8XU`LY4N-@+a)l8 z6MuIev+>61WvPnJe_WnmN_uEXsjlF779|OMZAoir`T!^0Qd4^o8u#Yrsh9;y>!YPY zBy3IL-5C2VI4A|-6!hOs7`>VdGd>4+ zlf^2Z(2qyc)9F#rB}AX9SL9EsF5?9b`9r+xv~$O9GO~k3tq;_dmxXPebAB&BEd8*$ zIO5Tgti626eTsi|TIXlVs%lzdVC~33+``uu)(f%6_utDKRTb3DrD$LhJb3mOM}BI9 zn({tQoAn(>jf2DNI{38zgBaU{&1EM$KW-zHq@s%a6Ek{zPJ~gkodL~j~4S? zKCt`uAq7j?lRI-&O9?yN$d=+-$JU#FKUhC^zNh)(V9)!joO^pNzg3+Tj~{vP$KS}hYxAJQmXY^9pH`ZAN5F~B_qusk_&hzM zmd{{58dr+o>JO6bCbP`h`{4rXYg@_y!FWaz)1Teh-OpHfetx)_NN`D3Ofg&>Z%@{h zEGE16VFxZVyr|_OgOK;daKTDSyO#GYM|2Zv494Fu35MI6s5mK9FYqMz%Jg?R9{?(C zvo1<9Y97s8*MJXU$^tO7`-bTITQoS0)`X++yOtx z9isHPIwm9}+3wNO8C*hAa*HkE@A7m z)}JqS@48$gT4r$2)yx;;kv#X|f7|92RuBp{ZlG>o&on z-}P7xUzlDt?MTNpObS?ZW)|}Wbl8s?jMq!gtb~PK>(wh;nfml3NIF{&&r<6S z-5sioU#mB?Q_p$r>5CV;_w4*FuaTcpY~I~FJvZVN1IxZeiuMceYra)&%XFfG%A;32 zhE_7+wF|aUp_6zLn{`@{1e{?g?=FOa|LbUSlM$k8&(!v#2ZMWo(d|uJxftW?9rgd% z`s%Q%x1ejf8xY?*sQ2ytQvMC<(%d1ApdL*Al8Sey++HRGY zCVn{K`>{L)!``Ux@mx+;#lbnRzQBR|mqokfQ6n?1#^+0@9Lh@1`>OV9 z+i*2&hC$nhYi5rFow7AjIeIY6+2r)VW}=<*_kXAPLLuP8NJhT2(F8hW(N! 
zs~eYru=>|V zW}0s`de524P|#ANjaU2;1=+YyJ0cK}P-aYVwd~0n+;M)$mdgYL{4#8?VscC}u7?Bf zl9%QgRvo*~=SZersy#I=VqKR!G+1L9cjQ_=b3UE0=voBNX+86V8hO2^!e*3=J0)Bq zpw#JkW|6IYLqGJ&HH&g5v6GA6JD2}_aR26_-N$cWs#X~5cbb{AhXGc(*RerxkTgjn zYvNmxiUzE@e@a5z17gs$n7rSg$^iNJcEVAJK}+Pl2$bF!FUw)Red7+y)r@Jjc|z_y z#yVPyw&BrG>Lnv?{6T-<_VetoCaI-!YK7DfVT3$Tje_^wA`WphN>D0!a#9Va+mrZ- zli@-4FBKv12(6(8lDn#k3u^YwVukc{(@vrhHkUF;AV&DboC@I%N0Q(RPnO5`90Jiv z7`3ymiv(BS&sE`Fooqe~?UwJt!NH}5J4Mktr`y%CST=Iuiov7d3irn`g*tI~+?fL@URA&vBFld#-o)FAqW<}dE)H)HE|MD_h< zdGYI6Luz`=0b#f-$r}TyL>33DT^`&8{!VFUo?=1f>br5bfx|OmDZN?MxBqZitP!`JJp>x}&+Te1plB3Y_y%Jex%_IX{8+sosXGS!k5du^2RZf!9q$1}=vo5pzspJW2Z z!-be4%cM!cRTR8m#Q0ohc0Wb(OLnhYZuH;0=nwgzaCsX#T*+=RL$N@qUaWzLO)cl! zzs`$vKB;!0-bB3!4ZTW#?(8%|%5E{Kof6Cog2xU$+jBTCeOhwb5s!McL%{8V|3`yb zk%oQgKk2O`b%KvU#v+vp^^RD;CqXB6-E0ju;>?vFz4_*V@6zV#7dPJC-8+&BOfgzfS$X{1Cz1H$)wPXbG$9nq>iu%(t0?s9%&fL!!TAca%(opn5QnoR`$d0}E zCQiUW2BY?bG3Y)OQ}!T6|2vhiTz>b!`b za<+9lEC+_)XQ&9|CCX3}j^tXIwnd2pdx z91souHCeLgot~b)3S(u8+vUU+QU82^1=*Ky3}Clj%#J|Ag{Jy)gHKqiKT!_$f1LGR z{JIL;JuH3<^#jrZ_5Xa3|9;9;j}eaj&yVQ{K_g;AK7S!;qXK}WPG0B~(0|VZcr2R^ z%9dmJitaTGNx=@nOO^{rpZtJ*{I#W=j)3MgBGOv#CiuU9XpR?bRk|QiMZtW7lT;F?B_=K|uE4o4jPKD)@OlCV|~2bS^Ge{>Q6hvZ;i7 zmHzF1f$6Wtax?bQ;UE)hD03!U@=lD52Q4*;5ZTWM&-c$jX6Z~6jedK86t9IFU@!Lm zVs3FD!C^KPO|pa~OUkedWpUVV3%1sB(dTjU&wDCs0vRP+?~pz8vJ_`(QC(sWQ!k2D z{{Q&+mFXW3!DLEI!|y|IObrgfWo1<4zh4ja;~|i5zGa$t_l>wY`ueh$ zHWq@7kD=eZKYa-jdSOyfyJgOV;S05b$b`CXdwwK23ouQ;E2>q@5WO@V)G`*;i_86_ zus6G!z|*tuXhKak&mIa;XoJJf z5~%m5jgr9DdE9=9Xa!Z>9QXYVI4OqRD$oAuwSHILzfj5t?E32@rv0!=VflC(gjq3P z?pfcPyBIS%0R%138y(^gVC(NhXS0ZnQQqz01q1W<#gFe4l_qS^dK<~)8Rf-9k%cJ$^9o`zB9Grw63-xj z%de8-XjQ1(s~;?wO$%k5&M7j3xN+)^*X^kKE6gEI;h?`mWVf27esA+6dv6e7QVsHc zWa3Bo`*>Or-`QVb8|~P_rqRLs(_n> zcR_+axB5H-ini=XE;xhLU*jBjpey@-XRn z0pfQ`=0u1d5&?;nP-Gm&rIqifK1n5B>|;2zCguus_AdyY?M{5p5DUDm z$k!;x%9Tq)vm1dE8`HubB{_!76X(UO(V6Vka9jd1u}6u>okpI=&HTm72jS}pB@69O z!}w0Fvf!ICbnwSAiQsD{uSoqb!20t|%~?F+w6gu6HF6kmBJ1p7ux{S8_9=QEw%XrN 
z(EskP{oC%uKqy=c>&oQ(J*SV)W`SoZKf6PIkg9$43>zw#>aFHvziw9|=xmKbBI3zw zKEk2mhXK4w&P=nls1J6pi#FC+y34D4`|KC!si=p?#QLEb8uRKk`P`+lCPy5O0e1t` zt)!`GzQx6X&gFV1)8*a8;_=NP7YYIZ7asw3UjfT6y6q)GSAap}owvHHK&q#Yk53*J zbV!0jbxmA8Kg&Ax(JD$Gtc@cCep{>W)*Iu6ZsOrYZ*OnCL)Z*Itncx;&1>JjpQuU4 zyx6WiMOQBt2V+hV6;mqYq@+uigV1Vzb+fAl(z?1VuH*StY$iiTctz@E3(l2d@Lbq% zP?wQSr=(xFup!`s!rd751KWg?WwyU1y(G3$K(#yi0nTgQ@uvB)-S&@oHWS94<>aT7 z#to_QCvoFEk(+Hbi!fpZ)jvptJPCIvOWTK1NLJ_S7d=9}$D4VMQk*9424L_lacA7b zpFB&E$+ifyyddq`DpV^R3C6A^OzO$#k7ejaW@Sd`jGaMGT@@dSrWag@6 zwvbC&0s8JSp8+h;0O^|cM@9Jm5nI`+0CT=9;G=dy{+$mbNdcYWm+-Sz&5bZ)-RVBN zJnrY;c)G>JC=X0V{ZHLR@R^k)E!TtV#Sy8sl%e>F@sblN()wm;%REF7%Eqq2!u31`l-tmp3SSufTWF;y>3Bg;_gG+b1<7RDG%1kTorFg5F%T#%^TB<@xk)$kZH5hak8Sy6f!KuAtp{8TOObE zZ1(cXn9{Rei<|PXZtd^`%b`@Js;8{OGpHnZ+SrYc_NKK7RSlibb{-ZjYG*$csh5(z z;3pIj5piCNlxk~BgFtuMz7C6D@!{rTi-gX+z?3&(FknQ;gb%h&M8rwpiJm@&=ex|D z`hf-yw^HkziaHN^)CXGQBbglOmD{Tm!AfMb)G+gjLLCZG9l5a<2n^h30r1K4PP~B~ zk+BIKs2$ug;!{|fy2WGBq+<2l+_);GdabF}t6iv(7jo~zF}dLPCK}Nfz`U{|b2T%{ zrqrk?(YmO<=K0J1n#T3vxG8JmzWT?m9uvmBS6C9F9`aTX70!Xe#^1X{x6H&2@NvC$ z8qD@^{T28u`N-Z-%li6tHpgW5bi5xcX~Xg-J5CY zcmIueNQ!Hj41wp*p9k*}ChHKjYG3cqfF_CprQ>T` zyeaMs4J9tKTxGti%-yNkn0*)#Uzdq2C6;EbXR~=%YWA*{-XqZfE+7}zwd3AQ%GEUJ z3fqq7)))2trQ#Pe*4=c4{25Rf0ALsIUUQXmoCN}sKz^PrsI8<_x4CeC=8R>sOfSMC z%^hoZX6MsQR(F_40;^F6!f9?PUtHJw@cSg5N-Irl2y^GO4gO?r?)2wucT{w4X)$`I^lNwBuqXg^JBLoGpzfUoMY4C*-kP zi#;3H0v)Ls0Ls(bHsKs7mM`657!ena2!td*nr6x-v8QQDh8aW9RtQoi3{I{k6Sv3Chfg6- z`)&l@qn{x#!MfkHtti^P6Er9P>bq znOks=G{Ro~=6d2M0xsjjg_5Mj0`S*$9SFE(5M1Mnjm=<;uKO2b3%s5%(|pYbCr8NU zT2-1_a$kCgPAm#aNGw6a!o4Hn7c3RAQ$kEPijS!S{}-sfzj?9UW2k6bwEvP-M%3wTZV~79;6Ct}@MWh|M~QoX5tI zezW_YNNe#on`vIpEJm9W6d*<1zjy%K9~mskT6%RrZuR*D1<|^{Vpn^-BN(J_5(_}- z+K@|HY3yuWtvQ?y2HiH%yRtm1DpO$|*Z7*dXSf~R;duM;f_D#t6F)0A+p4z5B8Bws zPtf4;O}VrMqgUI#O1A=z7?Nt=9fHApFTpooZ7`){=p2F(<+KxPW}q`pwY z=cO@wHVyMWiMv96Rw#c~iz-JTCVoB$oxn3<>C)vU;CSQiazYW`J%|m5f}0ORq8>le 
z1G>xA)!fL>5(zw6W;635TOL~@8Qvd}ao+AHa@6`O4l%(Z3;H!({aWeORvTD2yN)AZ;BnX6_G?8@X;16&$vv$cEvdoAdX8}x&7@QM*SFe8z`Ov_HZU*qBi^2 z)i2xI7N;I0O}7@Mm;7#FkxMM5s9jh73gY^L#Yg-;wk&A>YLik+Y$#P=_hDc9f<>KH=TAo~YzH2fvWtx7 z{$KA^j`eaaPMqxWvpprufj$&wY|cV#t1GZmZcBY4#4D5SkESJ{RbkOwahSAw zX6ud#xPhLxu;;vvXFqbZOn#)*5ik`$!mNdXc~_5Rs1dvA00$2=G==TiYRgc->QFnQ za%2GR7MIy3oj|q(rK4WEoLn*&TVGK5z`Zr<~t&9g&bX|!9HPOAot9N{4vezu*ZCGL<> zOERSu`^;>==um2jVvX#!I7=|kD~|5N=_{5t=V9h5eerixJX;U*{HexvvM1B$GAenp z?b)xev7czwbLQJCXBbFu9P&Zcjyc>359D5(c1MFNj!mqmYUF%ZZU~wT*6H|x6~Id( zWzg&Jqd%Pbg}lFI-1DwJkF-6He6{{qL{!`UOOX6B+&}&@LC815?GoW=t0<0Z4yzu^ zl^^7`nwzF5%YbUqZeZUSOtQwR3k4EL=6AiF1zYaqu`=Dp*9B4+2j#pjov|;kke;TS zLpu9p?CNCc&}F7_eZ*tO6Yf>G*!OdFq})(8$k6YQN*DHl(uI;6bI7wRtCwJY!d8RT z&67MM1&}hW^4)~FXPlae{bF942$%K_-e^+LH>@v)VJ4|qwx22)D#d(muWkc&=JU{J zYQ0V>{1m08&ZhT2MMWu+ZH7#fM=rff{6Ap%9M>cESbSgogbv2P%|o6vvUQo>{1@N7 zho@t<`#*RD^nu`+O^tr(|HNqBA>zB?RF4ZGz1wIx1OzgC(3B;#BPNg=f`nej1~X@M zF-E!>us>V7u{WI|mK<7`yJ|UGOWxs!$LNg65Y6Y8_-^*RUpLnUSQrL>7OM4Yd?bFX zGz$vnBKEvajSeMByqTe+$Wn;pvAV6E%^LH()n6pGG7RXc10Op^>xDF+oV1#Xd`ES{ zR=|c+n6EN3RIDx4S$&5w>rGROVl2wEu94Z$&(J>>-OdKc`L{3hS_U>x1>A&0Cg1-& z|Jg7}$}svd-Y=f#lbMUJ<*a$aZxx_Bj3|`1dN-ANMN2w;sis&S27fWrg{>PfCOh^+ zV9~E|(PT*sM6RcI>ERIvppkGOc%}|#&TJ3Xu(*;h*C}w>yL0%|#yp}DC)I`u^%raQ zm324114+WQNqxB124rLLL&LKjj%WnQV3Q#?B2%m}iRW5d%wG3RVTb5MiCY{7@~Q@D zKw+na9C+C=0*4*R$KsQ~(`=@x@~wkBkw$+gM)g(!K-C!}y~@0{QlIEFw6BK)`IS z4m}#V+e@4yBjYwccb47@%!x*IEO?R3)>_N_z&E$s9MT!dFqFb&oDw{r9(=;3Cz9|J zld><7&vc{_Qsx=cdE>d8Sm4ShuEb<2=Ux1OWjzU;;k~TM*YZ^Md`>%CGYt+!!wJ>9 z$X?RL#IN@xBO^mltVH(?GnGLbXy#cHNPsCdL^IfcmmAYv zZ-FP5pVoZ%x>9OW;d+JLBwO%xtYAQ)a$%HVcv>eX=lm3WT9Af z0xH3mv51Eyze|H7zEXql(uy}_olr`QRLieWUB$kxX*m8`>ouWM5BaeE@>htpluPrSV7eQ!lXgAzIixs?@?GMmQuvC)cbM zU9`-Aspazv->5C|)|m$mR^z@Ww9Zh%4Q9ikHd3l3Iz_LOhodi_6rHPSchVk z8&kpQM3x{nHu>uXpq7uvctXfeE2zK1v9XjF@0HzU5Z0zFsCy$_ zAJZ7txJvup1g}CtLv`&ooSzR42^~xKbd!#;hq8_Z#L%n1+g@x<8)n{l52QRz76L2W zL_4}y&!|&7H{4J70bIp-75tm~`Ws~ccRt2eLCmALzdIzY^BAH&1Tf-TWJ*6_aP 
z01rf0sz(+4C&MbE0ASp+?-#>U1r~qtS@%y`0^~ESbnaRR4+E1NWJhb-0dkq8 z)^4K{{c2{XUZ23b^}>msB(6g}UD0`AokeU+%*Z6dp|eJ(L&lLBIg8eIHEh>h*@Sbq z_a#<}7U5-4O4Zw~Gh3V!uG9RO%`qEe=qJt;XDjDeE7PjR{}}p& zXFyls(<9AhB`@E6w^&j95+^X6h!^*jg8+?6?u+a5-Mq_bn1kHi>#%HI*gtB|H}Y>q z;{`f=n&Ob7gj%&`e)o5aa6Y~c^FP(i1Z2llkUM3fR0z`(uUapE(Q^}4jgOq2i=R4) zD(H`ES``i1fjUw(n8bO7nvpEt;B*-N=(~CUv!Z?b!!?mi8*$mx7%zzUZ2ZmvJlQc_ z=lgG0J4`Wj2_x)x;StXueU2jWFh|>&p?Lk6nYo>@DD(0+Hanz%k{{uq<3`l>bgbrv zR!Oxak>krAp`E?$PHM8CVXhr>V*2h3AJ^e7>L%7^L!Gug96GTtm0Xgpz9@8Du7|96 zGT~B|w%qF!0k;}|JKtj`3B<@}xDBP^^eq$Sy^)G!2AyY4qd-E)04-~9&@b+X zhhx+i%?Ep~>Qt{SBvodX_&O^OX)S`yi&C{Wh7HG#KNXtCjz1iXXhY~{WYoFt(pCAx04j3=s|-J@5vuOs+_^9+P923r@WuWdkG*?oBw%1);sn!R$^|B+5Rsi!?eo+a zOpy{PW3U%W=(pA(Ih^^_P2zQnJo{rO@0D2VBLC+E z)Um2XCAo|BzF3KuJqg@+wCyrEeK9fla0-G~6vQrTO78~ZF%wvgRd0xS(!3)WbSx4B zwzQKW4e>CxS#7g4y%Znr?@ZT2p1wldv_F`%oc+w_bif8PKI(AiHg?JmaeP~D*bO6_ z#>t>LNg(JB17y(8BF?iTRpsLEsM&3zSGzDUuDfc~_h+irYj{{-iKQR~U#4Tg8Ic}g zbJZXSIU^EiC!XVB-tD$v{UHNA8mt%Dz*+$X(?rl>*=JjT9R-3RV5<<#ulH+%N$5}M zU#s#+VB`y6rd_bc-rKE+=hCUsbvFcH^__0d;(+c?1knHFX<{jS4yQN#GnFPx6-L9J zJ-g)=upmLyzHozqo~KtU#wat<5Jk3{Eaq7tV(+1_anhMGkvQB(eIAp?;%9>HaVTKe z?t3LDR&;&7cLyWlYUE%8zEKy;(4(1kg8t_vD2JGiP3qA{;~D6!B9G z3>$vNcv}xE%}JR7jb_h|04diG5wnO?ys21IYrS7^0A>vvLtI<`k;4Kkos?djJlO2>e8>%7pU zY7B1ChlC%_U@^~8CpHiHE(!v|kwBpB0g2Kpy|J+Aj7IITCSvRTF6*Z7vex^VLwum- zg?rxJkXz1FJ>X@X8=dAW32qj+WrJ%s`UWKtTZR&I?^3aP9y@Yj_lOcm-79JUIffR^CF6w@>jD5cbyMWZ4>nFh3xi-2P{ zYmVMIJ*iSDrp08b%BSwQw((x((hmvI?#a!g;u6FhbcDpjy}lRsSL`jL zoCcL-xB)cMhq!8T?rIL~ueWygWJ*KC$YpwsxnnL$4MVNAHKFJ&?+VW3G@@2DYl8Sf6bX z7kwF6CYRFRx@M#dVUbgS#TX{y-1C@fs<-!uP4A?jl!@`Rt8m#V$eVEaWaobT<`~FA zCAnrdSp78D#AG(z)7o^=G+QO90>`%}O!;g?a7U6~4H+su+Scs~P6(EnWGMUW8U;9d z+GVK2NYaddt-VbX<8#;vGagKmLejEo7{}uq60U>(it7$3+xitcjSe0Y!b@t}(FNXc z9V~%m?L6l%d{avd{FR@2-0@c91wxbMH?szCE8cBKu^zUZgbyF~O_shXgLx&D*-307 zJ4TG~tr0jb1B6&6fsQ;{rCIq9ZIDPc{u7VG?p?BP25^uRX+MxpUjmxm_$R#=GNKDD zr&JCsGERnSL6mg*>wf02!zjDq%eqz$fOqJ*(*Mfk%e9uX;nen%)lkseQ)>gy 
z3(7&tR`q4o`;R;0-#F%bjN@~n*lU1c>e3J>&MCt}B5xVJgi@XqsQvST1GPt%SKUeS z*MA%bZ62be<-X{Dj0G2nRJ1Axz%UrEX+P_TZV>i{vb~?H_L>@0vpRz{l2#jd6KqZjGsH-g9J(&<5V@LMRFY8{l zyags!v(axK1vlw4dYl$X{KuB3OLn!y$gY2T;yG@zz&#xMu#~LFP6cpR_#EbvoJU1n z9uSx?G#c;;+w>9L^-Rndt&4lWq~4>iSt&bazBL>v277aLgozu_0@|mV7IB=>1Ny)S zLfWFv7cHf5WzD&BQtzRhsx94`#m3?uY?r^u0DN`SzY2VZ@PdQ1G{O&IinhDV}R52$0T z4jjuJ7VZkeYi|?u_N4kcbD9Ti`Th+K<~OM2CWq90y*s$mZ;)-CK^`|<1fbY5XxFuE zCIiD#y7Tda%>zN(a|}=N+)!C}pZH@-0sRXa58YHM8uSM95n}Igg;ODj4j@ zt_i?$Coaw+bWhn7f#9#!3E_&F7+qpY3(9zEl7Q7*$M=R9m zx!TMjpic?Co6NsCkN*bm#wZ7uT!9hMhV5`4!)@&vtDSDrno3%V$ssxof{gK0Dxv& z`u$%dUJ%hXYC(Odp>*{_>fR|!`|oKu;C(bQoiTj=2hsP63i}iH`YQSUYmoh47Uh4D ze~34N;2I%Mk#y+nDv*RB3M4`YB z=Bjq_wQ#$E_zvCD2kD7I+m;Nyr_)o_O5DKA}~H0mwM}x*Mg7vkcl0ihUn@ zofGQ{Jt2mzt736Dr6@gd%0q{4Aur9Nlq`%BWnU*|YH&4E9B0>YZ(29MBgy(YY;3ip zcBDYXgezXlnSaAXUQW*zK(7_1m|x9hA)D$qt*_iiz2(rKU_^VQ=8@W{=-fY{G#*f8O%rL`VVHRGvMBSh6-lFPe6>{BwcG4NG zCi~*&h78HoVpi>4!{ZC@E5c86A#PM0D$e z@*l@#Ht{1a5^$r9`FkoJDL@JAmwRz?D@T!;wK`D>88Dgn8}t~4XW*0i(JJ)8v_dtL z@Cl$39<13oA^9MP(|4~S^+dhnQP=teR`SxK!#gh2%5**64JJO_{*l*FT{Hy>L+dIa zahJvmRBSiye{QC&j!LoV>5Ga1g+!5&u<+;4isD$5#9v@tA^xXDXYwI#F7%!Rwmdrm zk!tfY$ql$~_|DDH+Kf}nrsdrqLviS%UntF)?9J7myxpy_l-^?*XRgZNYNXA7m21}0 zBfD!noEqb@)`KayO7mO>oVrqH#Ixxi;k4aRl_tY2K-Vx!TUA@&bM!=k>e_DL9EqFm zU>_tyDW0vfH9S9raV_9`|@Z%bPenzHeb|YGqoE|wcfrb#EWjrd$meMbGlb) z!%aBVKHkop+;51)MD~JSnVr?e&gL3%wfm;n_FHFgzHW0D2h}(=6ReN(Hkba}XMqSqOEi-L%P!{)wN-Rx#Fvhy9fP*l4O zoc=+K^qZ*t6@hFcV!=VTJRDX>0dqutSOO@jrvfhp#4KkS4~zy-(UJT0nw(i$dGfnLbenI)7pX}kgJruM z_D0&vJ#IMDcG2+JapjUY8i)L+fTp@{yqSJ!cg7_z@E|*TPyl1&uscq*ZnFvm1bygK z*PZPSC4&gQ3*J5FCo~2T&cad8|4rTGP|`~o{zRh>UL-M$ISU;y%1O@a^5fXEjvVqaLUi``;sZ4?>-Cn24XcO2jmBZEpM%F5>CKJ@GM2!98YTAxpg15mEEOV zjAS3F{2U8)?v$a!wuP<${bV*dxAM5=vW-FPZ5zA(7%DmZM}ixHeUh0>0_VZ@{H<}I znJd1Jv(<&wyfy)s)e)W)&$B1=uUFWoRpwV&N@>cwfo!AuLa(%V_!uz5fv$!^xE-kE z6$yGgIk%gvz*}al)oL|A^FqL_Wt~kP2p`P)E1d|;99$@yeQQ=PFRD!ksLpcb@W;SL z+-0^}iO{%o@nEdNt`mr6ymjScl0Xn_(yWGqfcGHA6LgOs=C*DXfG7dx7lQ~_h~!QU 
z4!6nTs(XJfa;Z;}ch{)ny5lzW+V>MO!MsN03U*+eQY7360;#BSMIjqIEeF2k1Pw@} z1#^1F4IfObA!30sRK*~8x|3MX%N_!c)mX#CRI4)nLhHSSWP_~6YxwnwW8<6iM4`Bt z%e8$AS+LA+w8YKnOnYXxy1Gk5@P5jsgR_IZ!{ zd%rxTQ!k7+<+N@Z#r3#8mjHf{EQ6ymlrUgKi@I!@5Tc7fXVPxc7)ra5$ovA$Y&8d; zuhv+DbSyA^)6l=(kW)Fc?^ z%e%M7^7M)|GMa{Y6ohmzb|=dsWcH2;u}c=4Eu1C+IertSQg8QEHic7;=UEl-tA+5b z@9EDtGOfQHDHnskw3pN;@JH7`Y_QW(XTgzz)gwUCC`}t9>!jl)$TkXjAF) zFubZTis!PnN*=;#nKb`d@pOThxh=r&8dxbFBb3UfQTi$UrNj8uyTv@`7rTatPu*yy zK40K%;l0`O|D|Y92&j)NrL3umHv{3Abp>EcnG`Z-rj9-5`?=5cQcjBp9DUnse#hA3 zc-CgPL+I9mtFcBgZ@Xr|1xo^~&PSwaw6DRi%$Nzy*i)2{k*Z$E8Ug6A>-w~T_1bK? zq*_wF7WcH-hSx%6Ki_15HcPL=%%+JH}>V{FWggS%8 zf=6_m_p+Z?lyn&;T-TApJ!;_P6Q3+E9&8Q;D$3+GOr9ykfLNidi$$I*R_D)5U1`{a z1H=AjCQib43XFd-HEoP=RS#t@X`XS7bnol%Ab}{<9GKq?;~WbVAhGFhhMlH&`i(JN z=U8?sKL0%dvybB&wQ1$W|HHfhT=7}>ag>jg8IKX>fEf~~Ld>H;6?$#FzgT7+^%|%b zbi7X6Apl+I!Rv>0|JVS=$oKBr#9-vCQKfQUWP7cl05wl1rfE)01V zfsRpEUAh~1yZ(fj_#TGm%m>B_v2FR-`K#B1Cz#qKNb|TZSgbv~k*+hVG{$8&{c6`} ziVAN6Z`k=ppkd6Fvh>Kd`uzkFG#rZR5GJZlVhw|Fc&?dXzfG}Dlj$<{J$~=QQ!DQc z{Dw*kn$Pl;dunljB~g83Nwfi$OP(%2F$5SPob2$@POtvCudNlo^>+!7-g8z#$q;5MO{4d_C6vJIs_J0Ha2*ydgP3enzO$FM70B8B%XnoX2J4~<}8 zk5$g?bNBAHUb!f2>ku4CiDszi$J9?BXzTo|a0&7%?^tD8I z6&qh(M~Q`w{4>J%TUwm!fQeor=Mg6X3`iiCrt)u}+a)Om<*8ygad5-h_mJRkRA2t; zKfwA4o+FPp!u)$Ge!smx7Dl_2XmGFda~t4KfioIAZGv050zCi;v-QE;JI%V8qOwe| zPq8P3F9NlT(|ZFiz+US8r@rOo+qw?3GCtdt%gRC@ep+>)^C`B7!h%~w34j?)@k9O{ z%5^{x31sD~He#KriP@{N`KD)Vd|*VZXKnB064yNo+$$V`jlP)HqX~7cPLd=p%aBUV z`{`qB(m;E(aub{$784*ML9Pm2wpqK_ZwyInvl{5?qNAC^t1;#ipIHpM9>NSadW4GDq?d*O21J1-R{E9?I*8a8Z*r@K z>qfuPKsmZ1bU?H3BU6Tlxa#uDt>Jo|%~T^vW8@^Oy6Z5-F@AD6*lo}VkOcssO;FYsby2rE0vY#{w%E$U*tu{Ht#^;tKL=ofSTbSrCS;habiETQMv)GLYda=*! 
z2h(r=dI6yR0uyjHf>Is{5`P3Aw+IL}f?P9nnB^3JBOm#}ql9c%f;#{p?m|#*)m|4l zTC$r@BMd)fK*}4#Lpq{6=Y8F%J){u4RTh7e`JEVCfjV1hB;C5lmTewQ%yj_mezqQW zxxU6!=XuB5q{{?$ctF1E`TQsfcB&`{ee!BSh~Uvfq*BGXu?KUwmw?hb1`AuCgZp0| znzTv2fb|jt!trK_SCCTJ+=|`_jNZ;EGGpg7B_EA6KgMsXVcq9NrE;IusBQR1*Q8H#c#u{2LfW>nDsl_|JX z0zu^$%vli_$H#&4PV(pUO8&;N7GP1pn?*x;rF+&t(WC7}-LhoZA|0b}Kf3_A)Et+6VVmA*&U>mWU*ih&~c?IC6*mCJF!b%~%z* zzA#muQQ7V8uXSZ`{F;Up>4rA-+zU3kMS{Fw$Ci6U#IL?}h}sV&^I$&?^3}Sy_qd6T z*4eJ^?xlCF9cM%GP(k;cf-3{Xkr>C5_Czj;bYB?5JM#Ios<9gnK`W}j_2mm$FSei( z@qIW-8jjQ3@9sbssOsN6tD?@cN5QRrT$>;kh<25dl}rPNLg)>43`lS;dtJ=eg8dA{ zjt6s?yUFuz=lRJ~U)4!p|tmknSD%j3I^EgL$Q|OKd%{3-)>=% zW9TKkoVRf>nBv*o97lpMJMLn0a8U>Z`G2K`xac(5K*llZNiHof?S$tSNnTUw2$&ptobcnA%5 zpsw=Ie*Wk?{bK@rNBS86qa>!APK)nz=o`0A+6VAyZC_aKiv=&&`wE=D*C+YOYx8(; z#YH&mW7B#s3Lqc%Sh5#MJo#tCuaje!ueHb9g`oY3AJ+dSqnw zzp&UP=F99Qu?2A~p(a*o1d1RCHWL7$x6|V8i?y4kLp?VcHR=#RNe7A|E{8eA#!$kS zbO!^p@F$+*g&);*3j$2iJUF%Lj_duHr*|c&Bc@b#>&ZUeH6Vj_6cq?8$G+uwXzqi; z@qSlK#@$ANeoQ~T$9nU>3%`lz#;$VMeb&VFu7`mPVjYVWNcGI7yzxcAxE=A6*J%gk z%+b?yuKB`O|m3zi`?2ALLrW?F)Qpo(~?`&YfulAt4#&cjgaMeS~XbVpFq8 zxtnSes?8t^dxOT9Ix(Xx_>VbKb=B2GXKGWt-uzMA0dv=2%+VtB8RKbVuK5Nh3Uu1R z>;(xt4yCKo5DCg2W%>s&pRt`4@wz*)(fo)>JDZu+WXM;>>pnPzeq|ECSHXLIW#CJAVb|}Pqi+C*#Nt|1Y5Bo1|VbV$7RZMxFZ<}Z) zaWy`F^2@2J)2;dI&JUnBCOz~+PPPm|F{!d&-s0c$Xn7OskEO?i!M?s z7x85>6KsYXvT0}p=1o`(9``>@z?IzPdXcKZSd?E)kE325hh&&x13nV9Yh;RmqjO5H z=!VmUDdr@1FlqS%Hy2*OBpBKzOX(;m0s6%p8&j)l-=@JRBh}kSjb(n8^c=zNP3p0F z+3m!^Rp#&&gYz8xsuCE2*_Ab2`cbw2>Zuteljd2|&`|mJ_fw@rGc}e3hei@xnAWPS zihog-<#P;@b}4rq0p6!pu#`uickd}}n2?PQ95P0m+vGdNFAkgOW4h%?Gak!J=b)l0X&$tDZiA1zOtBqmIl| zU?d4FshC@0^G{?m+F|E^aL>N!wuNJq^h`jdX^e1(V5OPz2IRFquqk<_5UZ}9ftVR@ zn(xb5_1c?(id79pT(ctX z{X2Su{3F^5ZQ=ffEH<;@&0vO_Q>uDA%P*(R!>cC(X#yusRy0%>gRM@7x&fcS%_BNB ziRwB)$Qi5Tc$4&qkFHPgE~%ARyWVpa{(}E$XjpWJ{^4ttdvMiVkiG zd2ugS3=veJL5D~2+H(42ZKLLb>7r=z1m~H+^-$2 zw~uT5L7E5)4{Q1BwEZ|fK0c2Wr6?;iGu8Y@9VQTQmPfA-4x8RzAw`4vaxLKxED65< zkE^c?i?Zw5CZ!pqJ4EU3P6ZUDq$H)04(XOuQUO640qO4UQd&9(k&s5ZzdeA@dmrD= 
znYm_W$GPfUYp+e1JJhAixKF}5PO#e6_I<`29M|NP_hw!l4b(bfatQK*0X)fTuf;0mP%8{x{nW^8NCZQA#^ zaJte~-mlAEW=Xm7I;W>6?#E}ZUK;`VL9y-#ieOXQrra)Td_yiT%Tmqn-A@aq$~`o; zvA1S~$-jd36G{e2B5r@Fg@j1?ZD%|7@Vf1x!Bk02>acQ&>R`-L^qzSpYLimUd6ia> z$#73dQga_JFFv1vm2?2@7!iQK)xVOqJUoo5HfF%4U#I$aIc`gX@$_)b7PU9#H2U?+ zAvl`H6~c})8+dql`wZ#b1^SfUg(eMS29xBmhF?M-wQG0s0h|wvVbwT#RIh{rxeU>Ph}B3x+hYtMhffR0 z`&(+-Z=pRF77~IQ`SIE-USNTQW_Ku0jhP#ZRA6W^zS3e0#F2`>hA$!?i0S>>o)Z@E zI^&5T7kjthExi6#bW8Yc3+PkwE3n0eQMuN=GOw3OOMpT`)<56_1OG2%QWLH(&d(U1 zgpFyvl=JlZGtx4x2Kp(9jHB&bkTo)WD-2Y9dI36mXFhWdmvGp;RcSz z3I1ckIeu(cT3iP(NOA;M2e`Y1?{bz3(Vo}xukPAYp9JW# zO-Aib2cU@JLs0z9mD&F|snf`Bp}4bNq$?WIC5(52q)?JWD z{MEtuvEkks8Wnoca1cv`N;UI_nsJ2~je;&xLllygX-5?jG z5wwZv-SC}-3mFHuje+Kj+r~o`W;RoK1YKcn8jW*JqgGr_Pdz2Ho{`{^SyEv8rTbj% z1)EQn_{rL&`iPN%07Pw{g*)-NZXgc-ry>S}CKnox9G~VU&1#!uc~w9|w+;qJQup9g zD5I8YNhvHeym=`%m?Ek}O8;gFD5CeEJhv0Qhm9`!O%qPrVRrL&qVBUVtX;VLdJ@hr zW6eg-HbonXTo%!eGcB%|jV}^gTU)zv&*>VRKkEPi`Ot`R7#$!g#*rAF@lts6zIAC1 z_3#Jwm4SXCn_11qnb9$ugEda`vARlh=A6c``%)b_=;SJfW3l-fR1Wy+c6_bEJD5*E zT65=_A6=_V29paOzES1TeOx?j3K4Vj`HL53(lr1NXxpe@%YqQ2x{lwwbFpYVq~^zHQh5-!C23HJ6tV9>W#aX?Q8b#gune z-WfWT%${D>r)aNk}ZGKF00R}xOY)n3I zD$hBP)a7V3yd+pyGO{B$EcCZZV3uoEH5}EM!VaEX^J(a=-}OQJY9GFx^y{CD%zVjZ zYwK@{>w;5Cy1QdXeM0xX-s-fI?3T|zg-cH>nvNL{)sJ6Kwu|o|7bPIzqw4q}Afty} zt-df+8*VQQm4sAJPQJf|A`}k+)8nLr)9(I%RHHjG@yQT#r!&4B1w5n4XU)*%myE-8 z&z8R@ETmD9YQe21Eb?PJ-k1kC)qszAiouB7}t^iQok0a`e$L!xkqWPhq`F$LpoMaD^*(XOGP2n^W`sR62 zDsS|NPY;|12B(x<+NpKKpaUU7=Zk-A?)BgNPp}S;`5%5a$6%Rh4L4?yMQ2;p3}fp&qwd zD5WjV2tL*&9vMG~1-+7p_L~A)Ukf0UEKqYEJjm)ZF4bI!ZtGr@Eyr&7J{h9uo%I@g&a~OnB{Mr7)^* zCPk?v4=T>bFU%OZ(r$7tDsd?2xm;oGF)1n4=b(Ak%+6%@?A;hz>I}3sG_T40?cqI? 
z!$!m-cJ;#^a~Hg0d0>R8?T`-w#13FsrM26^+Xm`!R~#PVPkEv8_*&SZDE=0sQO|u1 zAoDo?0jM{9OH1$q1ql(k1cJ)#IN$h#-o7tE;GBQfq%JVSds79kn20%Nt$4iS z547j-q^LTnJ7#>%ze!Qq+uKi;8VQ4|Ji;11j@HH#M9tWkG>SkkzO$!tv2_f&ZIs(0 z1v};{YSz+fb8sCC2zxJB34gjp_ttUfegD4H6;Y|iV1KLvadO!jrvRq`0yv0^9_YPA z#m1hFIQ?^D)kEA5Hs7aFFX>0wuhU%X8yXs>1ah$4Q}X;J zi?<7IJ^Mq-T>24xD(re!8a0H~w7j$u=hp{$M1eO}9=>AZLP!X<`R9A!oIR{7P$?h< zvI$gSl}_NgL}+E5(??!Qb*I7Nh-$5KkUaQvaQSoo<6oNS&brN!&QJn<#_O_`ywUcn znWnj-HcF~5{ayESDUL#-{u*R-);(Zj+g=8_1n5rl^-+8kDAXnvFvsCC`TkbAE7ZBQ zRdOR*-SB-x1RD#cWFDr>n?o%p-n7G`D4Mm=-0}}pef){fMFVkMM!%}bF?LxLnnr0d z_yzOX&SNUJYp&0eaoACVrlT~xor4Hv8NJ3rpHAwYVtRy8oO_SE=}%reN~n!Ym3q~G zD@2b5x&WoPQJu)-N38#_x$2hpd!n^CrD0mO(u|l9e9KDgzz*}ngbIxp4{3NZnHJyj zetE9%mVv8aWwnWct6A!}TGXwoM(Z&l&mP!i zB?E|A9J<>mn}zU8y?+pNm6T9~8rk&c;ByONcpNZHQ9f}H*$yOdK!tgh!1kYEtf6_X z(t!C45@;N2t>?GiEs%^LQCG-+tp7oLN|4tuUyKaXK8yNy8~{D!dHgO7AVZc)tK6e}+GME~!39*RM#;EQ)-ULO z!XNsu5dH|o4JytGV+Uc+3@DF#7{_FtvkNnyc+IT~RSMVHi0lIV_Ib{3i7xtPM zkI#JOo0kV`!+4-7N8@-(kn!;ZG%fALF-jaRQ@1LQ+$jMydO&cn(e|vMCWlW`yPH~P zT9eoLFW_`oFOm7u0r$vfM4^f!VN6R+?o^PPQrPE8M4d%H0+b9)H@fVksy1%ts}%=* zdyxq0Vm;TpRiygR`H;gK#Le-oE5`}<98P8N=#0Pc$_sbIDEXo8?d{cgofCo{K?WH2 zO$Jja+#FVAzV&^HW|2%H456~+`a(z_q`5tl3T#x-`?GQzP@GMZAroFc7-S^6&YD5Isx-n&`#>w zb5G=`OFO)u+l+8E=>_Xe{IlLIU2)%+$?dh0*Y<3!k;mlK@q*$aa1}U6Ubr&sNHeJA zy)}fRRgnNn<=yB+PyPqmWaI#MO>yJPGg}U)@A%bB1Ua_fc4o7>ausn6NJai`K#Z#f!gQ`5KH~dk8qS9_zV|a6 zDJ#22aHT{8ufyCmMOj!df#kI4`&vSGYXDWCagJ=M>Npt)@<-FXVNCXl8%klj)D@yrP!PVFcxnUYkk*{W3 z+f~Z3cwY<0|K4NNlJ9Xx=-D!7LDM0?)rZ@|=ri57FZ{sr{*so~wW|I7zRJJ{0W9omwwZwb1Br&dXM25#b+d+cHO=CxwuAn%#N(IPo$7>lXRI*{yLog zHUIgoa7(OYZn(vnamfP(1%=9SRGf}(g@gw4anpMCEXp7bjGC`rF5K| z2HjH0JGN1i)B!ryTwku@DsVrkr%p~ze!|r}-X9E>PUL%I6?am2w8d#Pxhp(Et%Za# zm52POp)D!d;%}-vqrXw%xY(Zko{{idbKAG7UQdx7n&z4)g3{9F)T8(M3#4k|$Q`WT z+f*L$Yz`a9(*^Hytu2n1mf+n|84__VR1?2At(z~eGu<*m2oBM&k0;DU4I(3%r<&w^ ziEf{wevNch<3`FIIaw@qW?a=qeDIto_}s)K(zlkP<~|@$Ajl#on6p*d_e3vthiZ*) z4Vf<@%usp!*HBgZ*4sMoyh&s4+ 
zp*SBxd`)%{l8@LkjwTt@q58N?7vnV=E@TobJXnkR0pX;qFm5CaNK`B?`c2` zgv_W=mwE_Kf-PAVkKON5>cbykJ=_~Hf7;$|^8zUx<>g}YXSIdT9zTChOfc{0?;8OV>^jl2OhExY^nnqF~ z45E%QPkOsCCC4jeBPqR#uE-8~>SGvR-qUA)m4}Ph-Y=7phl`tSW~DfjgL1}7DDQD< ztH~6KE}Jv-3Eh6K!M>ePp^nUcf-nHqu8OknkQ!D)o7sw58E`H6p%hG%Un<00)bf*x zX_@A)gbrzELWx?B<6`I4g-5>+UPr(2OL<0s)|#wwEyzyr{N*3gZWH3GJP93Ij@ zD|@W?^7qFsrp*aXp5>J7jrH88l_F$Ue`2Si7fxEhORe;;7J#=4L~S|H9%Fy_jZt!y z?`y=-j4b)+7*iw*DXB4BE`aV)*74KEzOHCqz1ObY}t9 zcFOG}^naTE07D>eJ3Y@mXC2%CUAJqkbS+Rvd^BV``1wZhL&Xsy@;uhiJaUe6$#v>M zF5;~>TKILmeHQFvPf2JhJ0=-x7WwkFR%T3WdwaV}?M&+BO_T!MvRzRCbEKn&{;0ANR;s0s zv|9i-8eOR(=cfCtcF>CN+H^Ds;P?6}^d?NMCPqI~JDW<|x^J-yN zYBO32=`bp4>t#?4Pn@Te)Mk_v8-$Z|Y-4`w*~2QO;pXCFTm;moHB_S;D+w_UkyBP; z%SFNNURs6YW?A@8GNVp$-%&BWwU?D^$07;X2s$~zpo@OmpT1F<`woXNG%F(@em>U- zj4_h(lw z6u+MI=SPFRxquIaW~VeK7qgZPrW4ZnbSoFpS_^Y+oGt2QPrY0}PJc z^o0-LwrgdeHU(Ke*Gy!^ux7RFBPgZ6J7+(PEv856aM|0N{Dh;boQ|o7zndpvoaxEa zW3KB4sNlHo%@<`WlTLabSUh-QX`{$?1ybSjGAh3`=$EJ9YDanI{jXwQ5 zd{@QF%IXbTL9P9gsreV86c@Rira;H5cY(m^A^WuyF3M$sYnJqd9!WIsy{o6&F4Qkr zyTW|C0Hez0oDewDb%=~wfLz#UC{@gkb|0eVZKnjBYMO>%cdWRGrcnLeGBd#4@>nAS zB=uTT{vftjxshL-90Z?Pk?=3FO(pe>f+q9=UW%=fi0VhEqh!J7faDr^(lR$#8_n|B zhb{X@uf57$tTm0wvH6j2ASv)4xeAB{ETmahOXdJ#M{u%z_)W=U1<4!EpF~wcEiR-| zYqhhhgHIK*Cj0ntX4KkrA=y1_^2|Jm3hqSz&u&`C5BycKd#kcy+*k-V%6^&+sOD*9 zmCXs7^~Jvjw*%;NqmJ@_dA{IgWUum_1yDu32xa>~ZSXnr41%v7&}11!-$A3;EI8fYFK91u@ng--e~VD}UUr=3zE zG8XF&Gh(bzhRt+Bv1-%N#=~Nag?opQjiCmRR51&-h?}{J-~f^jQN%e5#$N{^YtwepH5@e-R>Z+Gz3?r6Defb8cb=M4z?_f@K`bE0xzLB?it!bq-?|BF8$t z%`{7T5NzzLer@40O_pN0_XPa5bKkWy+vU>`k5?yU-|*LYv$IEd5Oy%ugV{RC%TES5gt$Tb@rUK^6zV0&-H6mt^yvi@U3X|;oUBw2ec+Rt>(Jaz8^p?sT1^0c7mLb$eVDQ zZR;3+PkI9jRY~!Pjz5LO$&VN5CR$5ednBAS|*TZy~@KL*?L_X*NVY@@rs!`3sV8ys|pvz z+pCZnaKbZ*&!dWJ|HH?H_=H(QjRV+%_qb)o!XwK2H*Onzv47nqgmt z)3$D%3?hovhaD4eDJg=E_ry{}ULE7#MB(}BjVwqB-A{)f3|Rb8x>|N{K8_YBL3d{X zMKFLp2!x~i^1N$UDx|)S{>U!~K)Z#;4($%w5G1gxaFMz z)Lq967H}Uudp*8d{*hjhQZ7WW^;?>xR7heS-9fqxnDdZ<9pl}c`N5p+5W{tECPFQZ 
z4(3b~nVbkf?5RZUb9E6LfRiDD=Dzy_-D~%C?ZaEeDM1Qb8=0Y`p#R%v>Pw&$qxd5D{da*)jr(x#@qr(JV4%GI0-cfV z;Jzh1@8Z1D3Jfxc9IYc0WV_uFYHKhpIWn^Wshh!Eu>-lkPNi{yZA`Ooed36@)b3aPOv{bWT=V{sk z$jg9pj?RytJb7R?fTCPw^$I963S2|y5=QehoL|lfEa<*5T$aFmj>BL%4HaTC{sZ#i70C18nQ zvI*jQfbxi>BjMJQ@+1WQ32Q*jukczO3h8HG`VHTENJ||dDAAetklcH-W&}1JPpac7 zV_>JD8x5u5Cpf*v;>#Cuo`ghZNgoBv1yr3hx{cAY2C)U0Z!zwR&_oAdU7U2Zn|xoMU%#aJa4;Vh!_{L^*@TZ0LnKkcbVZZdfN#WA z=phmE#t4?Q#di*Pj*CGVqe~>Nfl{Ohcc?P9RF{*0s6$)q1%bp z0DTw34u1c8za?G5B1qId4nyXn&b^d7t?P@B%K&ze>6e4~4Hi%tK*)-)E>KGXG;%QV z-_=3=r>LCh5|3QsdI!_yVvWS_AOpqV*t~yY)<^Nr0|#1R^8Rb6fAzh8D~sRb3Nv~Q z+^Z3{#-Ie~6V!tJ-!`{;@(`w8M71Mex{g|znr5x^C*FNV6BG6Q-N#X0ch=C46$Y?W zb{EsB5RYm5!?eARiP>_biz83%OxwnQT{WT+Pdy@ydyVX^jk~ zaNn$&uMfccqW)^<7#RE&Jq{^po)o2x#{Nn64jbb#F`%nbM>|56!vnMyRYPnl8v z0+psKNz}u+qTKmH@PA7Ng2CheZ4GRKa5d-s%$~Fn)sX)^vr7dpO4U(T7JQvVukq|= zSx8<26ok2L&jPrBji_M*nBFkmgoP+AUx2D+q^F2WWfy+AGI+PF_;N?!szI2q`+wz2 z*XT9pE947NOB9d)x7HT`UXW`6`l4H@Vt~ql*Yi|&-|_vhe7OfLEhx4q|9b?d2Ekn{2C#=MqCT*+Ef^O9 z4BIUNvfJ-&UaIKBpw7R739?~bICjVTOPN9!e_O)ri|yiWf7x#ogfFzbs)BU?TdXu+ zeT|{SBklU;n1x0|$equk;Gx%{$akutBINv@7>+Ube`?R65$Hb}%mzb%{Zb z_wIHlBxRJ3s_w#fP#gyKw}JnE`w!-rMgyA=#?6Fw)!+}|qlGPv-nhDqPSOt{9e7Io z_)h8Pygqxok4OpN`+m+}?_Oqp1}}3T{jg`0@C%~`rxN{!RlX}zZac-rIAAU$9xHvE zyBj%)4p;)6Al>dMq|eezc| z>hj+*!Zt~(WzN=m5wQ~9OWV#7HYiUi;OD8<)!_hH_708Dx3Hp;1H(WXdE>x-Gw3K~ zu-ZL2^n+fv;+PLUtzqgq0{bdMWOwYics_R`<|)%0UE^~L4__CU{1nm4-UeYeZ=y=T zyDUr^L22P^@7cf%moEfu&COibvi2gaHyHb){8LAG(c(nF0)g8%6%s5rN*+bG^B%Ji zg*~XneAC3PAa@J$9@yB7Gk>n=-EKV|&=exWM*2V}(V~?k$jS&=Z{Mu|)CW9zvhc;e z^FM37C(Obw zh*1^Y2Hl>ru$O`uMFGIz>cT_B5EZHzn^w2$&H7^Ryx|L7@Q9DNWKrt>Ni20gl89@0 zq+cYD@FNA7$Wkw%)tsNdrxG)1@CX#XM|=d;1%rY8=dKT!)x|YtE#l2fIWm|!?D|p; zKD%OUvG`|cU{6!i!W7F04+pQg(Z#5hR!uFN7AygWLEfFaO zfC}CzX%_QF>Wr1Zk8`cHI?mjz60{6_$`)OU2nYp9q**WVOK-}i<`hXAVw>pOxkL?s zWzF1&w{ySwuUmz5jkgQMl2AX{ZC+aW09(_OaNqjo)Dyp$1#(WJb!6}#4Zb}zcLMl_ z1;$BG!DVOPD9>i##!ucNq9D(O=sKM5}a>nwNng3!bY$C+$|FPu7NFs 
z&fTr{5Is=8V)Ru&=(vJ>QBt?)5OJl#1U-&6*TT}fbpKgQCasO^szBo`HPvj0x2fY7`@Xw=`7FqXRN4;BQJOo;A2ZC-6{DD?s|Jxmf zUc+W~P2zyki+p4?6MmZ7->#p1RUyB>To=fow97dHgSFjLfa$oJNl=r6$^p+FBm#g4 z@9bzNeD`PKcwiR?vBQ4A$f$on7KOhj_J5Q*9MF5Z5+D9JdM3mg!e0Kmb#_SR+u zP@)ijy7;!>)IICvr`8?FlR|zAVe9UDuz&ajaNc_Ehk)OcgOK9s?2$SJjDI}1a6*@4 z?9=%_@KaxcRhGNvhNAx)0{AL|L`yV37Vsud=?vgzk!BHKYol*;vq;jz!*uty)l#^nJU zmpMYg%Ew`ar1QH4Fvf&QR+pgTow>V_?Kjk5J4mS=(Qmio)s60*MFTVn7LW6u7x;$f zby$tmHKD-noK1mtDRD4lOSXT4Y%!dUb=P%~{=(y>cZIvW6^o&f(QBSBIOOENWSLi0 z7@-Ni&B3Coyusnj&8A)EA{W5%_Fz(kntv7;A2#Z?|Vj zl7*Z`$jjS5KgV8O*JUGq;z07YxO)bxeK4i#N>tcxdF07#jW-s*txj-QWVPr?GOetF zNK!K#WiQo-U@EL9O|pNNT4!qwIFQ^tNXXg!(RCUf(8BYZldO){4VDGv`6svQC29=~ zzm}SBXA8LhrjPfR-j$ltP-=d~OdKv#Y^yBV=e4LqT%^|&V5z~jw7wi#vvGNqzxK0Jsd_On=Ss^9P*YPImP@|)a*FU8pp1$lF)+INx}tveF2gYL zR(N4+GW4TY(6_UsG5zBAE?roPhi(FX8weiFbK%OD2gxs{(3gjbWKru#y_!u*%T*W`@&_2HjsUoFxL3>6D}#=D;OtiR5c=YUhA zzFZV}8_i0y5tAz1jXqE=T77HS2wLL%@yv7?_qjX!skds5A+)EIa!WaoPvP1P-RII9 z*C$EHFS^^ivs543gXi12PLm36STfg-C3^wje7s0IK4YktPte|PPCYf8H2ReTDYYLv zQ>FJ3xTe*tc=IN2upLbYPiag;Ys?Mr6%r|AiuQ^ck!S=jlvjVIA`UyB8|8nosV8&|NqHCTeT4eVxI1qB+=;V%y;sV#1B?j_4Ra%-7w{uWqlK6Q|KW zB6m+Lr$D}tYDsOUbo}NW(dkH-S?5?@osc1ay`3utp|^Vh-KfRjoRLH=R^IrwHd&AN zPFhNbEWQwCU(JC;wax%f8}%_fd$35$pb6q0+hWSy6#X8-!mwm|m;Xe`P-!q@x{7L@ zrqVTOl-?|uqk&2d5Ao*b+`TVxEGaYpA!WiBOq#D)`~4m*ipQa*@WEOn;a2?v28N#e zB?pcL{2K2=^rP*`5ADI;EuH&|0v>0Ik7XWHJAP*A(TCbMC>;?rjF)`*`JMf!jnZOb z20Pie*ZA)XI3kU)N)yDSldU-X#(R;JkwS-wH3Fn1rc+YSN%+4ndl$XcEVZU`Snb_- zMpTvMJBV@OIJq=h8of4MV3bG3K3_(DO4%E?zHc0Z#+Fp?E2`~6b74iyre70)u}FmD zP1jd-NuR>@f#`dzf*!MO>d~j&DOY;A>pN!cZTxE`qyQ{zig6 z0wspNWgYAX5f?INgL$65RA{cd_3vSCn6as8OAKD_*cwC%a?=?sCMh;}|HhiP@)whC zIGn1Ti0g)Lq)y9g?MoI!Lur~Rw5&kmUR~_`OR!mWc_UD^@hgKGUgwCEeaW&DB2Du- zj{EKjv2PP9EFvipE01@~6^-s4cMTMhsaVgjH>%vH8aR0{*0tJ#FUJn=gYCgkPIXMt zVHPdc3e5R|P&zv0TZ$pXEIp>J!&2^P-%`%f$z~5KzM`{x!A;_8Z_iJuFgjO%G?$Q_ z2srGT9qbIrI+EWMK)K(SC@1{JX{RribqeK zci-raSCu5yUxiu} z`9gFveKRwr#(SBxO(C^2e1jjyjUN~ELk)@e&=o5mS8CA7pV0B>M~qaFNbSyL-)o$n 
zOC?GX6>4#Ge9R|6kw_Wu1`n|7H-tsYMlMaqTB5RQ!Ud+XyaV-FHE(~UTrJsZb1bbK zlCd3H&TO$(Y|ZPH4_4T}ELquzO%T_v@`~LecRLMgS@5|Cv~8aHP${pmfZ;5l^=eFL zjB~U7BQ_Bg6&1wEw(D^>^}(J}khJGg4x%>!2jd}|Ln;-v*Ad@ekx+>S=+a< z(=jC=VDg7g z>=qiMr>0Y+eF+zT8a>p>e{roZf*!uzarjg3pJ7B0zuSCJizsk5nr6=JSU}#0 zi;)p(i{QTP=7Zt!Fs1ck&X^_N6>@nh# z5mcy$+9_|Htsu*kxdIc%y^fzx8l8)nG)tiS#U(a>Nc%anJp~aTrT7eNd*&gu(c_sy zvoC`;LD?glYA1$EQN7Ey;qkcciT#4)Zx0?SJ=?$cDABxlrj+ix&~jLg&}%juCbu&( zjpu|ZN6$M=u0UN-I5HDGd{XlLQdzEe#4gVq`vuoO7yfbZ?U+g!yz;syzrxnGQkkgI z_l4e;Qd=$h-4paFrX&%yi#H;AnQ6k^ud5^Tj}=e8*j?-l8Hil!aN@gv9cr}KVAF~C zLZ$FCDce5r+nQKT+#GL?hp};Ly0bq|-$s3RG8CsAuTc3zT@oNhE8>5-=SlI z3om+H-y9@*=3*0=3fWe)pJ$zRd9ph1&$Ht{Q*C3|CA(v^aW>|>6K#8RVU4m2Y@p!S;r(d!#r?vJ6pZQT~N7t`nafRtN zygaQZDGjHd)9QuQXQ7Ig+gaMpH0I+~(KB6nWwOEFir*aV2%002R$HweO>P2BfscAn z(p^^k8Cw3Hn>W&CvFj0I8}>23>SFL^eRKDF{Ycs!+SA>(A0}3iiv^!Qxvvd&Bu`IH zTnjWy`nH9l2ePD4F-pt0 zqSgv~#`dNNx;IDajujj8l&v&0&WTy$m*ozvTzyF#m-7vSF$_UZ9kXAru295t{6Y-o zTM|#sd6h1$21hcRB|dQBHf?adh$oZrY%9-sMvl?i@8gb(_v$!Fu0kHT-R+1`x^ynV z#o5)T>Ezj~uQxh2bOBr#-;DxD4n+jCotfMy*+;eoiTatVSWhBu*-YU5u){(< zjO-TP|H42327}p;j_oiQcxp)uBJOIDK~YS_yb5%Zf_zgv=BU~!nF5p=`(7rPi8)-T zs^+RqY+t^v)KV@sd{aM0`PpXLzX3vjePpFyS35dCm@FFpg9zkJl4nA~8Zc-udyacD z7|!0>N4{gyDAs8kU?nR#pXsj>R8@=VfJJ1xn(6dPwK5b9I>T4~{#xqyU3Q=&f&0KH z>lyz@p&b((^VHSmlbHO{oIG$Scct{(m4iRx(m}I)s~*qnt5l^{g80V#w^Gw)twU0S zo_f4bhst9nrY}x8o=7mR`qy$?_($$t$S)@s@rsfrU2Q-rEHz&`u0y`{%haN)&FYOG zpzRng|0?D=FJAB>nI^M#oocqN$a{T| zoFEE>|If#pbR)6Ne^yeAE*Om>o{4WhC2_6f_j8Bpi=Gra6#`bKu*|c2VGMgGH{;LN`CDL>TZwd<49T{#U6X3f#^$Wka(65Si$i?|kb`LQY>> zGm+m5zV(RK+)(vG-#5p(?w>^arku|LtY$}0JXs~QHKz>B%SmHmo~&0*OMh{>o?rk` zGch>*yyLYUGUlHuPBm+WVQ8gOKD&4}zdlZ6Ep+@gSF58ik$?BkZ%B#T+%sJdpY%=U zHBLKXbVA1Ul1mU&#s6(JzbH9wMll+j%JKEpG6SXSMW;H}JkFDFRx>4CHr0Kv%F-+s zXma-45d#8@%PO_&t?r9a@^d{&CD1;el43g6U!N&qj$`^N7+m&dB(HJHtdx^8Ufkmz z;lOFfBA*DB!oz{XxdH3RdL!n6lq_Gpn55q*fv=e!ucCgolT;9Q`rOZ zF7{S;=#``y_CO*5fited$j@pht5q6)FbSK)lTIx7B(B>gOA!_+7n0d z&Up%LE1Iv_p>Y!JE7sO(Iy$vQx{YBGFE;2fbaJnLHfb7a^B0FP;;b1&ri 
zf=@|?7plc>j&fgP>-3(W!%9n2@`6Fl0LqNo5IDJw%g%ch??+qe6X)S0t0ehekpZ1^lTnl!W;;T*lOjCz1gN+8OuCJ2h zr{xOk2TQzNS89LRTn>m`{op(>pX$u2%7dZ!bF3C@9gOV${XtoP5cq$BiQwH zRqT4}ukby;a!|5g2&lf6(akdqtzTZ^z;25h~`2~_b#Gb*_r+- zrfPy(m~XJpw|05N5b0SVM0I_LT32af>@qvT{<+;FTKvi9f@@KMT+-{q^QRTID)g_K z&t7P!?+pE3$Vr}*2n-_C@OkB!5-E0xYx2DjonVAyyjs4jB(yOw_d8bT#EwJ0LqLMr z)Vo>`?gh;`s;*Y9B_>y)-!IZ(s^RcBd#q9S&@z7|x!QsI@5=cx{BVcm`8V6$d0{W! z?n{SdQxUxrwCdH03FXG^#L?$^l3CVqdR0*65&6Ivx+Y;v@vGfr;D@dA8PN>{{AsdX zzqS~E+jq`$%Kw|$A1&4M{jb5;@ycxAJT%Oe`xk!gzMQbcN-!MCfxHcGm+_;A!95~; zda?MW&o==9S;@eTu63uETeTk&g6B7qn_ROjkQ)wVm~|m@tF9dMO`4Ykqt|qLRL%To z3oPYS4@MoUoz^D^@TBF*(a$B72}}JtrG6;;J+~)VbPEAI)#abU9m{O`rg^=-+jBPwD=HPKxV&9yu zQg+nWFPzVN_2l2DoSwe@Jq!Y$)RXI#%!ywk@NB4D`&`YCT7gg$PGUXZ^l%?No8uaA#9TGHL)Ct9*P z)6duw9Rz_TD$LFa3-rq1y1fNA4_xW`6CzjjpIsGRYb6f^CbxP>pRK#s=&32Y7AW11 z@v7t^ZJN5b0(}kc%))}%)z-O%Ho&B;Srm_%Pkf94cUe}sCZ9TXTSkgamwrZeZ@fBch(o*#O(&Sww>e z(f#`_){qv{-UI?c$7LBiyO6n}o#vmA1skonU#p2e0u~sDNg_wgbrnNYLCPtqtXj!d z4PWrOr=?y44u)ahB9;`p`kR^o-5Zofv4M*d$7_7l-zCBB6ccPOb@yNAHS6K+gpq9@ z44Ep#N5o=WotJmL34Vx`;9?#a#7ai1k@9;i@@vM|;q>$+l+6u(+kk3!=N1^D1MxUA zGKZYgJQIZsv!|59Ic_2`Rvu-(qf2E?4!ww6u11De&B89U>@N1;y>4ub^0z3+`#5TS zAVk@d(oZnpVXbC@qP8fxF@4oMr;snCX~=4>lwMTyfhGCOxm2wM6&tOqR6oL?3+3hJ z5wmtWc~^f@)WzVAYK_k*k~_0U&TeSlto?OX-bjP62@7A5R^#Kn+Vb=`LzaLEBhkw`bMsBAMbEwag{UtCnZreF}OrIqXd?YIT z^-1=49YU1Noa<)HB1o_H>{i3UGeEf!+~=bm?MpG;YEZjY{^9$i7?#fUaMt<1BB?Y9 zU%-gJW(7yCfLrZ5hcgQ+DffFz2;IqgD+Z^K$^=S@G{GTTZXu`jOhKRjN7h%zMcF-X zBS<45pdbyRNOz}5NH;9q(%p@uNEv{1DY0~SBOx72cQ;5au*7?>@jTD>_x{K32iSYh znKN_E%yq7T^TCR(we`A16yxOYZOt{3E0EnWeLn)f6%DbfDs}JH@M&-?ng^4-766Bx zJomebE5^W|TXf(Vkk{m2jn@e*pVIsC+BWO;MKn5(}^P>KNje@s``)Nj#fI1Utk;(BC zo>q;IW5hGCm-M9H;nab$W(ll7j(4Xd*C`_VmPYu?TX2 z4~X6w-68vvHbrkR&j#%AIj#6yfbx{2FQ&)P1WtH>+~}${j3 zsjAIA%|oSPdW&y^&04)Hzy&)6XeOl9s%%Mj%JY?!M4Ku5@`qt5JfJ_sI#%==P{4u0 z(?lJhizaOk6vS7?YsA%#C$xicO$O71c4rLSnz;CWX+r^}ETEjRO$n#1WH`(G*LP3h zF99I>H?^!a;r@kpZ-PRQ_0Lezj8LH`YMvv@NQ++k`qsk)dRNU+Hq*|$ds*ZY$)^hr 
zg$7lx4u_NWh0~hQzWw;@%G(9$OBk;0>Z{*SUJiJvD2QA!9jU`(Pf5>?_&v>=+?M}L zbZa$aFPhD7&Co{w&7INsa5qMl$Ij2%^wFxJR&yfzT_E&1H*jO1%WwM_}RfOLNt!bK*Oi~ zYn`g&=7jCIe^v2@wz^r&2j4KuTbqgUI**m@tm=SR3=B&>+3KQB@k?@7 zkdP4dX9E8qT!4N85aTNGH~oGQA0_@CF zfp80@k2~E<-dB%{(jaTdbQsBXofiSW4Faz%ZL2q~od6J*J*)$spJDA<-z}@CH+|Lr z6z}oWpRU{#n7c@O<8>i*>Q#_9?*}j3HZV8YQpJ3ogdLuC)Fe-Q+tSRoL&Ea)H;(Wx zL1ec|%a-yRO_4>uw^1|~NyJ|p5Y`rO3 zv;vakb=-IERgrs7=2S84h59^%uChaOi-2o1cp=bhkl88a0U&4OX zms4$ie{A8@>p*kZpPHb`wHz`S#dXY&Qk%=KQ3=nd+T+2?m!r8!DqyllsBW*}rrqs# z`9Mg=4RX50Uy7PkEe{ggx$R;b8OhVjsb?Nj@NLNOMBR6Iy@G(mf8Mr?Bw`&s+2||l zvXkJ`)IxDbrk8a4xUsY*mQY}FrEPAe1qQHD-sY&2?92p#w~p3lLRb^!p)<3X19@P) z0=L|2va~73-TDC{5u3td&rjT5s{<$&GsS%5ey2tQ&6^#And)Tw6)>+pr?o2lQEAdb z*)i;kn~Qt`lY(KniW=<#UiaHm(jy8-4Hc_{c)OJe=gb9g8_y zVYovHoE^8#1T~3=xKh(ltJ-9H%|G$j3S#Goq{5MJO>+f&Z9Ms z`Qk>C-$K4suozqdjDcEmeXpv*c`J412R&T`M{owBKFYcSP)t@09M4peQ`M<5oeS? zNTC*xr`Syv;n1rUp$Ad*ENkW7U6buUfjIG}gF{D3C3O1Jx#o=^9|lB7T`yYCXUyIo z{jB|vbixP5SaJzODAZu!=k|R7^ozwS>cBE1&chA^>GDKE_emxh6@OOLKUnN+{V;qWn0Khd<82z;A{9Q?7EWAD_!LeF3k>HBL~ ze%Ljn;}#4K0o!G|HVHUBqgSi&Z^_?b$6tipy4iE3VUVAaoG&!(;H4%{cI2nF)h%XUoOg4WE&UhVYTtD@R}c;eL2@0&~+`*q${cu zA&kB>A2Pa+d;{HtIPyOG%LRzWz^MY-=SFXCtF4dMe++mu9^vPkcLm;v!wvop_7m-@ z2_kz!>4mc&b~SK@ObHe&(Bpf)-CDv^6yryO!Yk+^mH8f^m_>tRH`_^V6u3jdjYsot zn_|_s*(jNIt6oVbccpJcLeGf`cSA{qE`8PV^w~f<-f^62tfnJ5->|z~d5$vQqEQSb z+Z8Z7-npJ!J;;-x)t*dIH^>~uKQD;Q7k2HN7{}tP+?sjg8N= zGV1Xr&xXPHrGUmgb#ch`pBaqZ)2!Xe#8$rcs?GLT zahHSTFA6*oF^Jghyn-hvMlX6-rSjCPV^w{tT%K?{#;;qmUspMekjjw=$o1jIb01q5 zZ3aLgd^X5x+y?@;g94{Z_@NJ4uj1aTA3nZ?2H28J8g?pHQyjz_(G!DLB<^Wqd-q^G zDgPKc1MwAQQM<{h&p{flb@ILT-7FrcE^q(7!qkTg#iwXkEdn)Ss7fiQ*m9VTI`sTN z$t-#QhB-srWLCZIWz9e;)LBkDPv6nh!VC5F^vLFR|7;dA(W{J$}fwvDH?Yv^qv{!s4*Z zmk(yeRE)S+FFuHrdX3PN3V3wlS$`Qid35zq{CP~UJLw_XnTZL;EBNK7k^H_gz_ecA z>QUi<209iOTR7wX14Ix2I{kksxpcTefSKE3Es!ci!8ubT7)L|2JaaCOb;=n+g|e=U z94`Z|gQsPe$etHN4iw2a2nrqzRe93$pKiTDHwe~!Z?5<~<;Ch=T9%t0EJKXLAlcS# zu{JcPihhbrTtkCSznWV3j+(-0{qo_9v9Aypr?8tTBmwRcRa*h;Z4}c1;&Uqb?2&M! 
zh8mOp*k*19wti3@abo$#u*=<+4X5v%L!lP$?9QWTuqH1@E;7e+lZ8|Cq=p}q!&8ax z7myY9QjHcBgobO?y&+{VH_M>K^L>pQIg|uR#bnfU#g!(Q&u07?(ag~8>@I3ds(HRc zw%ggk)I%wpgNmy>^CtVBzKdc>lN9yKb*kMXWu8L0S2H_L3li1xG%&ySsa1lQBvJBA zV`ci{xTKin(pnzs>mO3+R;f>$?ApJ^Y`w)nO0<=P zvruK!oGYXKyR1p4>8^8;8JW$h7h*B#c`Qy6^mHFwZ#j>8E(vgiIBhM)tfCw?<68!* zoBR&m%NS_7@lGs}BQ4eV#;6MrCuvo^NmUO}LQyw0l-RAhm|T}md={sE@`C1>@%~2$ zc35J$DT>oZuNmavI7DQm*w4DaAOXU|O)-7oFc*u${`F_~vWSBP)NgOGN>TM_^@Qav z46x(ix-eWsvy;zCO^2oV*-Ev+H=@SfUHcBpk*=;_6~exyT(;;t((?5eLfm8v=7jO@ z0BbE@io=XI>H3n-J-=CGa3AzS(i%35bW`p{&TiI6YHp)rM7Vf3n`KmB&$WM-s^ji( z+G->lf>#6)rdIKl8hZ+8@qQox?iqay*aLK;rwGCaW5aNbhXMJv>z==tLV-f@Q95A7 zf`6|?|H|;fmk~dBmc|1Tw#&(V+i;W-9)hS%9$!<%D4~op-|RbkzN>4JiJD8=aKAET zk=u5rby8EvXV}V{v;i|sZQyouB=cea;**0ZT3#RLkJERCB|`*1fk ziOj`PM>>P`U3c*(L2ri(-H_*+Okh;1(^?v2u@KZfy=r>;sT~>yGc{mX)aQJy2iqKG z%C>;iY2*&=@l56RRE5aH=dUqR!?4*Sm;858Ouq+CmgrBXSY3j-M(nqywx%|K(K&B+ zcp0z0Uj@n#M}lfLwSv1VCXHU*rA<{^7HzO;+KQOwn?JlNSAUV^H#T9hxQ|*;wy%!# z;f-J044DRWYq3ZOdvDsvJ*|p_8z5&Osm)eeY9S@CXp5N6=9vjpZqKQnT#54tx?kPr z=PoG7FVWTPVx0cK?yCLtbrKdj{`0aF<;z8MeA>Xd+571DZA;tR4uYTu(th&RbsHtq z=4Y;^3_#7;O72rhAE-IH-Qbw9aL#S`IJT@Qf~NXhz3<6X7Z779!znn#-wEaGOkeR2~rofhOINKi;1%&^H%BZeAV0Vh?7^>`vT=zHs14S^6moz zccaZ`u)q&REr-Dg?o${z)nNHk3L)HY3?NNo4Bxx-=EJa{257+`2u0SkwR3`2oSL06 z%Pk*S)wCgxUxW&p!tKSb!BVN0Z!)>EAavD?rP{V*t zz!(ykTWiEz!}>nKYP>d$3TnptqW7OnwtqW$Q`l8XDDCLMfpET|4)!4=c%h6!%x)$I zuH0@{dojXZUBu0jCL(_J(7@^mwF@L_n9L?b=vqOqg708udW_XO;;N&hg>f;M;Du_Y z-}(wSl6n(rRAq3jhce}>*0>IGo??Be0__1co(8k$20pVn`8h#`1?7Qzx|J?$Sc#l- z%e?1ce*Zc~Z*k?7@HKrrlh}(A%?rh9eGa#arMGM z3qb#tjSGnFo9MVWUcOk@fp+xm-Xs^?9k+x4H8+ce(!lKJ;=)r0Tl{C;$~O8Hqz%<} zF5{g2n=e#z+e%+od=NI-aBpGmzG`;OMvF2ja6H%bWVP5+d#d<>U8d`OgxCbLPWWpY z)O>6%yA5g|&=$i$4LX_!92JN;l=hY@qKHrNWX^(r;N7BFj+&L;H*SX}?AKRaK6)Na zR-N=od;KwhMM`AXRUaUm{zxKo*03vklIz|b&+Fxo4h%+2t;4;2>5bVtEcd@I zFem{X>i8^b&d-cIxUtul0?4xJ$!9m7SoCTI>lWc?{(e8_EGzG-t2_LTt-ZI+MLOS+ z@W|-str7QUixoK=(dLZDjyMm=L*; zN4S?6q(#A2y-jI$Z-&--%BQsj*1C|V{oICia3ZiwJ<9kvYrE&x6I)l%x$PMD?#j&C 
zCXEl|X>>Q}tHtg9&MG+)!~r6Yg&rMwKs|T)2OMw8_i+<_Ed1MYXu19E(Xh}&*AaJG zy4@&-G1XffNfBL6B`OVPbQ!-0D^^X<+$6`cDz~$yOXQSyCr{cdRVg7isD2a+(lzjj zR#f$|n)U{bh!@pM^iVqbt{6uxq7+~}JwEy6d&?i6TkOr=FXFF3uc!v*!DiZly8W^3 zgv-gC$d*ekRW}WnBQL@FeeT6&M10=8Kgi2RBWB_R4mZD4I?Oc0=nMJ>x}E8)9tyY@ zCH2J)MiN%ga6NuA+3I&d18^c?dt90_fm;v5_lBPB_xCBHv4xA&L>8y6*8Vl<+K&L9 zD6UM_{Vh)MA2uBP$BS0rnxK!N!veDkV)2vnJoOyPwCj_pA2}vNw--5%&dKIF6UD7t zZNBOv=`~&q?54Me4`#bEC_l4&5wFr~DE1xBNIYy9N)z#p&|j=lDhe>PrUsB>YpmlK z*I`ki9A&|$l-pIJX!7F7Z2CSxj&{=`iS;Wx^FA&(T+-;!;KE9w*w5|a!1Lcl|X*7{| zAFUSA4=Z5V{XYod3^YMn$7&6lau@p<*__M54;DmYreJ9>to zjsXrMp!d}yZt~N(s}Q)3o&{4=%`(k4WAv)#47*5ckK4S}nY6RADwxRgslvE0^6;N) z^g?2?o-URdZGI!w!z$YYX*MFc_B_ZMfVmya5^|Bu;FLgEG)Zs{MJk`rfR0VSv^b)# zbVi*Va1clHR&G+RTy#oA3-ax2=z0K&+J;a?IYj4vyqAZeE#rAg za{0+B*_Nc*cs!;A zYWGO7oWI`UYoLqQ6Z6*8;2EHpA9a?a)O-t zx}~u9B+?a%)RTXNL2uk{!B>fe3RDHcYfMT+5;C6ya}yFKEd7SvfRaX@7G1)CcYJ{^ zQYs9q)be)iCk;)VZ&^7lt4F(%e6kIL zRxGosSHDh+^cYdOa*Kkm!_;By#(5@*Upzmpc~KH(x}pzb%Up##52?s?!;V(@kG7qr z-ORCV#&mbf3A{IoDb=2f4^V3;^$}q$QZvag9{JtnhO|`1RT1xFtSXt{B9~){z(zWy zO3IJ9*H}OCJA(=fe_kw`q+Hzuf0KFFRN!+KabYwSqajaCavmGlpl{o-yf%WburIZx zK>H)1Gk`59v+)dD#853YJ=QTdDlHO%sR(!0Xt5NEQvk*{cV><6PYvW#?V3&*O~X&f9P`u?PN0f@#syitm;sycTwHW^ z^e*BQXqcnb1*XWRN)D85CFFylg=2Zd{AcBN2=$Lz_*A>68@p5DbYfpvyY;cM6CC^Q zN`l3Ium&}f&o#UU61{fAqgRa==tr-w@3RtNY50EKyq%w2t}$Uam~RU5lGn+W`wrIF z$39#P;*x#5G_(F*I+hjhn2Hn#%l}9)WCBZ6EUQ@B>M*zz7$q_{W_+4ZoToY-_turNW+jR zf<)ToQ34imPpMeX*~JKw&m#E4Kt@<$N|{^Yl@R2uN=0tucK}MOZPGC~^!!Yz3q9YB zPSqJ-#9GXg(5c)W?MpPWJ^x^YNnBs*?s{oNX$7%2u6P}n<*9~5AJnZN{;r(R_I%7s z|K(jX9$8=l9rC&%g?uZdgS?zNuuu;7_TzHU2aYZ)ry4>c%c*iUpN1KK9q%oYz4>Yq zaWCv~m!YF5^Wv2EjC|#duyuyVB^kC&@!Z|om1ffo5g6j$I9))(2d;)_FPnB8yMEyv zKVNi@w=)O<06&~YPjnk}hIFUEq}}}5zgJGyHTEcBzqA|obD@&V*nXPeE$Y0s>pQio zIGZ|W!?~1qM)b+SEuO!O{qc})K%J_&P7zzNqOOf_xt{AjN4I=OglC_c$fui${Y9*x z+_S|2Q&dn{wnFa{kDHw;tGi!0XAO@Zu}FvAH*ERjCQVtRRfRL&4S|5P-3)m zZuBfCmO%&=<m4VE1 z9Da^&@whO86D~$K8r##6K>KD(vplT@u2v(r-fAUd2=6{U3&KUzylb>T7i;pk#riI* 
zDLRp|V;~MtPujxMZBY5YrgDxu^@8~m22;BX-0d=zMpSx|I_m5DB7f49`n0idhkX)D zt7?d8pPhB{koO}qXz=N^!XozgV6b~$%>BSR_|mIvvU*doL#fKmmr3>w2@iq8S!p*# z3z>LH^?Z?AwZbQ_t>N7kD}rcd9d$fOIf|&a-!TMsZV#L4 z_I`lu2#-lAwt+(!)DBzrW9E-ek1Ktu&CZ@hl5xsZ&3~rlh~eN^!-ErE>0KrHG+mf0V2N4&xVpGB{gG zw9TIL{z$(Z3AqyR-};@+;ihYku>d;V;LT(h8nVYAZ(n z1uXw#^mE#xeI2LO0R$4nF0gakO?{HRi5=ZWjeAU+9NeKoq#)ub2AO>tCv$Qn0m+R>6w_o3R{<`f`aa5BXCEN8PS!m$k}R z_ee4@ebf=Boi?h~ykgb4^;^6`k|#@+VmeT5gr``#F(?2s7q;ZtLr?Vr{I9PQ?sYsL zlH94>R!^{IPdrLqWG>We9#)}xwLjywHrEigj)a7yR2F-Ksv@P3h)Jvwz9SoIFj1mQ zYSwYfYKld#QF3M{=q;8VD`~a);yO5*Y$isZOgnFo!$)haHH~a*QU0b)u?RD@VMo)n z|Ko!oFW0UGvcnFBOjsXB1%Smad>#%R_xazg$AqerwYchq(yrBE`4`Rjq4j>uExkJP zCl`9P?{rR4f1BfoIc_z4KmD7`$-%BT_UZ9=XJ9AS zF>R?)$&=oOU2Zmpj}rZ=HD+&Hiw+d$zP=gj1JVP90Jxhj*3K zWG>8ZKJeADMw3PNoh7|dgM3&`+vcY`dQQuxgTOkb4Mx!wxwh91h-34VG^IHLu6 z<)iI2>Q9!oMHJh87-7AXGCj11)1_Ii7Ro5nfX82TAg6lJ_25a=KmiZ9+MwK)R#^6o zm|VmoxZZ5>?lQ;o$l<~Jc;-+FsRyNMkxC8xJU0puC8nntX4p<5etH$Jt1#p)Zq4Wu zBnCoVV+HJ|T2`m)71h$xW4$Z$raQsZy-VogR7ez730U+{iyI+@8^6(=)pY@l)v=+E zvu5?qyVx@jdo%TU8oiv;FBNWq>qFzYia_V$-HvvxZS{vYO`91D7g-su;iv71CtFif zZnUZ6+oOhUUEh@!Zf|&0fZi%b9$@!vCIGQC5B-%1A&#d&sG!H*KOS9_0JsH!_dSF3 zw?ffB@BcRX+J=C5)E!%5wxi{P+~;1CCF?d_U8P}ayJPuP-!LRiOC*_Qvg9A{(jz5L zGIxJzyP+2JIbb<9>W@S|e2de!G1T6VPGcL5f3-L*{PExSQ&Hz& zu#n4zf*2(hB4nrE@?<)bvcL!{{S=d6{U(NRFzp&CReP$qF}B5lt?yQ#-c%w&5@Lh; zV&Z_`IR6V(i3m%I#H=J2*#pay6T6#G(SxE!(~#uof!%4FS?eVUvW`B^)TThHW z;4X*_?zjEhh0OmkJhA4TH|&Lsa(^l8&@04KsoAKn>uBE!*5>SO(uaCzMvG1?TIbxi z2)4|RFjE4!a|DbUJ<9}&a(NS7Ql5mD4SIKaqg^No^*{4}@qym8QhOui*x2 zHycGI)L}lmKBIY{jnve(G~Q&EJIP|OJeObL;c-X>FZguhP;>|s^L+G=M@-MTAn-i) z4ZQ#E;VX;lJYkXJjW^Nhl8e|)3n46~yyi<6>9)6tRPQg}+yCVPz#2Wy=Tuy2pB_;) z&A8rS6I!W_R;KkmzU|C75LpIlDNOt8`mTFp-&ICv*D(CaxgKDN4t`VLsHT^i+Qsm>O)01&+gIlvhmMjRnMj$LF~dhg@0_K%f|a>7CV zgSK$uf`@05f5X|Q?ii(l!u>E&U@mjyrFD%?=qH@zzObJ(-o2=6 z2{v;3Ew zi!~wn2=B3QdT_!+{d?|UBK4CP0Nuw#{3jmV_%`Ca=J&Fx-cNbectP2QI@78IU;Mn=_ZHalcRj)0rYjSyx5% zq=ki_$3&md+>!bpU!XTO3gU@mo+CtH?F%;EApj_0(uT~d3Fo#Mrvg{{!)X1FcpK=_ 
z0uo~qA4S+XX(gB+-;0!(_v2=%(ogCQEt5qHO0r6ooLF=0gEAu3cYicNo$_V^=#!wbW%nu3_KQ?EM5; zL$>rgcJTg0kyh}#j*eys|JO_`^J}lI$o0EoeRc{}Nt}15A4mn9sg4Qh!Tf_4Mk4Ss z!iQ$59t6lPrzrGRydx5NbSpGol;?LB8)}8KE;aoXp@;Q*(zlZ`tDm|(mljQH`qkTW zm?05yYWvg-Y2(Z~(p96c;Z<#ucl$qHETeulq9xW=X=vA!<#S$f@AHi5p)K%q+2$Cg z7d5o^*Y=wvl%I9MawY*KN^0c2+p4m>L+^ZV56hhRlQz)s7tWuHKkt7vqO#szS?9a@ zBxyngC)MK8m(sKpK)Oo+wg*EGXWUtvizlZx=HU4%iu(A_=RHn`Q`67%_5yMbze$ra zPQWQ%)-}vsir3#y9@lzo2;K_%qbCgNl?Q8^Ui8n-lKjN)s!^lpO%r(TwATCXG%*E} zT>Ac{J5Q>@<5i(K$G6bKg${^FNxLa0DE@GL-wYj{RxSk>DC|dmZ2c@y?=(Om)v7;5 zqjN2{l~_@<(-}=6679CXr}_363f(-BqhmW*VKoXF zW&aj-9bN|9T{t2BQI!6-p75t4R*Bx|>pZ7#Thvj#f`gT+Y(nAvt0%wP>=H!t`R7x;AVS%I@1~yCI4PeAj-!ZfXb=EZPk~&THkt!7ltUd4@|wK+UrBKQ&8bK)OQev$7KP6b zstci6EoV|*<-ZM6-B-ZfoX6AB%zt$8{yop|kkY9Z*mGYP;o=eov-a2K&7pSCXL|`R zKRmqLsAAZf{SKW+x~^W5)y#{iU#u?mioFrH9S8c3!^MtF1+x#x1s>4;(C~ro!uzNZ z$Sqp*5f*hSISN6>-x!vXyO@qFBZewn0!J(=)o>lF+q*Ss`?4fphaYIY6_TN!AT&U= zBvjR(w8}~qIvc+=oSmLiCn*Jp5VSPzA-n9&^RTia+|*Zor;x&Kw${k^vOOhc=WidY zQn!={Rqz8f_tJZ;_@D6S#83nR|D_Df{88A%{ByqAyowRESoG^h)2 z@F8HYU@YnZ1ya}p05DaP5PWPS_T&bS7Zyyfv?qQdT4;HGZ9hLe_uNMu^euNE+JyJq zP8Y&v>Kfy=IDug@;eT=ebCIslOXyssIbRnxy0@E|1U4wKV}w@Rn4Ruv!9Z~HBl|g9 zo{V)1UB3fiz{}PS={x@#(nGh@2=q+`XUm==8z%!!x6HPi7ZCUSbEY!jx2N_sY+nDV zLjO-vM~934t1*~sAXu|rsCm~KU{DWD0k>v(m+HrMP4M2H0yTXBtA z=}*i^>~H!k^B0cFScq3*adyIV;a!y05!FI^lb)gVjtll4r|mh*>yryS!Bejo!?WPS{F}tc31lLfiNJkiTx(=u(Vh;VHMRw)y84wpdWZiv zIS)WAgX`jIH3y~${|%`BEysq-1KqLCoO!3Fx_a3u8t(nMY*eV@>WtY_*<0dKvz|3I zQA+S61H~190r23pP*mt$Xxow{dh%20FSzNQvWDniI?sN-+nIH0R6-m|6D|ujZftA< z&JuG$1o=Vm*-&;^JY{cC%c1tIPAEM=>f4K-_|$&w!2mTOkaq7YSIUZln{I(M@Ku&) zV11BZ#Wc}zILhRTCJjMklNUu1bm7-~LDd;xkwPL)J`&!S`48Jo^c8RkFg*OxKk`=p z>qlMzwFe|@?pb$_g-a6(=X=GBmnLp=e%w=&(tgK>U36}fBs&QKsq>k34@LA zw;WS#A#gsJ8>)1_BxCpl(37jgQ{nIk8R&BVT}U?gsQWJsJSoI~-}&#-I)Lsm;mps1 z8q-@^{4Eo{FSlpOi}uwybMJkieTSVI;UO_Q$H)5jFAsqt%97=uLOJgvKvl2|Pe+L| z2h%4bAgr*G9~d46o3C^3X30r*@cZXuTIYcUaHt{M^!jH_{&P@8vG9hEmztXLF{ort 
zP-EFlIZS#vD^vJFg^YX6osxe2m!%1~PdGdOvj!Q;fQdyZ*+>Qw(H#Z8BrjaSLxQHM z{!vK=oCEw?+EU{GU(kp9jSCBa@6ueu!*@ARH(O(mb8UgS6lD?@5P#BDDpfe6q7eV9 z6)>j;H{*E^=Hy>5so`UDg5heX6naZY$bxJ9#K#v~4Fl+ZU6JU$eKwFLlyWp9CveK`}GnRuz2rR_>5uSL`VHHjXkQgHz=!|{w z?=PpTNeONzj~h*p{C@gP?{NgfZ)L<9_>LCUq`!|@MxneY zc@(Ha_kUYfD}35Ybkx*aL<*H9aTGzV@2lbmjEDbDCCGvs@z&Y;);~rCj{pHAn2ml+ zD!W)YUJ+{pu|O^uDkFCvWHY~#LE@Ke1i&!8ML&hFL9e0h0%1Z}yo za{?xMb?xyv<8})SVt>kQu8YaKCUZ6)OoZ=eI0(_+pRRs#8U&P;3R8B^O{O|rq49q# z3kpaDbu^tP7yQ=3-Gn=Y!Kfy>rNY*343D16@{eH^4PF1b49X370qX=B&hbZt?w4(~ z3f!X#ev{Fwrp>nMO_z58a4FZj21t&RYioUXxf`3BenJ=hb-+?z0?fc}7q<(aFYO)%&H@)gWL{_eujc+0y#<_G3~TE8^!0DI^CpP)Cg?hV1GZ z{l*YrGOW)tg)bTW?>OEJQQ4)`CJPCj4G>6NZm#x!c84!KuJ`>)!LRoATra~%eLD2^ zrVqE0^5dPybL2@buCBlenLxbZ&W-=$_$SBzj*dgC=t-o znX11G{~Fk})Q0hDc6YL5S2B_)TByNY%3?$!f}pK(n**1{ukkF+%a;V$N=XX@?g>W9 zU?pHbt;O)(OGc3ryH6A*f_)z|Ye$weU+rK7*M%k|OwtbU7>dKU#*PtaYx#N$Mr2{t z`@+sed7inzzFG5$15Wpbjp}ze>O=%K)41)KW5lo1bTk5-q>C+`R`l9fxRugHlmi1C zVLxfPnO(lr68ze5_=@0)0e?pQpF7XLjzeJp77{Qo)Yv?e(dXt${fa@ec)5d;JR$!2}3kr`sDB=J`-C?w$-W0GU>)vSZuay@*)!U2?mLt9?L!9TOm;1Q;E{r*BD-nI*Q4p#z;!JsBIncw4V2hzY5&#d>PBQ!>|+_X1( zi{O+MNl0FDY!aA9);Y)LOl@1{IQDFzau2uOCX#Ol|4xL6MudMlLoZL z2Z(B#F2>u#nIf?MWPwSmZMerUG=4I= zTXP4Kq{gE~a*lOJj2FVjE(U-3evH1_4U9D~LZKy?D&e0=))ZFaX` zSOEJ#-_nSl@@IrF9t6r@X=BbO1xPY9_XrSiMRY~75u#sUREIAuap~*pYZ6MZSrZF9 ztHgWhK@!!a8^BzHOO5H$gV8FBw$9Reor}$D!%8%0@$D<+B30Rg? 
z*!bzrL=j9YHzTLbke3Lbl=s(Ml`?ezwp^c2bhQEoaTz|O(+4t|qFE{aH5Uf3*o$E} z60mSw_{oqLl#Y7SFbl6wvMeP$QESbT zwNjH{J+A2IV(6n|v@L$5tdRoElE(9c`5hFWhZi|iy8F*~(*D3){rGg)(g z0uDOYwe0T&KOhKXwZnNOrU}KOWmY|y^40l14kre%UF9WrciS7qFRp&iC z(#FBrS=dC&;@8J`bqQD=_mkwl5l}rrkYOS=WJ{FuI9j?#F6?1QOz)b+8GA3WW)c(m z%NuS~#k1pMI3K0lOE#l-Tpf={J>OVjeFwq-9@DP2;REDJ957$1L646=CR2-Zl^e^* zAdmw)SwkoR2#a%-F~Om!`y&sSWOLI8Hp49%cCWH*7GQ}@(s}=2`~Vuj^OK_gbp8_y z07R=8D1h1dIiMSuBU~C0CO{!s{O?`9S1N$lkwz^4_mVcS0o3$IS5?yMnLI_!uDcq` zD# zesu!c;gP57zLAovcL-*c2tR;9XgB~3sxt{iv0V9olCjW%^!r|(HFCd9?{=#^wqY9c(@H=IJ!e@o}o+mN<0hVy^ zyAUm8k}isc?jGEFc>}AULlHDyWLt^+7k)*;GiXUW%;*0m7<5ZN5br|nz3oN|#}2`G z)VkUoLsL0dVm;A$v`hnBQLzkeeFfl1X#jj6BO|Zlx6Tlcz4=PUVP3iOh5c!{(W)dd zZlWpcmN!GhL)>N~QBhG1iA^>UI=Z|bV00?0V|y^#fFgH%obbkgLBM#vs(~`)6B4XY zkv(K#{icU6gs|F>2*Trs(uBqirz=QEc)=hyWDi8tXI9F$XNw#hF{2pkEnvB2~63U zYCTuY&z#Kq@}ikGZ)TTi;&W}KGs3=|0M1_0MsGP4{5J4DPe6 z5{e!$Gk={Wo=dG?78vOK2flZD!J*a7Bd2=+I(!fau&oTw+Shq2p_Jw8gVA7Ds|>mI z^3$vURe0e3-lb6XiZUle;3)|bdD+^L!|x&hJakjW!@XA@cjVZgp&Ia55q<+8XPoSj zErtxa14u=#-~J3&(qr{}6`-`wNlfk))4oia4#9ZESdz5y-8FR5ztIRmQH7rYY z!h7ee2*B0PeMl_v4B6wx(dGQFFB~Vtf>2f_nRKXqhR6imsZ6>eb3^!QtBWv*?1RWt z2?Eoh2lKnrx$Y%ssJ1VkRa^XgNbo{s4LNvpd~D1lJBw~3b_fv>k)57D=7Bf#U|@f? zUOjm7VRI1HR)3We8vHpV$BKE}<^doeM->>mFAK;5M)V1H_F$(=*~6Fb&gXGP4OK}9g%ugPNY$a8k2VAs}>4>tY9T=FE1}f9I|E)*nr!R zXm(i>qS+b{^0yLwb!F4ObocPzX)wH$jGLniP{ z1&Y3{+xQB-Kbc2FI&jPM|FQKJTv@ei+bG>B-HmjLbhk)%cXxMpBMPE`ba!_*NJ@9- zO?U6f^StX@t}*rxFqq7FUFUfu3-0cGiQYRZ&TZ1S$Dku7G!pzAmpt4VPLVZ#-IUDU zRVfKtR37_3APFdUL%m(TO{umbgJSyC7O=!=w^jbC-{|~@1l)d=^q^n}!20DCD?JQN zMt?BofBt?MAr7I=q$jcD1=I))rEp-FRs0}{R_iY6#{|BjX?Kw@n3reGzJP{|bS3;6Q_X&k5jhr#@F{|CB5 z055z~riaa9m9ci%>0*OB%~wk4cQ$KPkdg1Go9)#mmSrl|$asHz&fhf4`tAMlq#2gX zs6CJDH5ky{{tTOWdp1vCF~J#8K!Yk`_f(JHH-*Ejhgqy!V#b0rn)n|rz&9xV?RfzK z0WRNN226wOuFTUMnoj{w*RRE?Q2z9!`L+wy+;)IejtGC=nzSV1w|=Dp95jEoI>VP+ z1O$X~RPom+Gv;iTS;UNTZR)KncRh4XWBd5o!J`)!ACl*IAo? 
zTb4m_+68gpXpq)n7)woX^I;I~ZgAuuL}BbBf5* zRs#CV+=PY&d|P6nLm2Rl?CkdRg&hMWH6zB;!vs zI_%PW?!|JuivF0dUk<&0{0!s!ERVU<4kAE9OHiHkENyW)P#UvREfF)}nyHul`LQnm z9C8#QyXAWq3%AQobY^Cz4@C3KdpSaR{fZOLFYW4sU~ijJVx9~%=GEo&ktDFPz7z3) zhW_Cf7DW7i_h}BuPcr^Fu2(vdkOl9`Rg^Ht#sTH+P3%MlV2SP1h(J{VLioO?@wzmi z<2~a3aS%_bQx_^IIQUJjV6#+ z$)~Wbw#R#?r}{kPV>U-=LP#j?gV3z&8_V_8IRB!PZ$rrczosM(N)O?Si2aAMiw@rw zjUQ7t`XBUwqGlu_p3k#A8k?3ZE+6q@EkhL~Qi5c$%*WCK&$s%yn~CVui-Lx}77V%# zjCSSQZ}vtSdp@ss*wd;*e9oWZ2~ zYFp~4pLT<-Bv6d~g1NhH1q@5-Yib=9jG3QrY*&UW-79<+nlHbX5?5PYT{v?BOq@+Us?_JxZk8=GZ_k?t1BiO zDv<73?lfUDyb1Y{Y(+e#1099kJBVkTelRRxM^ovs3-LRhP1|*c-mOf$dKRVQL$U>Hnfug>^_#-xQ{WF^C({~JqQ@%xLW;x z@p)DP9nNoyROAG0Y#Se6{qtiF^7#$H92*iVI}1ypaO7qmNG*F1XfE!LBZ$^%a?)OL zzA98q=)$P89$Rjr(X75gj##$>wiBFH7#1@aXgq@L-(OZXo=g>}jKgB2QkfX1@@2!J z;NPuZm#BN(JmK-V5Ys4Bk^;2dcEbkFAWICAZmVvpq=Ld3r$TQUSupG)g9gjmNt;%! zO(amBO=h>;c)AD;1x4y!p7!za+4CO3g_!uX*tXU4k~(-yv}0pE5HbT8o_=pEc5^0x zC#n=FZT#6D`tre2iBc{NSF^@Uw8?1)P{g5U3RxPncrYOAHc@tS8vff53aDRRh*(*# zfHB3)uiW$$?z5HLtTZ7C)nd4WBW^7tH@9LoQFKj7ZGsTK``M@!Kn71ArA> zfBwr40OGWG(le0=`L>vREKd6sI@S7^Wd|5(>o7$M!r+MRO!!$e-)|8mHvu-#A%Zt zjL1%kP2Z^!+~q(^Lqk)i;PNM5K2^(TdA_RnwLOK+6`xMEc)tRSgO8tIz6~HsJoja- zneT7Y`Vb)&2hFtte9Hzrt!=#QO4$xCqU%nJHDHvP%Q!JTKlhya08L(gNuaxu3 z5%Z%TL_WCLh64efl3s);z;GMvjTEqU!(WWJ;%g>|85-~2_(B^2*pIB|MMpZGkEPi` zSee9Sn-3y;UnZ`iq*P+ z2|;DA!FZ2p;a1)drTpah*q18zp)VA1Pq0FLL z+0qL~KIVKQV;a(zW46~p#B~6fdj}lTR-@eb(96P^x?5U}E-x`NO5VtXfpi>4JhBG# zpmXsTkrWp1qCZ>J6wo?A5)-QWd|!M^7{w!Mjy>%4>5!~08wN0z_w{4V&Lnk@^XPwp zm^i>p6LduZ#tAiDmHR`i^PWl)!IL1*TMe>_GE;NF=axI8!)`VjvS1BTXN`zu66;@gP)3M5Yekz9LZU|_-9nNK}#(Y=J}z!rk%xrIv{e2f7=Q<{#m9dg+!GEBtc}$) zz}i?KpFXTuAZMFuj7sp4@cM8LRe_WT9|>ke9xFH~$Vo`i3l=t&;PvSe&2+LmwD?&R z0-cy=MK4%bUtg`m<5?^BW+HX=<$6vZ+GMbIHi=nZK&3(tQXh+vu7|u@GXO%W^p6OQ zWNf0u7F5IQ=lHae9%5+F|C#(g+8PP;z! 
zw$@(V_$Bl`d%nC|VKM{0(os;Fv~- zT|VMv?||^zldYL(bwC9Kt&}ev`OQ*^OuTqJ^Unb}9}{n{PT<3i)z-#y!tHWvhI!-# z6|2MYIIH_+nq~}^!}m%_l~BHIiHk?xfKPQO(ij9mL{7g>yNNDdS=P~w{k=DP|9M0H zoj%Cl@rlK6A@czf!rdR0=9Z8Yd^Ja{GotioStoz3p8E1<;$ znS%|(vT4Ax>?!H>0VqIS*~REb8y#l#LvLVk_#3_jn;+UVT}@QJAy}tsaisBl=*# zZ&Vy$6--FL1CPy!B)pHVA5H7NBm}-e8zZD6rvq_2HuIH%qvxFV_gK|YZ~+BqZ~;Nv zU0q$nUnnG5Y!6ik`MrA0)&zY0gsk<~NTH)Dy4I_31DFO)4xt0Hu8(H=0E}Ms@no?L z_TWz*P?dY2eRoIWPQ>)<-z>oOU^okSaIT+lBybLtSLcFr)#GD8u%$Uirjy$9fIg); z@X7f&1Oghp&E4w~wcE0}T&(t^)wzTFnEf>%H@K|sFawBL`mI$zdEmJ41)>~y>f!XD z%>muuArq0%R&?fx?>xHrNVvSXKdLp`Z?Si+)+&)-_}qa4gXnK`QCVLlE%eUHjv6XG zuO)1BTNcdf(701dZzh`T)whRk1zU^}Yw%wA{DOL=-wEGGY@k_|&$XPcecusi^|?m? zatp%B&7SN|As`_XnuI)O+P-a&uazHv$RPWeD-;pH8^T}v#H)W4=`e75R0!NeGKCZ- z+IS}uUY!ennLSAVPbnMsm&OV+A{HV!A#_MZ4PArX|E2X#5Ebto)kuOWi`VTvxW3T%RV&y%8+)qhwa_yc=^Kr-96J0>O zM#yQm?7(#^+VLE=J*3Qsg>dM7aLsFqh4K&I1nj1t`Ms_p%(noNHV&g*I1CBTqz)aJ zA7|J&X*6JKnlb2HGXn@;(AB*x%Q92 z1g8?_j$#PT_&=mR4O$t5~KP#H|& z*U4apHAw`2e!nv~AZ$=cSEWqCb>blWl!$5Gly?! 
zII>EUSrK4uS(?%bL;9r76n(?bqxI5|L(j#9JF>^r*ToOJ?!Sfo2AM+WV~MPoU90vW zSZxh)53UavL(>ECU4rkZE)&*zA`?fFf{8!Uxk@$6V}lZ$%^E2_$??JdrJi{c`FR5(zfGP02h;-^jJ5!@ zZZs3N>%dL}Nisyn=(4p;@Iyak9R2N4YfGj8NuCH2WSE|yvl%{-`1c`ql!F#&)KEW7 zhHQ!w(Rb!1vU*7OU?Zr0!SuKTlPPLbWT~AI%TH{C%w=Fankcoz;PZQ$FXDCq&BLej zxF-v0@WUJ0fesElxhE_@g+h39`LPvt0OczeYhCf@0*n5Opub;@!o&bTj53!*!I5o3 z;Rg@Up&cKG^83AR0dJgKZI^O|J^mu)@cSX~e}0foOK`?OFv;MS3sH!;S>mt2fz*KH z9kL>PpRjhYHQ;cwO`?DaeU3pb7B#L!W_nmFmCgj|LUMjQ!FpfnXh$NqzPX^x3OIsGel{ z`8{YU>*crOEqI&4b?TwpDFq}_+oD?!&j`raqUjh`n|ucmUwc07Y8TuydTJ665a z1|I1b%k+tFjIhMj=@iZYik5mCBM=Y(6H9_{%C(BJ$76eCQmWTDRlEYEr+ncs{}}#E z0$7aFQ{4?Ni(~H|<>n!pnlAm~x-i1Y46{Q}1{~`Hk4 zW1nTv3q8mwuxPiv_To4<&0;1S|FX#8Iq9KA!+V_7(u-L2!YK7Ag!4kQJ{H z-@T%Q2N+yZMCD}Q6fcgijEqw#S{ZtXSocdE>Hb>!NhC=h{F=`lXM0-yCnZY)5soYo zFE-zFu3Vp4JIjZ=U~jQd(fI0cg&@P{VrYrAmk~VXmJK)Aq6qukjhOyK&_m&&ggi9; zS9n>R@jzV99EbIAyu5Ggl9frI_FZ%r1yWMj`;l;qcye>jI=k^M zS^bNT5h!A{IE9axY(a9jKxE~_pN0~vRHo)AGMT$jv!+#{v09~@>R zo4YI+XQ|Zom${BFYy~n3VSrqXY$vI>9cqYj?3;)ntI{lYfmszZWO(Pnp(B@OB_`}$ zxYrp#c9Z(_())M)xdo2Fyl4xS_W!(eQxL+!!k(eoHd=K~s>LJ^TrY;zZ3Jq?D)2)L9nmKFvH z7uNs<+t!u1`l)e>+?o1_WZH^|*{7xX`Y_A;mRRcN2ii=7BwDH0w)psqlcXRKIQtw{ zkC=dc1%Pb_rbF>-wGO+LFdNO96E@r{#~}6#FE-h2O3#>iFVxj*ETSWTI6oRaj$4TJ=+V`Qky<@@FrCLVE4Dh4UwFN z2j<}J{(fo^!~|2kai3r7BOR~gGWDR!x*BDURds#+Gyy-T@SBs)XI{>_q>n+U;h!~u zBi9X+P7k4u;Da*1nS+0Ys5fi1KBwvt+=|7+<_wKWY3{=FXIC`Cul2|!v>#Jhyx~73 zmsnWA^KsIxn{dMHVhDuXY|xzjAQ=hO{$TU}Ap+A))-}u8XTo z?_C-e^z(>!x7P!>a{{G2*>HerkhI#tjAlp+?&Sf{jqKOtDA?^+9zXo-=H5s5(^^5C z?K^&0pvx&lO!}Q1P_KT6n`i-|g(^(^gA{1J^5r&y+k3=FXI>z?Pg zR<~;DWI@pA{90}{kl)LXMZ*>vY*F)(G77m$;bhuPUl z+a}=(?5&7zL0Ht@$y$5(27maz>IKWTavp!ak+T_x&xPUc5rLjjv|;^jROO{DZ?`_MI4;Y3Y;)JX&^PNU30 z@aWuPbU5;#?k}GWE8{h80r7ckoO-5# zFc%L`w&kCEfR(a!SOOFBLyAD=jA0h|G6&z@)AfvYUt3`?CQAH@7h-tj)WKG z#>CWMBCF8malW|{8NvVf&#BQlj$R4W+_CP)ei4-9urgDu`HtlM)WC^Mhu!n&-tVu+ zR{M1`aLh|KO|d~5F?VDgjf23>NbOu`HC3HC;e#haZ#2#th~wejH@NWdOeTB(ywmCo zx>S#c#Jj=1mz~7kWaJAXR>@45a!Z_JCDK51+vr+x>#X) 
z!{$k6TI_K(b#c*{g|b(NnLyx@pEY`DG&DO++MlRbcCS62rqJ;B{p@rdlQujLHW5a3Rwjsu6t*2kisHLT44$o5@!M5}>r9ot0!@|>E z?_MbIzbU`yk<-Q6oG)hK&Jry7Q^uX}8ac^kvxOH-Ro$pIWhF~#HQEp>-*?TuK%j#S%Ul3)On|gUBR=x55qw&VH z=X|46n>430!CTH35X1bTfnZ5lh#u`v4>38^`74feELiVII=P?N?g*C%?8n|u;?QyH zSmDLJ=ucG9I`X~Z*(M;1nT`xKtO;`HN#9$2sM}=DzT2ra|)o5G8;{^OxQj}#7SVVWMkIsx1Q|ye4qq; zO&twzub$bBVtu_l)Ix*pcv=R_t3q`AxJFWI!I)OBBRR{PNx%^-F z#xi)rgF+FqZ5Es=6HBR=78YX85#cPDg@4YrsLPk6^CqnYrBlgZ6Y{y>CWataOR#sZ zuIfPvlmeK@#c(2}AEf?S_|Qi~a%rW^Mel&)e?h?uC_g>}2Zn5RvE5*3bX{$o{qf3h z&lU#aVjR>t$tyHf0sK0PU_k`z$bALu1OJ03eT^oIk4&$=A5HiapEI3Lh41+Fg>#%E z0tRlbOKMTu_4#g}_dFGg$o)4q9jx}SPY&U;d3x(){I{m=**%`te|>~Hv)(QJ#{_3K zTuP)aTe!W^#%nRx7`Dm*6!4OW_RA_C-#!&_>ZHz2g}oCYIcbB*)=Wi-+KRBoWv_%i zSyR+cXZFaHcK)8_fhU=Bq}^nJlFZEVWDl;4Glzu!u=F1-KzAOH%X`Z{^BZ(&&4+C; zlvZSRi+DmBggepzlKffQY){Uq)@8DGU>%V`3clLz87eo1>_S(7t0mPmJj3dB$X7FW zKmEuWc6(6jrQes4C7koVhpc9&nBixeg>XuShZmk@xiBm?A}u(Z&v9RG zZy48>Eh$~Mg9K_w;Qb~mc4ymM&4atXg^k5dx@P)iZe27*?;qp8WQJ?F1ZG3Cjv6O- zMJaFAj1J!?`R}47$qlQ}g#(>8KbN&xzr+1zSgKcgTx+OU&6LiADPH+)IbR=5ujOuK zEr(N|Cln#{8QEKdXPZo@cMp)x^p{!@BiV@Uqkf&$O3dBrIze(58~mF7*H*n*2}ng9 zJuW>D{<<&))rFFG^q(Lhupts_s-iF`Bw|9~?5++MFb3jNuUlY`p(CTPT#sf&>kelW zD-DFvpC-V#5aCswkVqn$-2NJ- zndszq{hVlezS*bI;wCG#6Qh_j(7CX1Ut>eXEh8)pPo^2E{+QS3|0*DMMsB-U8xUeX zVNhoiYCPN7VFcK(~GVCF987M!J$*z)FHV^4<@cq59bZcieg zB45ZLa5QW1{lTPRR;AYHW9Oq`!GKvgmNiH6NCir>(E=SH;4eNW?s!$(T+>r}CJjvG z!Bc<6wRcFbv!3f}Jm0FDBYyC>zMxud_YNoIcJe_54JyF`gFp<=ZU0W2&m`aJ6cjI7$t-MB7atzfD9d)rVr8NyAnv-X~TGV7bPWZbcJ z+)c9I{MwxB`++M`$$V5+A1#7_eD+8Lhcg{M93Wgp0Yp#SoY7+Rwd=ag=LNIJt7U6! 
zlpyN)E0uhpCh^gkHTH9Z3V~K>;T4PPmBEweRi-ost6v_z>twmJJmu%RXrsrP_wmL; z>NtJQm;9WmNHr00Ywf<`-)Cd7NKTj)a=FZyW>CT4zy3N-zv?EVW=g%fk-rZ@`#0I)WgsRMtljMO@}xGIuNV%j?Rvl zigKID&E1drcDv}n44HQZ^?N>jhd2bu6+PJpm)8%G6J4`g&CDcC{*S~wAM6UaBp-?G zEGFBN#<&&BOy>HJsVkTpg)_cDDwKQy2GNF!wQOneXDgcQc#W-QNCHC==3irk9lzCt>YIk{0Qu(LJE%ovO&Q!1|9_0c+|=Q8u0IbFc-MoJbY%7eAL5B(4zI6z zTJvWIFEP;|SXn`aMdP|2uum$1306c#YGFKn#r*I8)wlkmJcxhnRk8BcskRc##L!_n?Ll5T79LK?PWuachC>`c&AuFY&BgN2{_SU zVL9crrE!ZUGl9jqP%kS8`i$x~fp_rxv5atDXfzOD(rL`(RJ$eS zpJ?+&)z_rp*3`88HJn(xH9e$^w^%#-Z0b25XVlgw^G#@$L9SZP!|L|?5! zGaAt3X22uVS|*YPUVT{R1n2$d!3Oj}GhDBOLjt*t_+KH}K!%A@6@d~MAq4778z*J{ zwMp-pUY~JRJ1|0*yZsy8j*-p3%PpRXn-EH(g1Yv>S6stR1f9s+pC0ip^A80SNZkAs zbwyNuz0 zTn2&xI(xZgV#{Utu6X<`^a&Spo)XN<`R*$nVo%2B;!oDZ_hw^1FQ!0P?yX|j^0+D6 zP>X!Df?&-S8L{VfqEN11GqAM{9w!mBD8ayHK8_&l*yE@RaY(mUDyuCX#_xy0Ir`ON zlq2``%X71^$}OCC#3$|^>al5qHnM@p4oASIO7j8&_>rX!PEP$5`SBd}J%Uwr?$*r`g8JI|2FoT-G~$Pl_}do47LO zjQxvwC0Apn&-f@#%gwZDI%(&Jee{i->g6A(VH}*^I6+=~ zLhIFS?)mCZw~ytE(b&e(!<4j2c@qcZClsQ9sO|?Li`A}%$-V4r9{Z;ENu=liz}zte z-_F(M)sPndKFbvHfHKmW?@`KV2pNd-RY7E#&-(e+z5QRskO3a>m>9MO75^ggXbhCr zqZtZn;Ng}xw~lqIeLNKlMegStUcmqr2(i<}MV<^UzZYh?UYnH@iZOCfFbNY=Ibfhz zc`r33Jm|Fbi{Hxt;9jD1#WMiONTw9}@zUAJa3}rwt5kpon|x~`_m4t}D$h%^Qk^Cd z=52Joeftj6ffqn1o`<1znjGY+AVP|M0OeevUB(5)2;wRD z^q~8<+~6fIdFPhUTK$?M8a6Hyg!?sMArw1$P*n-X>;J4DP>ofBpYFdFH=6RnNT_j+ zhUX2IWL$Ee#S`UY(U_}B=&EDVYw5Ar2hzzIQ@C0?x?EjB++cFHDFgXj9IeW?m?vI2`|q3|Y{5>uG7wJ6NUl&bPIwRE zraNj)GgWA)N`~aUqy8MS@Kxdme+AiAB_v}SEBcW-6RY>r1d&pnG@_z(2rM+Y44`Wo zOhkTdj6V?!k$IjqLuD~qw1|j8P;@NSug8uyXHZDv(e1xltSBkkMCS`A)M7PUrQH6V zy0pFXnS=wng%j|Nz@1Fz`E!1-no)5?LSNk9Tt;u#_qiT>E9+c=xt@M}2%>ea8gdR^ zo$KQ$U$|YHiw*s7_^~8Do#0zf=HnR2RM9l6U8e=%g13}?``YDBSK18XRl9z4WzXQM zDDqF!(J#oG0*-@ECH1jIbofTUnyj($2~YRiDR7tkbBFj5Rx%xivT?cGSd?;SpOF-6 z&u=d!ugM?4u^pL&yP-5%E%By!O<1@xN+&ZFLmyyzh<~-rks0U5n3a8xrA1>v``S&6 z#+vUL~TohvJ8;Yrzuvkc0%#1?Jm*Q|qn5vginR3o`&EQqiAWUj}KZ zHrJ!%yXS`+j>R^w>%+k>6z|zgeuKqI^mn}a_2)>&JKRs10vhy{?P;d+a$G)-2#G`o 
zon*zVPdlH;R2e?IsV%HaihsMshmXz8%p6Eex}J4EUEN_;Qz1}c^`iWof(9t#L=q%| z<4FWk>goY+0J;6IR%Y;~b^t;W_kZm)Oki;Vj2tQa*2zUh$iLie*NB5*kY8Xlt!&NB z+wNacF-JCt0g(J3t;iGdn0~J)*R_xq=%ORpG6d^i&4`S2=c& zSSnlfgu?S5E&XE#Y-Ac+Sx@W5RpHNTZJx2o*K~Nubs@;zg!6UC7-4uX4PpQoGMVKq zxJ|Etxw+x5NMtqyqV>31|F!D+{Hua=*V}IPu^DdZL%GtY$u12Jv}mwY-0h1s^T`_D zXa{+!;#y2bRTT2hFVx4k;Zc3(J0l&1syrQ=jpUfs4!dGA8#xofZ*UVa0dn|>xVBZYS#{pp zl(NawChq9TrP{?|Mh2o#60=0hP~P6&>5gKVm(!D9Q`mfhj07h=dTUu7O11RtU)Tra zjiuCg5)M|K!vT{rpDl^|d(P$da{V9TDzuKrCt>|qhqQW2K)|X(W3PWk2Zu{(39in* zpc_l^TuBytKd?+r=F1@l&rynC=j6u=2kE~HyHc*PH-x0gdVlSA1!a54mAYK84s-0? znAH?r)0Lqq4klSDeNebi`SBr+jrgq1@t&u-?%}1TV9F4rrJ@f-(61Sof_dZM;IO{8 zrs!T!ttC0V;B5Anj1?gT@IduYq7(dA*$N1;*&wPJ3J~Dnmd_3@_Yj~FX#^W7%$j}o zm`ZI+IhPjZ8zCxhZc#z-@SE4o=K{VDA*RH{8tp!sAtEHtut))^+5;5EMo5jwJHhvjR zWOg~&uD$4MiePvXqymEDkkrI~J{7U%fIJ8iqbwv}u@g5|5PDO)(N%-J-p0f)B4E>! zoax!$>~d9{=}ZhCXg$!VGtp}dXHd#U%4gCkI_|J}K>~1j>l+PkIbGrIkdFQ9$BR?* z>JVL)${W)}IJ)gGAoCo)Ts^G9-ureN(KIFz0k7>#~bwp7N3dam4e;vwbSUgpSm1U?p5871E*np}myV8*uVYV(Bc%O?j-=DI59eojYlyK(b)y&?K_&)2iD5{{(ihXxsGgggvHhQu{q+!pM$;JEed7(9`;f5pd} z-;cCfvo_^iW4p$@34Eqb>CQ4{486^!)|gA?z+C98#yG(PMdEJ1Dm;+CNuj(o5VPZ) z&>;ohC*il&o+U9!mmfCq>vW=qB27md-`4xO8|%uZ#0Ry-2NLFzs&v zmLrgN6wlRq-|F(;&nqy4o^-@XNc-4HS{8i;pdu2Ij`62DsfUtc_QTYO%r9K_;_RZ9#PuMQ$e1XnTWrJbs)s<2jg^Uc%F9QD82ta*G3OGGUOz4Y}+UlGYp`@!4XnG z`o@vm29-q1G%}svMGAf)6MYzXr!YP?cJiqa#_;_=KkHo)L^6Ue%Ne@o5|Ev!Pv}gIgL5pA??0NVBzi z^C1ha`q|YML_<(v;(nI45o@=+5<_vo+hoCz!%;#A8=|7?SRT(}0rfhf$!kt2x@{6F zLqFK85*A9X<{JqXoVF!8#*Q6cuHgG#v{7F}w`rAWe=XIoV$@K_De=88x13xg0yh5i z)q5aZkv7av&5k$Vv!);WBhP#wgcYFjRLHqrUjxI&pP^x_<9h z+MFnr>Rs_KTb_I&HH4V9Q6ZQ)niH7?X^tMp^?0KgJ$160-LlUb9?T|5%o!Cg_NT$K zSJ@t8x6wucamCVz1(qFtPnkFp*XZIAuJOR~vfkufb`u@$M$#K1{4ht|P2)Vb|Dr=9 z5++OcQ30t6ZmTafnXFATEMdq*Q)0Y+#o;*nAb#j`XawDh;%u&edj9pKb;^PF!)#sZ z{&KghBcCFAUsJHt4C@NWfBeQQUyWNB`_ zq?(vJzFQh^`|mV&2!Q;VED%EX5#>Qy@ev4zxACKf@nHV`5u6}FPY)6x?XjoF+x_fe z2GQsG`C{wUXJhY{aXS!X!|jyiVJWFnvNW-YY8(xF{_=7}`LR@|__?C0M}Z?e0@UuI 
z0f3NphYknJGUvj+llJF}MDsPi6mB*uet%r<={OW~O)UEmu(DtGi!K$)om|tYB0k}| z5+SB)VJ~~5iu_S4CriR;_r2}|(w?jQ3tk{oj93&cv-FhB(;X{xxT1;6hq08cI4q+4 zC|A&t1THA-1uZSEsMl<|2?hHuHL$&(#1Fr`@6&My1aMoKG25&ziVr@A z`w2~f0)Ow5`l(s!<)Q0!_}EHRoXT2rln0kXdUpbrD^PnEks6~`;EpqY_lls)fQqG( zAt+6H@sO}T4xq!=x6z-+QA(!EqliTxahF>FO;eUrS@B|)p#MXRSDrPUT#eVMmo1C2 z%NqI_kAq5uY`9+f`gZo?^XBFP;{H;5!KTM2ELJg9bVXgUQ8Kz*8=hv__l5Y)8P2h0 z9w(Q-*A=o~4*_C-{d)*?fw#jg_#&rG&b-_*v#&%%(CF|$*v?U<#e+|dGwAWrA>@Sf zD^4d&mDYM^j`95KphK9EW#mxQ-1so@j}D{uons5V>DZn`s2Kc0le<`b6023fF zIp}<=(Sj;V+d({j0&=K2ZJwFbA{%N!_s->X_RV~!$ge&hW;&82I#<3DwC9Ja>C{kD zgN-Fq3;uOoG}-h(+aNP1iJ<~2SKrsiXMLjzvxF~Ju#$?d( zc_Zp5_O}VZF$aFeQ;TDrJf_}8j2?m31t&u+>gCo;ThSvCjwy_4uI?U87UXuqPevC) zLj+8MU7#I;vxh$C+G2h2s#xv#7mLaAoUtazmU_c(mggw!ECSh|El7PM`|23?H^=m8 zRy>yYq>J`xKHc#Vzfc;*cOhVv2hOnd%A##Po!Zed7nrh z-E8`E(IGKgT#h+0W7C%o%7DmOnUoK$f~_QiDxf*z38@bj`A@snqF5i0%9=!Gvts2LO~tFU8xnsQ!n#<7r4+URP90 zr_Didq|+Q7X+@PO^dzlr;(w9T*I4w2*1Jn`x}*?+cAtA3?YXj$f#riJndq2eek;&C z^~qb-BLA(|?EJxem0{{(-_Gx^h={YyiYu#1$ol;Lk-Mq{;oglweTZFo*09^O37k5{ zzljAgmgZ=~xkm9S6-)9hc4qN`&JNZE+v6!!4Ib;N?r~`N_0k&3Bb-EP8T_f2 zo<~%NqP0Z}=$J~3aSZ|3`)C?iy#H_o@1f%-{XqsQ6$L(whI{B)=;*|XJk`5{VN|K# z!$Uko8}6?*D6~}U5zxP}xs?jmLKpqX|JXEOBLq(Mw!&3T@6oVc$-&+K4o~7X+DCC( z_=~c`j1U8^Q*!k0qW>=o=gm%13S6gdFM)ylWX8(?9%9=XNAyNxZ=9aSJH^G(s1%}^ ztjo)0#wrIvO(`n}{k!rNRs>FKL410CrRrA89^V_!aD0AnU2gM+t$A2H??dN=pTa~ZTxj#g0Vwq%-S173=bKdPvy;d>!`yzaRQWN% z0r>@OZO^0YRbpu5q@%Z9b7DbLE?W2yLFFyRc|vk$jldAxA(J@s8E--W`tIgf8^}9= z1p+tvYRtz;txXgIp@2gy7%273267&{P;+!2(G9rqK1yYqjoSQc%$bzzR6?b%91b_V z`A{Fq-~RBBEm0)M5L9Eh1`iZ=km(rvKAjPV-`EV`$3sUMLWu@PO?3H;GY+PW-lZ0f z>V8XA)?IG$>@jEnge^wTs~C$JEoLz!c9mY?S>R$b@c>F6TF9kXRCyvA-R0Y@HjbvR z&u7gx%cP&nWz#MqJ_$%^OvX*`Mt3js<9hOE9+J+HRC`&l6X41r$mqIj3zfku^y$<) zOEf#+AL7#r8_bLYe*#(d>vO*~0e(xMw+#Tm**%Dsty#9oVB|9t2CsR*0I1VoRHSJ!ORTLOkY1+2E)?l~>K-r1vDd zo_4eFb$pH91WdBu)>;MpW{ti%mF702DL%Js&3ExfjSzBqu>s!*ioc*>tJl3w%lFOk z)>E9|*Bd>;{r_kIkb1sB(<{`PXgV{c+R_>%1NX6+<8y6)_J!yGHIv8rXI;~o1L7LR 
zOpUIL`u6q67Xe?CXWpC0vsQ5#9MPq*^1<}+)?rEjChdGMeMrjc4H6^WJQ?MUvX)6; z0Y^xp2@>f;%n`Z%B$fEvWst%n)A{|lQD)2egyp=n_`Glku?Wf1oNXWIoc(I{JL^}- zpLMQENU;&Fr+jV9)8<}lE%N_htf$iiGL3Rtclx2ld-569^Z~dfy;)HCwJ+;#`Q603 zDu2)fF`}a0hlvnS}FIXa9<5 z0`QvmXNX9ki`Sn-6yPDAn^HIRiMh!Dqitie9Sov&6x7)7^cg|Chb(I?@AS;EaHX>8 zuu8AZBaZ!j+~(beZ==W9lZXk*k7|SMZ`^~)6b*;mGapMec`_!$CwOo1V)Dy$>U!`> z6tX_(&z}Wb|Iupn-%Hl}lbttgIPCMq_T?0pw-5*A^pAgL70`m2(xh@9I6m>L&X5+0 zg=l^>YFZc+bejVULmxkFI|b1e9@ABS{VRn-u?Zp+?vsa|V2D`cH3_9Hz|VCBL@fFu ztjOtfIIy@`d=O4y_bhcA-6bYRW3^mm10+blm&+xV+@0(Kfacc2I?LH+eqdZjnA6FQ zlDU3&xQ5wbFoV7QB6?f4LvEs6;e+0HFYb`zwIboQ$Iw*Jb*tV8HXjK=2LvT4z+*v= zo~$%SEBc0Q9AIpnvf%M}Leb4ec$ik5>1>*f$EFC1H)V^RqCb;DAfuivtMj{TB#J8* zsbPI{V#q5V4%_R-(qJ7Ief9S05@cC&7Vvp}4&tm>TVF|audJ^JeyJFi`Vol?rcQ00 zA-LNk9r~mY4DNQz5=986nDlY2zR%dQbhGNdve{WmrZMWwdRd-W&!-gqFy(3$FfbwT zu@N^p4@d*&!2?$v@%7Fk1FPv|-J&?J4+J&+qbwSeIWD1WXP%qE1Ggl!OolIueS_EG zFXO4%@4~arB5<95gW%0HWpO{i02{Ch=<5ehw$haj^z%*YXm)G;CpQ0Korqy-t2nPw zg3N)0RjZ%X$k11Bbe^bA0JNYe{4sjDRS}ZS87kJ_GceGu_U6_^Y3cl1g4)XsuDH@Y zKF%DSl%&d`nV1L(mk^w*eRDHQE|!^E3>O~9Ff#N60T~T`Ckg@TJ4L6`^m{*YVadze zLJ(Fge)lkpGW-9r_SR8VZe82BG}7ITgoKoIw}PZ}hX~S0cZak=2-1RdDBTT839@PF z?(X=lEuQDm^S$pFzi*81{KWvzzVE%(TyxF1=5>wur%+{Z`Q4~u_#O9k?94QUMn$3H z(8+x~SRSL4A4cT392RCC0UN}@f zZOp~$dNaD(@k(Rk<3Ek*Vs^VfV}h^kpGa+#heV*YM0&Qjp@b|LfmOGDsL1R>GAfpT<6w{7a z3rNu%V=E_K?%#p%t(Vsg43aB(GjYk&{4PvV0+pYy(eK}(q7M-c;O$pU%YcvR8c;oz z4fwMziWim#MXd~{EY$wl565~1y|d7fk@IPLK7V1!D24y1mZ+$h5~+UOb@8Yl$f+5IJ~}1!D!)G&Prh^ z)!2P|OijlYekP9+0aL8bw?Y=*vv+~$pnvzCMeg90_Ptn-up!l0XzphX&wz1s$pFeK zt2{K_sBw#Glod{6xQYkNk)j$%S7KXx@5wo@s zV!T4Xlusu6U~164)c#kmtW3=A0k()%RTt{seCYji!u+L4-jeV624)>bG6fj=vlG&1 zHUkCm{ejT5W;B6+!w&JJ95j-Ngq>4OM9bo3;-p(@!^qK-szSb4F{JE9^3#TU-t?w* zmBS=9T@U**R}X2?gniF3<#o*An5eXx%&GP{3ouCDSGTm3WSQfaa+43LenLJ!u$`15 z`=1MD2s;n(s z)tzABn|xWiL_gkZ7kr#~ygHb}T&!Q8Ssg~Jsbm3%K@x?StZCCO0XKsQ(G$1d&2k&Z z5x^I<)oYBlEty(}aEKD@!Ib zpRfJX`x+Y=JC76CUTHq+e+g@qd zb*M;2wN8pV#qY^#{|1J&MAG34b$$fieb8$z(CJf0Q0pZ*m0i#vpUl)B-HBUsQ*I{n 
zPb51=Zd8=_iKgjXG{3(*C2lSMUvLKKs7fJ$6e_drfj#=4i4`Cm_+6lMkb?&OyVM8= zw#j2e$H)X&_)NA2b+9{KnAk+KG4OYN2SuD%Dc}i>!9efZxdcNTYJAB2#WD^a*MLv4-Q2P<+mfV@TULhGWw-?)g zm-j>;W#B259`GSS5m6~V$cK&_QK;tq8}4D2IHVYMkhAkiO}kkR&$1RdDoYhl^o>9P zg$=7@B*1g$^IiwB4Uw)VLFP-PB3Lh&$8YGsbjQr@-jyu0^8~a6JN?No(2nsJO2(^T zpfXXq6E$yrjnh3v-^we3v_UEu`@$`gv1os=ERT76y596)JoN67S-Mp;9eT3aT+!oeB}rBEv-W<7 z!LUJdpH;%(r>9mO&5zF`F|+`A_WTqc~dR7P-yO33?yn#Akk7=I(4tu+VIBXLPs zI1w=Ai>C}pi()j22I=OqMVXD3zwu9`x#6<69_&pkJ%Iq;!W3}jc!apDlmxdovrADo zuqg`~^Oki=AfxlF+Q!&XM!Z<+O(sPhVbu`2+1lV8X3^+a{7@uVAwBt^v)~cgq%XYb z`TbBWNhN4xyiai1Hv56AOR=pVeNv(LzEj{ngvq3NOkBYD9hbo;_K%ZdfeXmE#<*TY z`I~c#^N|=00`DtQXFET$m|C2OV_9*WV9k+XBd^bt#k3S>4u$7dw+6y4Be%zw%IXQ$?hjgI{a>wNs>y zLeBeiGCUe&9=+ALi5hy2P7vLhS=%;CVTrQzj!%o)bW9l4`Rd%waXpsQxtk3fCBnqT>+R7&eOW zbR2++V}_Nevt2pCT!J(g!`yCTihyZwQCOzsTn5RjSqV%{hD*nzHwK`N;oBBogw#tu zWaRmK^e%8k+CoOH6AQxH4wrsnJJSv%mZaF)-0 zjmXJ(lxH&WV`sWva$`bKhS@36b1svns}-H1JMn&2OK~cyrcsAbo9Iax$H?|f7`Q@+ zQVhHdaa-WCQSP^pOLt2K2}hEH7^E|{7!EpOq@KVWK-4!`frV+YQ9(KF7}nxV%x8<7 zKTR*w$r!d34h=kfBzU}0)r}=J4^R? zXb&(6C;C8a@eEG}fQBLFL3;<>n)5e&RY;y!TvD!1ok)1X{Sv|)Wp2M<68-B77LQ81 z3_hJb&gjI4uJ5OB>)EA^GMc3;!}uNR#86n%gc&N+F}^x)B?W)M(?Mu&2|$&4Xf&Av zh-s>0@4ev6T4?aSKtyNCE9m`h*A5SKXonZiZ?gnUrpn~cMhM~YELQetDql@kaw_<$ zJ(Eq6UqlG0RG<3dD^rNphXS!ATATC8?zc@?I1G<1?KfpHw~d!i=de!vhg9l}9S_Mx z`WKJ#Y<9ue%LmK%#Xhv#2s3Pkhavd*JW|rz39 zctS1=UsqMEX>C~RWmfI776mT6fCg_N8~de&&=XKTpzlw+#uel(QLh)_(CSKsn;jmp zzawA7J*h$k74@lSprN74cMJ<%Z=V1sgbYH)bKGKX04J$@!7$ zyM2HL5Exu;R}_;PmT+h}7@z!uh!<&UJ31iRI6b{tNGm5HOiP|NX%an5QZV&)W|<9>vke zxm1G`JwyLWBV5vT^CxlN1b?ur_9me$f{E1_t(ze0r8Ww)cl)3yL%sEXt~Y3G<_e9? 
zE-)Ot|CiWIrWyDyP>{X0X)6sq^L-b+Jrgwu=eg>Phxs9aN%L;Bx@Ee>SI#iP88u-Z~D^-^ zE(|<&IzVc3pE{M7o=U>Xk-oPT0JEKvUa^Oiln%{+oMT1`ZvnRx5mEvQ(9!!tIJzr(_-2ertG@;YD#~H32a(F$v*0 z))IfE0m`$VK#OVbX(IpKzr5dnJ>Kx$XT6DK_4gjrvK*zuN>bj`05-U73be`fzKw)x zAZZCkp8^eW=t3lO3PFVH`A%b(-H8r5dDAA>fnR{uyRQ{-863x~7v>j+9L`8C=O6Wb zODZlLq?7lLx45jy)*o6HyY{PTwT>yiO79yZj!@3kdJG}8WP!&O>fU|zg(?8g5(9hM zLL;lN#S+7^Q`z!qgQ6rF85iB+@H>^jQuE2qU5rmZl++}&D;7$RDJmDKnc|#ma2xM@ zO7sSDTuDRA5554SE>5c)ew1|XR0%*)vI+2&BIJy z>+3;rE)jcRR2SO2s~U3y44)YwBAvYgJG>5l|{j04SO=qnJgT=N>zN(xwPT7e1P`#={-sR%wQvPJ3tFcGx2z{0Ms1E>dXki zlm$XIbG&rI_0L$oyz3K1QBlzk7YA(ho(GDg@380azJ6oMl>ffLX7$kRYCWnlNe+OD zhCV7;aVFB{zq|B4Co=$TLkYTYDkcB=!F_YP2vY~K2-NoAQd#ySI>XeK=Fkp1X^cMz zdZ7+U45ZA+vt@r#KmLJap^=9=q%dyILl{u4*LJl!Byrs<{0bmZulqCG1FuPsukH$Z zdUkYBOnf0<2ss=SOaSA_y$4g)hjX=dhTd~(1wz1#3PycfH|ZF)i!;3RG3y|l^K|mMDeyUd*%jdNN(7wI9p~1rAX^}nK*|(uNJN-FhUW-*vpry!DRQ(_f z$u(JxHQUWF944u1%}=qxso+b!*B;syiF(zeu&%r!QURwOq=H9o78>|>NIQP>x_5m= z^*Y-`h~5^3-9d`k+wYJEVLnYeAVR|fx!ql#z9iycIXs!vOLuG_H}q zrLv{rK!1I1g<*Y%<4+bvkb%<}LXq*j%V__z_s2u7@}r|rLl$<|^Re(ZjW{@E(SyBCzXw}R%Pczi zWb}PU>}RDHP+#pfqxeC<1sR}kekAu$KP7;&?5JCgw3TRoeyL%x7|FF5<_K4dTValiYlpMEq!dN;sxtelcW63RD7FxERC6E#Z#lBp))N3*Cdk* zui$W##Mpsfc;b(z^%!_f3~$@J3!H|DTh8Tl_K)^6rs z4N4Z%J7He&I0>S>uX=n9+_wH)%`hkxkkn|(TAxMNEkr4F# z2$J}@>#gKRvAcvH6_Y9M;?S!(?=>1W&O##0cTY~b?D!qutFK99-E9RcId^pm^K$VT z4$uSSCUIG3vEBHL^s_z2d2>%8=c*(d1Dk4fGOt3h0?9F7nz6`q=U5?8NVQ5LXyPc` zNNiG3Sh>Z+b-u}cRHGP57d8} z{Wt;uC!p75ppBGUuKb3Kz{NwO;d{fmqhlZFT4n|?w~I)a5M}d6Z2iD62~#v77M5RN zU8Tvuu|mEC<@|u=+P=DSf%wx!Hoe}=lR|C6-cAP)|HklD{H)jd6`CM_64S%*ME*>f zmdyvtXoq2}ch%cYMqBykigu~Y_wRi#{yI!uypVT7TkmzPR#$a= zyhpX20g@AHhd1}Y<%Yy*2DBL|75dr^@YsPgAEsl+Sn99h?|i119;PS!PEAc(HgJK2 zH|4uYf0ci|p*4+0&$hTybjBeOgOt>nVOI!&hMp>4*XwfE zfD{D_U$uR1WhM098-H$S5JG`^bI< zXb>K6nZ>V$SU_w*Hpz*&m0Q+&>)x^SX6V3MS)EUSa>ZmLd%t!bD(UB7@x?vw*UMl* zp^8px!{F#>icsD?QRuh@{QR?>%}M|wweab!qr{&f>wgn#6e+#gE=iQ=JKtoz%6Dkn zk3&glxHbnHJ_`HD8=K`7&GP;8orwSwd0-XNZQRjZ4&LIMjEn@O0YI7IKk505@OMDs 
zpT^K!+BNWH`N=>3{~ynU{#aV=m&E%tiR*U8p9nyn= z$%!!5-$u>>XjWqqOXc-wf4#m`1$dT91KfwSzj_4eV(o99l?wlZ1%TQ{HMQ*j_MCET z@SI%@U{^M_Pag!`ei9Zn*e9CyuKr)|ge42DchHjd_58ue3qH>T`us8vPxn7LV*dCR z4l1*Kr8=&v#6XEXv>&1*!$wGZ9sz%s92^-Jvy?u;_>(33R}d}(_H1_R|M>OqH|M)Y1xq>q(oCDe z_-jx6y%)ovuU~E|I~oWSL2J46ev|Y!3xU<@JutDl-I`?R*7SM*1wIB#QwANj8>>jG zC;4k@W*JbR!M&GM&O-74O=ef)?INl8cjxX?kKm<9QUWtLEH?hFgd%o`1<;Wmn{b-9 z?>wjrK6SN0RPXlM{qK3?25*t8VnX`Y!~9@TVB_5$@d$`w^(g$3Ec36XHhQ&VE~h%U z=Sc>9&_e36`Y-T@*d`Q)dA!3>@{ed7SnPIme%ODxgj}eeWWKB4!QZ>dp(Cbt0>p*=7WJ-GLd21|h-%d;mETEgx@0Wi9_{{$}&9__ezh>lO zkyxeeaG%k<+rx^MX`ueT-4bT+>WN-DOAjrJcgEn{1^8a^)9TSsMmk_-VI<&TnVl(L zZog(PRFUxc7NO?9s+3|-`MYVtf1dvP-B|85H-`pO@uS^(mKrf@bZ_7jmZ}H{e>>;W zFu#dX*^8!g(v%X9Wt80bngHpMcJbdwWl#aSU6r(Y&<^*XS>ORU6znsR!hio3g$VCA zvOQWlwp%`aNd+K14Y0{V|Kaz>#tE~5o(dW#!BVc(X*SR|59ScumlM;kV<=lmR=E>#$OId+xqKeYo~T2jou#6tEg) zU?zbl|HpkuW%TMh5*($daTee0-xBGt#xpRZWEPed78cA#G9w>82Q%7l$U=YL7!_bu zq>4n%W9nc-UC5h@6$EDw#sF*+Pwb>c_%)04el=<<^Cz)~(8HHxhn4W#X#eqwV!^-x zGsTYmzJUHQAwN2J7VJ~34RA-{)U|N@GNR@;u&>F_v8*bjf|)~vuP2WHYsBorh3li8 z*t>ulDZZYZkKor91Yx@!oYOUeEbrkOXLJ8_st5t_F>SD6WD?A?Al?C^?hEHj1*KJ0 zvQP+4i96ib*qAb7Wb@Z7UlX^O76C&j)n*P`vH~+`93?@)VhX$=J%7y3Lf_s9O2F~l zsO0AO#e_4uOHUbUTA@vB!;Epev=N+0w4%*_34m4FiDwN>>)KXg@5i61`fPy{Xi#)@Iuxv1x0> zo#5?1xZT$tWN4SDXkEP%j2#^!LPwF_EvTR!)EEvt++}JKc8tXo4C8crqy|@HN6#%h>*2ZU0qjXc#tzRQxZz zL?A36K4xKiJQQ{l%JWTjai8`){vfQk@AftlgE55_r9Grx_T69n`iNU_Zg!R`*>Yuh zIRt}TfRV>h7;6TQ0?=*k>7p<{ZIfL)`v-+-wF*cnEJy5c);4hQrCsdwV^GbNG9G>X zJW(~da(NZ=C@ldOEb%!kNIw82zRTR)Rb92~2I4%!Bfpn%`B`#B=Kv3$ zctP?POPGDUZc+u!elry+Tc|2Dkj~ar`qOQrv}1tV#H34PpXo2p?N7vF^oofR&8vNu z)bE!PI2IB-4QNpzL%i4`d%&<)X`+BfSW8{eOxWFANP1qsyLNnVLB60`{nq%mZ*F5t zOA+Q48V!a0Zu7?GF;Ske4*~=8-Fs~g7w5dmgiDb`Vkld`O^|5&^!TqkljQq6nwmNr zuGCz}c44V}%3u^-!++9PJKkqy)_=LIr@6~SFRjPX7wt3t88N@XM{lnn+rg>;jw+g7Y-;@0u~)wa z=>5KxJ4XjOJ`zbCBZ`ytdN?>^t0`6hj3IEcAyYvK%Mf$@YEBrD*@H!^37-Ezsw9?* z7Z8P?0$pFE9r)X;|36t6hoRU{yLFRm=Y5pFRub5<6^uN*NIOEl`*&^~$=^(iSOo|^ 
z8mp37gKlT1r*6R=kp;$>dq%Exo!n^Cy7MjDVCIL){hQhZ59aoC=b=NJ{nb7K-?MK4 za@_0b?z+6~5uXgl7vGHX^fTqeLC-i>;}bTZC{ZWR=in9@ z*gz!%l{SE#fYKM@F=^naTk$H_%LWDnly6lQ>)r*e9OXtig@+c?s=Cti=H#HT(-L`? z)*fWgVHf2R6*?B1o0$!otrRCaQ>~!LYQNE`zSXd)m5m7XaiE!#)g$b?taRvTXSM%^ zzF#{W$Ba4A`h<%F4WQYGo{W2~mVXudR#noNYoOZXp&UKsUh4K!v17^i;#mE{6$0qh z%2#^S3qvNNQt;r2OAH&E7r{e7-Swl~TY~5~tMtb`E`X3NuyQ9hm7MxI^`T2xgF&qW zuj`sTz9_k+!Elt((9lWS?>9ixfnT@Yg-Sbo zanLMJ46va4B=RgLy8&b)jzqZtiEdHbwKxX;ueulc6cIiIP`MfuTp+h3J7qg6$#^7ek$$odF(Yo{aLr z!nJtzEq0H&x@=htQdInZqxn2+08Pyx^%M1s>L>h#7gV4Msrq`h8-j~g+_{t-I7(3j z&D%yM+dBSEIq{UC4IL(Wo_}5Vy9d8wl7cHrPgfWX^~x%kO9b=IZ4Dywy3YQxQo+)W z2klZScxHGUoz2_**t*tbWrCd}gJo8m%><2hDK%7~4aax+nqRq!4tQ^0jWPRLQ646b z@0^8(glN3+E^y!63gY_xCWokr>jT32T)_y`HXNJOOitE13nkwcrZQQl8oXL}3cNR( z+y>nzjkgGUAO>WF5Me|#(yTDkSwJasD9Vmlsf-#gwb{7Qj(>Uj zLkTgO=<4~2?M)yLpwvagQ70xSnZdrg%NHPF1P;onyjVUOaLS`z0(VhmiqVC4p*%WFgWx+9^vt+;3F6XsMp zm9CI;haV(n)cKxLjT+YI5}my_>d($D*Koa%jz@IovYujMmah%b((@NtT89W1uJbK8 zaXHJ6Hebx1gmYIr*#`D+Po$mk>06K~P3M|;*L&=#mZ5~QL~GSbU+pBF$6u|s(2|W2 z2LXEDxfxld-PLPaP$5?O!D>FO$7qlPKT%yF8xT~>thtsPdUD^W0K~Op>t7vb{yM*Y z+(0wcSmD#xhW`dqP`|H1H@mu8jp_H)iKl~!a&XY}cW$WQKZfj0^qEpqQ`2c}NP43D zM|4oWv6}WmR_?BEYa(egcTQ?pMEOSS0AmWT#rLWs(1<- z$PBw`h=sgmm^-y4qkQVkK4YHKjR4rypuyCmpKiUjxITKP1Q6AIunh^#mO{S6vD zeUe8jljS$%+VxtLRzzhFU#IO*S)*$eB^KH5sU`{;UUaA>)GE5U-83c0(;7@?(WmRA zgm5&@gP#41{Ppe!Ds&QR5y*ANh_)^EY_8Hj%GbDz`l8#y(<0W18U!obI~+e9ZD*+- z30TY&@qWI1BWX7|W?^qJ^Wd_0zwi9DB>U&1Tw%D+cXYLUEAE8mCeh@73eu-pq@MUz z95;DPD@h-^3#cc2IufZ<#~QrfDpsOjC;~nQwK8$5Jx8#{d$JY;q^J95sFrwdk-klx zJIj7K{`O7mdD*U(LHkBR$(&wBvC~L6gt^7M)jmt(@a%^<*-u~i$q~EGuWkEG^4Lgt z{%-*Eh;!HY9VhA2Sy2$yoZkC;*YHZcz;9V@&}!iRy1bji5uO~*TLCt-6DDmA+$x!0 z5t7C|qfF6@OcoqSY1eS=x_B%j8$AhmnvjPdhJxjsw|(h4V-L?}fa!>}&IxakzW*y) z--KiAIN^5GA>&aaS530~0+oX-zJ#ZhrtrA!n zuYQiFYcn5`UtR@2*M}*tsEejjm^jo~02i>r!G$`37>d<4B>f^6PfR%yC zVP;{Gdrta|qm zBdKybjN+;RUsTgmTz`~zQ}!sV$?GhD!iIF=JWhUyT1F`)loNk(W|17ie{>}1VE?X~-$0dbZvSwOFY^tJO58DjGUWG=Z>|}0m6>0D@ 
z-n$16SL@oGXBw)OmI(0=jeQ{5X(6IAGLvGAxgokRHa3^hfW$HEUFwv7_u$iPPr^X8 z`xX7Q_aS^{ZO4XY$*#yL+)$2E#0PiBOydZdM@d53)+~^8{!}5aNd08j+4i{wfXWfU zp_dEOJA1hR(!fV=MOZ&S>p|%<^a(K~knN+`L@Ug%WV6SVakIR(EUxyVj}+@z55dm{ zOr1E0)gH6nyPko^xMeE-G)*S!L+@oyOp#E=YC%|4c@1xW^DnmyN<_x!pSp$rKHxmm zM&Wh>=Cf_|6!^ApK*hojayM~)Kul7lkb2W$YahYM`9*H+(z0M6$EO{Mp*l`@A$#tYZQ(<(M3ce zrBzPTehm!0#hC#uV$Y{aUAqX993#BqPK#hKpFcpk*$eb^{hpsHbjjs%F7b{@lcUqs zeS6s9aBY(&#eC$mbN7YS+^(l-2!_~fj&peBXG?s-8-c*ott7cFg=7)9<)W!bz%|!# zdYviYKe0nx!9ZA0*pAU7s z;^;g7J;gU_9y{Ab$rc-W`f*Z)@`W)8&_>Q_P@+V$?=@#{8Yk~JNjkX?$f$7fH2N}I zaw4yh@bIO83yX1N=$}3Ao9vF`O`vyrEn9I!+6}+7VHBJ8}a$}eL@t5$lcCmXJ&kT>0EJm zoYvxjG?5ICJg*`CiMpc^cqSS{pT@vaZlj9%m74wKt||QFOnV)U$))Kn-%?ZgJC((T zbm$dQsov>3S^!li<*Z^t)-<?A*qHQqI<^7$VkJk>@$8K$EO^eU*P5D;zLIRek4=m&;Pu(1w9PRFBDadfrIiOip z_(bgLNjyD+BrbR?V+G@!JRBMt!@0<=ZmjV9sZZ0`RF5fobzMA04+b zcSO@)EQP4#uVV9*>fTUTj1)`1JMLD*D>VbQXnl5A-?D4Ex`!;5yx+cRTML|bD;yCJL5#DszoD~6@dxhUPpHFzbdtmtZsBxla&nzY1 zWO$K(?y6Yt67{DJ@0;kXV;tv`N(K9~OKs^if&RYPH7=b!M{LF#hb>MDQ7=EV_nd7U zRy?3Be@&B_ml_-!iv@(JFqjj1xjecRE|h4s_wj~&BAx@5Y+IbOH|jjt5Gj#*%n>!j zFfE0SRRnD@BrtUZ?%(cxc?@J!$&xP29jm0Afka{#mv6oh>g}pr){^eQACAaL<(8Bp zko@ce4y(+4Vmx#W1PZBhwD)NG8t*3af-{1?F2k7vDlMVDF@T+tj5DeEcra`=1$z|| zIaI4zXDwcjiJ?(W2$7S5;5arqsnc1{@K2!6WO@+e@M1le@I^LJ(Yd+?FROzNq_4)b0i~cU$6)V#*+Wr`^9oT>wgar^{ z1E2;hVD@?i1loZA6UtR9KzT!_lvCwv{m{ao0vi}TNl_jO-WmSW1f~Lm4}4B&ru8?? 
z1%zx-B!)pE^)l!(UteY)9eZBxa@1BiaeG8}!$t6~6l?oETpN;GAsh0ZF82YYXJ7F*&mLo69v9J5y(v-l!DH1Y6TRFGY@~Jrz01xgd)uUu&4 znJr#|+gI)SO|m^;hSJ$I{2#Yyo7`@;w@9CykiKeG_+}ToJ#AOIOy})+abw`cdqgKe zu;7hEZGUjih+b5H613wtJyzvd;57B1;e27IAi?3_9AC)yxMZZ~%zd9w>QgEOql%1y z4b{G%p@?rtt|Ao~m*DePaehzQxx>O4CwYrImQYt7;y;(t%5Z6WzPil8C{#){F>}3w z7;?0eakf)hplitX&~8-vllN|2)#}eO1Y3iq{1*(V+=fkS-I}Fg8lrobk;DRG0wbX2 z5X)-5z3VA?ankJn(?7g26DRs6e(cQZGq_8K2ybXLOR+AthYO>f3?;jsh%Y5Mx9v1| zACMd|;l)+ruME}~d9Xhjr4Ac7XiS;3-yY`Z+j(n3=PMbU&y9ZG>veew12{lN;cME* z)eCjxnZ>XjVnHTia@ssS@ zi4naz**c=Z)2Q+?ei17}Q2V+_Tul!ZlqR2$V(U7L2G3y^l110iMiyhH;q z36m_^mezpU4!SCrGE>H3}Pzy#2sn0P6i)rwxjY&JvUxy;2lh-}9(=1=# zZ%9&J^qJpb@t!zpUph!YBUd#SRGBmVPT3?sd1yOHPB>toJatiFLH$IV1`{G}E!4Z< z3aB6r1uBuVIEjXCOD#x+Z>!^mh;xg$mS9!b{#d zhhxAMc>TBz(fkJsASD~){j1L3Oa^~Vc#dVbKv^V;%gVo*u|a3=Ad~f}U-CW5K6_|& z6Son!lrzkDg>~Bl1+%D9`7Juje-a34a9Gh)xTRba62IVe$X&Vaw2nU|+FrsKG}yxN zk9lq*@cyz4mn{SHV3Y1D$48O@Hx5Ut1Ad&&;5#NNBawhXyNlx`FO_Sfp?ckPgEQ`R z#PTCUH9|Is`9K^YpaUEFU^)#O{KSBJcKio}7LOewPqD!RebdE!WZR+-4o5@o(z@8i zSk$N$RzD{26Zupx%+#kI`XE)SWUjW8|fMfk67DjC0L^J-@OU3rP1Uaa(>=+nIj(LbO0;VfPoIpcdWUGD;?pX9{;47tH3-c}nXlm%nR<=|6q@Xe)}M;Cie zOS##X*d}`z>4ZvWefyh!;f?{Thq=N2v`8B%nd(K$ZcaBDnfhSHf!$-7Ph;~tb2lPh z7&OvWsWMgimwQ~M0q(Kp1N`W}AO=(a;M~@mJ zorotp#$q#!NGLT+Xw0X`9B6(2w$!P3fAYPwvY6UNm|M-Tb8Fkhw}fkwx^l@97)gA2 zKF4`RAw6CBQG(-u^P?WZx%Hvubo2ZB@>__`Lq9AaZ`>2J_f2Z37b2QlLK+zMo?f1Y zFW+8twT4edd1aFy2!GED-OtuiF*aIATFQ^p81A$gEJmGhb(9T&zpMU<7W3@V_cFlRYdzTB))uKbV z^VCbQ=YlPrNkUGZdJZ;4J94?5d)zni~eclqV{LqV$?O zugaQ?AsNH~GMVkKl!5$I7zzO~IVZJu=OGw80AO$hSSaAOHgE=^t>T63i&AQ)WESn> zhn>CqPo0FlH}Qn(4!JG5Tj|1Qc|W~8y??D#eK;z;IZ-cJVc1_^!|n<3>exxG&9;K3 zcG67*h`f9^}G9f4dMZm;jKwivKpOL}ei zEEb4=(O4C9s~(9-K`~Fj$mUUM&Nmxk4?uooE$Bs&$t0DqeDrZ9mn}JWz{v2SH9cmG zl=aPVpB3ofqJd6^M3U1QH{z?e_teqOmJZAx=BnwLYl%DIs8hcLgvn$z`&4gD3B0b( z5$pi>&zLOWii;JIq|b}UtsNYx@hBG||4h{fn!Owzt9L#Ky`+*ejX*&%XuASn@4GK< zs40Hh+wUkAc?QZv%0_wg&xs0p7_lK9ZD+dr>}A%w85`);=0BCfu#qA=^|ohkN4@;a 
z{1O4JLXJtYOHq1>#JM%J+T{$kw$=;tl@yt0TjSeDuQ}tU8>FjRUSD~#kNs5Vh%AKd zNX&ry2+{pr;i%~Ru1?&R$gPyR69a&_FKRysd0?gfoRF_$o2e&!PyS_>TbCyApP4|dWrv;gVf+q2UTl2 z0|rIg4+bUmizfy`oku{{i?d;&6bAJE>w$CZOfLJ4GyI)i z0;mZv+_ovjpp8GYSfQ6UmV$q7{TPY69B_~B$eW6{?vZY{NtCoh*HZDE;CY@gras$1 zgk4`+)l05Tx*0L@LDP%Q3Ze}zWpQX*29L4u3f9TD%7xE)Ggd=)5B4}M`S?*A9rwpW zg?yw*Z1QM{L=fEY0~PhG#|0RVQfabVtRlzQEk=3J4mR<#8;w&y7cL|-+Xt0$0f=ha z?+oK)=F2#hs83>AFu6+Ai};=gt!M8)4#hwf4

    `Ks^i5!)B&^Ldr|Fak;eei8=8j zvjrCaz5TA8t1?GK|1*3?;Ej9O?l7Ek-wUU{a!m|UKNaEbVwhUI0^)7US# zyqFPbI+46uo{@azYCA0x8e((UPs*S0#Uei7Wy@ncysi1%57RnPelyR0X#Yx@N?1;LZF+#QSZ z?ND`im5qA=C^YpBJN>NExZ|jYwXH-sZgU>LyHSBBi~(Hlefz^f6h@*h;z~NI-5x zb{yySbicE|n4q}>nq)%pY>9MRyk3`RYPmXGm(x8f1K-PD*c6oxKp8x90``L;W(VdL z!h!Q=I`y`)g&OKu7dRKFb?xpEVr`hB7QT~LVDNCTK+>^DEI>ZA*xp%F<2{x*CiZFA z_U&cwB4uI0$b^4(4Qt=-K>p=2z>G9bosVXq2xysV3EwMJN=8)APMb?QB5AP-O6#}z zS$Bmw@vxBIhRdlJPp$aAFaKHZtE^pnNHNL10xg=xsF^Rif?>zl1J)Cn>S2UzsV$4O z`J6SFB#0}^=Z331Zk)_k-C%N$e6_K;foN{c^ssfp$tDy-I1Yv-3yJe{V6H87zMfPg zNx6MUT~+AoEEqvv1e3_B?btA#I}1O$Vl1??h`~feUSgHmr;$}0YOVWBp?#*lDtl>JM*>ciW73cs}~60T`kP(O;R%9RwCIo`TsUD!Kh1S9%+E`sMrG z2WUO7U6@;_o)vcKNGT8qQ!bLCSmQ8zY2Oaq;Cj(IcG&?45chUl^a0U5sp!oc{i9Q8 zXGY#PMfwu{ZtTuz>!bGqQRQ2J!%wBc&2~)~``&vlck6c)Wq!h4>MO%XHfP)$ydk5z z4mysAwmL{EWmNlg1@PE$9p@OUQQm@8`7V7)tfBO|%KKF)r*yIpUO2xCONq+MHD2{# z=+lj+p(}5K(c3cf#Kv$xIL{u5*tR@)X!YZ+jlL(%^@&s9c1B#sn=X~ijcYI=L&5=um=A`x*}qx3mx_5EYjgesqOpEDHf&A4qoy- zzZy@c19ot5W0H_dgb0ff+&FpGkIH!-vkJE) z$eaTVIw~arE{U}Bcdm5zD>^pp_HhLJU6#f2~t#x>!w<|l?(($^L*gdUq34MY#> z6ioLe=K4Q1cF!$#UfEuq`jYnQd0C?|^fKiVc3y->bmG2#kN?X{ZWZXtYXVI$X%3q& zF08X7O`(H5#N76L$JUeCtD4nP*iZEM)OXzEpFe1Sqy0_P5%pJ%(5D0=hbJa?wah7^ zY1+M&q&y!&2Bcop%6G;JP<85jN~Vw3mzW>sD=P^U&(uF=)Tpg)ag^cSea+}V^F-$% zI{(2p=4j&jJHQX2qqIL83k1R_h?I=B?{6Cfze|2$xc<3U$$5O*On-@ZLhncaTogX= z-#F0PyJH`|PIR+Dh(|)Q?s;>4m2MH;wtZ_}{#w6`1&Ga;xgF!>eQLBTjfs-{j{i|P zxe2t{i5{Ae^`{77#1z|n?31}|{$-y?Rm9>)tjO&^9BdJlD7Wk=nw%E|1RmyPzmD|Biqub*8O(uK1}=JtNCH0FLW z+s4turd8=s72h)5%nAxMw!@~X^-lb5QADAj@=XaV3XIhSbNfC}>(wcLMtsR(R%{7^?e~IJ@Ut;97xH#U6b{b)6C<$$ePDi^BiEJih3`$@MeNtXQoF{ODB7o}&0%5@ z$d$WqptyJ+rOwck9hFb-gr@TTKhoYZEUT_-7ZwCWq`Om)6p?O_QV@{tQaVMtK@^Y_ z5Ji-h?(UEdY3c6n?mZW9-_N7(et+!m`{Tj2z-z5F=a?hUaSpR0*7HTmuAH4Nm**S# zMhu*q0v^v;-lyINIS{?Q-&k(vxd!VrVm0{@ZF&ug=Qy%H2ir}c&VYU1&K1#^YVxYi zL%`s-sJSLVobi_mA?;u46qgmUb4{DxlZlol$7(3}EQstx?m2VYXOl+8>WykEEQSM) zfOf5oHiI)q`_*jvtxWYo*q0vcpu2vn4=o#k)G}iO&UNguJ#5M1Z07=ub^}{H30;z@P 
ztd1yZ1@pDvafsMg-7nU3*s|5dM;va7Mb1_;sT4zn;uq@!2{Z3f9Wjnq`i9{bWk@)U zqZ24UDD3m3YL`&#;L`pm2+963;Cn3Xoyzv|7rWuoEjUZ{lo7Fa9A}11%2~LS+81~f zw}y+L`esQ9T8-W2fuM0;Zq(2A)!qz2b^F)v0R%7F?ROoiNuN<|6p1g6n^=UeeP=)9 zn)O@vKcPfmof6p&@rVQiG8k94XZ`#WDD{Alj{p_R1+fIl>E>CjeQWq%G%Ud9D zfHC|#0Wfwu%3vTZ}wjX zFd(r(f!nXl2yi*k0Go`HzNs)=QU3znM?6*(WZFpQ=BiQqX3#B-U9X&}eOCa>hlqb1 zP$x_adVEdL@mHjqCMzYpiY3Yc5r@?xo?h-I;38H<3s0l-J$)IA^+2up=TO+M_hJ$ z*r=P_p0_IwiLlR(uF|)U`6YMru;9&=fMwC(ciOlv2K-ZY%B2Bflex@RhWA$r4FNHd zerx+u>B(CFQ8<{aa&d@+>C3-q6C>}0&tzYlmLI%}wmGSA^!X}8VyNjCEJk~x=!1?! ziChIRTQC3|uX62QZhA;mkDWNji}4Bnku>S-K4D@7;uPq?ZLML!!DPiske+?|KOMCd-^7~d*zU$&hG!wck6Y!HW8H9 z(8?c*#TT&`bH95r+l-c~Hj-=AjSEAF@q=Yf{+>%qdVt^d!R81mSyv2?ui=nLYr*S< z`;5^Uu5`sh(=Q{d9g8R=lKRE7%Kg=!KG=ct^|r6CkX2c(yvC?aiaeRwRn2)KXEF*R zg+$w9NuI%+4A{;Akwsf~@j#yshIfj(ypQ;)-F3FF^hJ!U4vo+PvEmN%fZ8gnoeekBEKV2WORKKjc}jKoL>Dm>`+#kQkz(5Bu5K zGAqLVR>SbD(GNG@BNP$fpw?}|ALY~+_Jk){c#y4nw-wDRb=C2_^LbZW?u@peZ@LNFF4weLraoJ@WbpnCfGbv7l>Xw(wQcP%=O%5FS4gR>^m z4?ghW1}$@PLOMf<)gtVEm!j2UV`HyR_DrG~iV6yL=LPYC8|&v3|Ii_x9iZtzuRx+J z=+7pk;)lBF-FGe?F3BER7CXSHvoH(y>a`;>Fza{C}M)>ssJ2T6L|#MJ(2Al8XIlObKTjv)=@OiVS_z+Z)GC-EIXzPEBMoDMP*aZNEk+ zzY1v<#djp5LC*UP6a1ADOBQ$owe>~po!>v!=vLf75T{>Nxo#7K-hi+T?wNO?8DaR9 z_zae%Z-6TKtT=;i#Mn79v^wMq4mtL1m03xFE^+z9 znKSNbRTE$8AweJ-RJzha;WISR^ES;U&ow3=(^pj@N7!v@t#>Gun%`{)yCZ``em$Jk z>Y?q@Fah`OoM;<41R*zmKLB>y#V7 zgL!mG<-v9y**^pG6p{}6>5cr{KAekH6Be5XPI#Z<&;Q8DOMNplYUO8&Z_p8Q>xYby zK>ydhb$~lW9|T6FVgK%H_rH`z57a<&;zR89ho=(f*@y&`3B@h^liZi#yS=69cMtCzH_Bxuw#$ zel&e2iyduk7^4|&U1crtDxDqpc|#VCBY16+s*2OP^D#CPHRrum^0$Zpjx_r_PelA! 
zDODY-+P$xc-qrPF4})ED{!1{caf9RU)J5~(#HJCNqTM2J0()9@Vn;36N?vzDLFzM0 z!*usOyX04T_KUGfybo9pdN}Ql+yp0uhX~1Y7+D5AsNT$<=0vrusxY%OBocMTa;G`0 ztxG&4*Gmv}9vznVmNrw-ooSiG(dfK}&50{4r6U!rZ#mtU%)DSan)x<8%8}{g;$TU) zkfl9H;4Kk6cv6-WopSgYInPNlI*_5FK_oDb)-ge;v$5_xQ}?w7TM*Ai`qHMD z#?^YZZIa7n2N@)nJe{S9YFSph@4V8I6F|(4{ zFT^{hw4rw@Kz0xH&iYT_t(H13R|=m-@=cPx5BP#)b-KT*qR5o(N}63sowxWx3(`G<6@^6KKHX3dArpwU&5zK;lC zcxpoeN;@>JOUwpi2!E4RdGXQ9@v?h!!cn#++f$uxkb7)&8i@Ke7lcCzPrfb{IyS*F zMpq4eG%y?SE6SOydA3`lQ|0&p6nZH1W<*;w9+5pZ>`dk9{HBtVr#s?mu3aTOBJOz4 zK^s8hrh(?cw`~0Uhb6iN{|oYNZ1@SX1O5=CI3dP+kmmW^(JjG9Mw)URBzwbUvaDI} zED1Z>hu0{U>h3G+cSSUle~UET+nUfS{oO~#g~4nYfLCfTXj>DiTA_Ko-dUcGl?qSU za71(vE$aU%EpTEdTmlWf(1xhkN;6>ea_q(W;NHB9q7#Dzo?Zsn3+DmK?p#BwDjt8L-Gh(0yi z@W0evi_p3O-1WNT7hp{$=%hVGH95Tyk%+fU#O3*tk%83O>luA?{AKN=n26!f;Reoa zBdwMT``$kyFv3oC<~|4iVIXwYfL_{ob(R8?IY$= zusiUu_uVUf6#$n**vI$7@(mVqiOGdAWQG|ouUY12fs{_-f;wIy2H=&L0+?v! z8c=_W6CDnF8cRn7pf>&hKQ$F%M#}jQ76ACgQR#Gd?CBWF#MT(o>yS@G)biR=-{-Nw zn?DCzO~m`!@-Lks3F(X-)iS!jY?2(_BPFPrB{det<)--{*lN#hwLMOhFH=#D*NHi^v~extj6LelFZZT2Zb+1~mJgdo5( zcmbfG#>xfX3p=`56ke1iAG9|E!;@MM+8eeO&%XBdoRz={zZ-F+gSX^E`}sRDR?!6E z)6u)Jv?pwbyktai;kgpi!7ZVb3|}o2!QkkCrT%1%PxgY>A&T&bgG;UQ5ef-blC_vv zFouG7;?uMQp3@rlV?q+b;vU6Lzm5JbqgyL%X(>FvQefkheZU-;_oa64D1kVVl$HCxE+xlN%S~^hqmnP-q{EI?WsHFB1mJA1mOKi|TK4b|g z|H?5%ij=G+tS9w|7JxaFVM-G1`;sZ_^`u+}n=K-1gsT~K!&@b%lP4ujEeE4g{3xw> z7-ZGNk$ba0VGJCBdiJ(PBfWpMDGWS@4~Q!%M@HQbHn9rrvT3pgdJ63%_LerY*qg`F z+_!YNH0&lo5#Yr@8ln5ywuJOI(7`a>Fn!zcWb>6G2w^~T1RR_zuO0)~Q67CKj zy$`53nx@NZ@_69lbww+$htpcW1ke%UyO{SC3$3#?<=M$+xcAV0oBZaJjPX5KuiJDR zPx!4e@a&z8=uv~>;DBfU)-K6?l1}3E1^j$jtq9G5O#vTJq#%48sQm5D;YJ+by-PoD z*qzLEGSh!^L|`>p+4Pg2cZ$Sotk6OWFjZ?ABg+-=SrW?P4V=BCth5kU|fUru4cIOfnMB^avvad|AjxiiTVZ`}5! 
z2%qf|uMjN~iR}b=g_i8`l-Pi8<|{qlv2S|V^<6ZG1}oloyv z#YsC`7IU2m&dm+)6|7$$x*zSaOH&6YTYO9WW+Yp+st+0}{@)U=LVp@XRFB0#b)&Vd*6!isoOq<++N;P*gzS3JFUIJGnnhT3neTQMix%y176#i zkXPwc=qVuMgW*c%MV@aRTU`!zZe`_x>J%IEr^#|B5&$xK0w6{ZP$Yd71LpHx(4nDB zj2>Ni3G$dlMS~1)iEcaFcDvvvv;kIb-o_!R+x9byI{ChbI(#nlxLq*`4tyd+wws*z z9V513L;!z*Qsmc^QA{Ru68IG*T0A2~> zRgi9Zd2~d85UlUg0RXX8tRbx=0fHekA>aKvGC1-1f;4J2j0dzyi-Y)DvWPBqy9UO& zDmTeD#wxQS>nQ@LzMCqG_wxWTXI}9jlh+vg(xvmrqm4LIt-Nh&KfuSNFQqZ}1662= zu7LN68)!zIsH%Lule^KKnfm0gq9a8^m{twPZVx)PJE7UkKC6myDx8Z!qH;i$sp zq77c03(`P8M-fGLbR2R@?a&NMKfG=*GcPc6*oH#u1Ua{$p!pkT&g<1pGxSpoX5-cJ z&lW>C+FIn9(TJZ4ZL)(wd?B{Xg>VuqmW3c)cj|t6fC4}g1m~0$=5MwGk%>4_4h{}_ zi_+Bmb=zk4c@qj_{i5wHM*ZZx`S*gU=*6KTcZiW}2e)YT2`XtYLWlY?Npm+sIX!s1 z-rLd6OF9MdHjRoEzK0uiN!rxQTf54Z;+=dxRZ~Pxx8$gk0nD^V`(P?`mI9W~bZ~jr zwv(9cSp);6-?el?<;8m9oT$zg_K4h%bi`OhIClk(_!nfq2B0tuJgZR5 zP`SNc;UFsR`+WKk5TJSuV;K_iyjQL;FUvPecfw}S;*&fd{s4=83$WKH{E57MQM&{G zd{;Jvang(TA36EAOvn$LoW8St86O(g-Th+n^lSga8+Q_f>Mo7yHpX?pukzdz|)9rCrphKY#^_xB8;&T>qe)20KEA z2Gj)KeZWlP$jFi(u1^wCDJ@6y>vgAeI5}N0Seq!u0DV45Y5AD;23-+%#H22K0I#60 z-~-@w&b;onY^+R1cJce-&VbVzU(UIW{K2SoEayQ8X`E9QQm;Yh{v>FsK@xSkXLh0K zM=)C?DAj~a^HzoGLC3&x7@!^_>q$_C2SkHoQ}H+cmGQ`AjSErMstQYKjxoN5aEyc1 zWSphb{-oQ*x5F#Tkv0CAkabN@Duv|KrOM$nn}%_Ft_RB%(LDq-J5Wjc*glP=;CE+I zMhEz5ku~PF7J|uYcW)!vSQC0P0B-`)ZvXTP1%I#MQ(u7fCgP6DFGI{6tG1TfCBqQv z_{dF_Ki}p9fwr3`^6jwS zAP;mJyX?+i9%vpdNQn7HM@MgtS2*}K&h&C$511J$^f}Y*qQE2J)$cWCpr}tC1evi67{wJ@B!0H&afzdSa&T_i7nfe>FRn2dvoutd zBW!`9uKGhf<2D+F7J$xYQ&3DkGF%c`8^hGy>rQynopf5szo~2dJ4< z6>7KT8SG0_BLO2?lzI%;_TGcoIHAX-XHdA3jqq$EYhk_%1J6nZHgio+Cgo?*U)$&x1z}2UUNQf* ziyTb4>9xM8J9cn z9q^@&N(XE4;_h~D3B?!0TZLANB7{>*1B3TEM(uhC4X1xS19Xq&FOB6I6P|wPRTsqT zzbhK?c1aU-vo`@RoSfcesk}kJ$LXNqXB96>Og6v)qAM}jY5|9^>GOUDdZ|hDgf+up zW`H=;$dh*#07Oa8WzKJl3UcD?`iSAh+bjJ9z>if3xEvsmMH!dWnMX)9}=TY=@DL(90#QAF#RiRY{ptWCVK#fSZJXtUG(z9 zRq~_8+BP}&q#7voa||2rt3Y0t5*(f9aIJcO8PtY&uou>TU1@*wzm$8XJt527(OZ^< z9fYVsY9GUaf&`1+J5G)q4LbgJpea1jS!RRio 
zrO~7aWwPzJhhTWhM=op3tk{4yawb@^8uaewWH!E$drgXXxMheowr zIDSuQjRr3u19;l=s@rcnRj!Mz>m=KPh{ZJb?3gW?G6-YTKQy_->LLi}1wjhD)42WvKH*rFPHoybuvL6?l+meSD26kxapiJp~Pb$m!0QL}u)R+@MfC z7=FVaS~~{%m>!j2O*{X!bNEn$%C_O!UjS@RSPZ!5I7jFrkN18`T<2)Wr9b zmcYEqnU%&G@;c&WRs-=3*P3*hW6&!W6kojpgK-C)ImcmT*(>ntJaZ&beoaM*Cs^;Ri(!vZmgh(_)6h2;EO~!Q~y!=vmXulD+7;(fZK^5zzfVa?xq03Nv1ZXbOaZd z(O!!vX>@4Y(7W9?VHmDJ6AxC!)jUl zy)2YQ%h|ih9rVgv9G?=#T%6(qW7e+-(+V1{!ZSPK;BO68TQfDCAS+r-#A}chu8y>K zPn#E$)RS_!?2U0<I1910KW*UTX^Xl720ewTRz2;saTVjK|po82}KZX6sF~ zb+mkapz}RDG+cqa99HADa($P>N88B#iVzB=sKTCTSeM%+x}{$#Y`@cF@x-MpZuZ1b z6(fK)WO^RL3 zzN(@9zh*Va4dI{Toajzb8Fqb0guVepEM}k`jlP4Yq9I1 zaoVk78=Q~1ws5i}2E`|5M|TxfPIr!3OUv16ftpLQIEb#IsH^LlumHWBueZlcmbq>s zl0<$9a};v(Z-b1?(Uzy7_nt=5%9%4YCNDZ2?A<+au((Qu+RZ*&WbNB-4eF& z!`F18?>yN7(fh?oMNvJ}9gd~}C*bUaDhz<@M^9J;F|Fy*F?UdcGCuzVDYp7S0IYfW z+uAZX%z%NsKm_-`R46PUHLC*|+}G>9@Bj*|nc>!e!gUSQOsGn^NsaVc-=NK*_eMp> z#C&@LG{`bhNR|u&I`O3q9S83qM0-3@mSp;r4%zT{p{ivqve7U)aGVREul@7*ug>96 zIxu9#&O2kA|CBPWS40{@XY`r8Vg5^af4%Tj68N?CqVzuGVsOywt^=Z_^=&ZAtp0RL zH~N8QsUzJU;UjF2H_GdI;58a-2I2M+KyNmUM$3Rxcxi1YYsuU)ikb_Rps{XIQ9pmWB$Fa3E;zT_3gYV~;+ zimg@OQ>&xSJ|ngjF3ti!{;z$M1P6~&`li&cv|sIaNEt5nO^^Jg4Do5etLn*^|M#49 z@bI3nH0Y0%B0?_Fg5UteYP|*wC4Yp)7$9=xNO}e#4m}wXA)W_eEQ?)cj&O>4fi{(6 z&=zYd6Oml4BCM5m&69Kg#^;a60O~F@M;9YJ5C@1>v<55$ockdejbK2p2!iNiJu54# zux)NX3&eLmJ_$CvXccM5f4cu8TKgdVH#@YC`IMkhh%oy|&T*SYfVrGmuSwF_`ho>c3755a@w%;9w2r2N4Ks=S|aG{={! 
z*Fe7xE&yP%uPD*dM~m}SfNnyG_?OhLoC%cq1?owa-73e3ua0IU^gviI-_ZWCik`66 zpbuAq_@|BfU)QmZWNY8WhmeE8p68i+KObB>HNWA^nbs(XpNNR$c^uj^MhajYZjALN zJOc&gg(EA`a%6(z?ib{pa1fD--Gvl-q1m?yc2He|DI#PO_BPD=$Ro{DwLT1POZ^Ds z>W-LuQG5FVJI4CBGnI7to^=sp#@8<(U&<|Mc}+_BQWa1S;v+0<{jrJw(*dzEs|qb( z|8<=h2mWR1>=gtH07H%hwnp#>tNm6)X1!l}^EF1c+CWqhC)_XDn z&tj^NU+!Lj?60rNG4S~xAYniBaonD1T)^bQgI9#F2E$h<~Oar*|DIwTA}|(UXHQr;jvDBBaSY+`u?cw_0DH$q^{U zc7U~T@#mS;K%c6?$$PJ`K>C+7tNPs%A;W$1LX!sC#e3Kgl_pw{)Qzw`1!rLD)r~Lu z2r3}ee;@MyOX9d8q+e%BebHY`Te-q83J3ylvHt|{!{pQ@&#&|V2K2SFH;JHJh?6jL zmbmQRm4x^~c)XVrk*rr*;ok~2wx0=fT|IL4Z&N4q3GO)oWXmNbYGt6^%7i%EGI(Sx zeXbtV9PBU}k$|h^b?PV(w3*e@KIHiGIMM2U1{Ni9KJT^M-y2R2nre?y5o=qqDUsbG zpM+lVe&#+0Ps6x7R{9Sous-;F^!(mSUt9crk2s7+1`%j1 zq#|UlxzNT9#Xx4yVPTBr>TIc|fR6)C*<;uFk4T7IyYC-z?f;zqJSK3|%mZu${yu7c z`JQxunb}qJ7=I`fQW9bi0x7h)N$h4ZvB86F#ov6>|DwMCS9SkFh+?azRzmESO!Ac8 zQa!dmu{U&&V@ND6j2dumxM`6Ad3!xNb@5I26x91n_n&9KS`ppu+xFxROE7yO3p~pz-}{1JEVFdp2P3rTjUA z0IYZ)VkC&HYJE*Q-z7qu_(%fm=dW$WQkv_v^xOvj6W^+d^!hm=((IvyGMm9aHqz4$ zdMK^M*73h&H}@F?=_5je$SB~!c}spVchw{D3y^H1?ed^yUrQ=2x)Qi+0)4al^7yWPY2kD)D7f zQfr9cZD_sxSiyZ}0%Ndlw*NUjFxbzyx$&>14Wdm~b}bzvy!!ahy8z@7c_{ zG}s)8+E4MIHwN+U-Mi%;m+l|e5#OXhf0GI5-O*Ng`%>4IT4#NuESFjJ{Usd^t_6S< zV(mpn^^&@lfdJ?uW$=f9KiTMM;}pPm2$27EOIqCs*qJ147Z;ZlkW3&`F#ibZ1~(3J zLo|4NYa=UE)=4luS%j|~O8rL7>vw#t;d^>V+en&2RNoJ@XyueMSj(G--%^|Ufp&!Dh zcgg09taynv(->gnZ^Cu`6%AqF8}r~gOsF7Hl%I zn_sQO)2C0nq1G>R*pP3~qUl12S z2k!gO;rCa%aRwUMQ5v>bd36_E6YveHBOk1*{<`TWaD6N7ufr+?^9=)?`DA2tl=r=l z&q?IVK9E;J61(CQCbx$(r&!B#n^j+%B=HL&n}GO}Qb$YSdw)oi@zxshwBIB#`}@Co%wQ->2uPhQ<8^y5Q^#}Z0F^Lte^P#|=oB>R`gHM$pAB}G!-q-wI6%9h z-~N?XQX3-4t0TZ-pM_wP5oQBo-pN_eG^<8O;O^Engo*$Uhe#d-O`zbH;57QMWo`;T zsSK*6Jjwzxl)OJ@e7$v5(AI4ZzcKn_>pa&X>MUNPj_4aD{I7%w-G{;Q75EpUj30l^ zAaMo<;)^`<31r4d7C9I}-64Ew^q&LSyd{=#(FOKL)CpdQwT!A&I0UShhdb4&KE3(D z;WTjQ+bcs=%p>IozfaU|(00N+Zmopaw2Hq6QeTLCgtR($FzA?EDp04>Z}nni?g;pkh|wl@U$$=O?j=L z{xpaGDku%G215f*HYILeo7^0H4vmd zzb(c$bf-fJtPKmo*@gX#bk?I2X$deNxNZ|Pn?X&|3?l)LYK!X^i`*Zk^MfwhBx8mr 
z?39}I?QF0&nGae7B5o57koGd%yMM)N5q6;FFAQisr^XVp1{`mYDviXH_;o9!;(Z0T z;hzI~Vc*tg+~n79mIkRB=iIStuJeE2OaXec-_|w-f4^BAjc!}5Z=`oTnt&PfyI)e| z<$58~oE{BLM1oH_BmPOv5XJBs5TD_70BSU|Vq(=wcZu)WOvMjkuUVziWi$XNn6Fa8 zczp7PKk8r@5L6!7@SjaI$cf~d1l1&1kSrQ*?GccSjuwp^BUDt(v|RA}pBG(sN(D|h z0D8~uz1CM^H8T8s-N{lRTd@J4mv>FQ%tG+7{+mb6mRB1I2CMj*@N-|A7S$Z^x&to7 zV+4O_R$X4`bvxB;uB^zvA`9s_PhdR}$n&n?^!KoH4|1EYkY=u-Gy3_9ZQk%1CF)Zj z#4K8Gfav|5lxL4eJ#J&(f128Fc*QKT<6t5vH!Pxfgjn(W31R}zyHw1dU;|0Bv#b{y ze%KGcbtEIsT*Bk#dvm@c2GTqqgKO*uF@8MzHnCJT3Z9)uumh`2d3&_)1~iz87}?2D;BrU1bG1xZw( z9jnUp8!68t2m!CcyGu<)v@~t{SljOhmQ_6y9ph_d zqFdATr@BB-YQo8N*aNyx+)ptnh=zWiJ}iUb&1gN9hE67UIn%Q*3fyP6T%;CdngQX; z4~v`TBYal&QK0Wx$U!)O9QWqwl_kvmzvKLBo8W#OPREfssg^BE9hW|kEeM0{eD_uS9TWB zqqd6Jy$L|lM^0S#Fh@HYqJ0w^z2Ub_9wKc9Z;}AqGzm7CS7|Pw;ThtU1*A>2?RdM= zyCUcCi(rZHEf_fTC1>5Zhjq=Z`OkDaJu=4LJD86aXMN|fYojN{33Mt(7lQui<{z&} z4DvM$*3L2b|6GH9s+EHKPIB~u@vL2r*@+wUBA)=01RXI5;aIKcCTMSMl2l zmmF08daEv^mb!X#P3byTwN6nZRxVJevMn^Lo+k$qW)7>s!zhxISB{qwG6E=F*rfa2 z2)<*a$aIOLqa)QoCB64r8XPSp6(z0Po2cTNU6FJFpVAby2um!P1q8O&1<33M3TT4E zqJ>vcg3c3Ouq<~c#_AtGs_3{Fe4Epv6GGPJ%l+nA?+>ftHL|jLvYJa8yv~>~m7I&v zw~emUYC8^$O_P-e;cOX7!ZzBI`4RV(qS$s~rSKT*7>4r~Khm?WN|w>P8~zLd-HN6> z_n$>67Ly8KO%vhb(kNB8WvI!C!arB+e&=%wHjh~dOc3g-9YA_JU>>G6o7}!@)Wu$b zU~BeZKkAZbtbmX81V>e{LD=>yb2gyyTcZHZUM<;@;>x|q;#+_ME zr-q(vzJ;1hzmtA>!bRt?ejDSa(XabfRnd&PtWFk9YxXb-=|8pSOh^#x; zLG;FnVq*}+tkF}@h>W~T%$=(uzPFl8pq|U|0ZEW?yifwFx(!P!jV(jO6_at3Z5M=3l6|hV??ASwm~8nVUo3AY;-*B6 zrqo*3lUzqlmGY~fxY}b;nP8-%ps@&6%)dR3MS-neAYHu)Ob9V(I>Lyu5x0|zR%ZVC zpnxXG-u7OyHE+$O9A?c|QAMGoC-GxZHv2>@!fndg7}o1kF(i*^a}y0GgHD5`{1wv@ zi)aw2JUwlIcycxO;1m!Kd#EDVZkRc)$hejJ>3FOAoc3W{=@fUc$_L+AU~dHXTC!h1 z09Q9`2{ZrJKveJ`M@CCQS=~9}iR6ZU^!hW+F0?z86T}CRk1}J;)oSQRnk!e2#GZYk z%^T)xuHmIkyn|q>ES768f^)LOZ~GmaG5KP9soegg;jot}E_4HBV4XW%BZ<9nVxImdOQ`K+x=i8u`p- zimbt}IPEvOUG^DLJg*O;hVA$TT@G+aJ0177WYd%>YZkigDncYz3NMvedhWI9cb>#E zPUN=7jP~Kdz-iJ74b{Id?fG7=RW+~^8=$flWpKD`H6H|LHN@`Ic_pq*@Q5z>B?%Ay 
zkkYAOAiQJSimF(DTn566#8uJe`-FUNIy3|}K>QXACpILsQkQLSn1-s+7!pb4PLK%< z8b4e03p!9fTpz0J+>3X&RnG_>Pm;?x==oZ=qKaew0_IMZa*EtNNVRsjtghRf%&>>? z<~Ti#lNe3jsn6T|wV?R(^Stos{bda+iL*+#@R2vmX9NK*cS!iCL&*3)4eY140k$Rc zY^FE9R$ZFxys@d?WMdT!C>as?F4|dQd-z;W(tl?fS(_XV1U=68KpVF+`t?aHnej** zFpg7|c$|uz*rVnio8z9O{|JmRBrWdAh?BLzzO=TE#P9-KrMV(~8@ zyB}Z{#JU}>lP*sdWi?ui*sU|!eJkd0b~&N&^b7zCSc*UNbJWE@S zqx#eMEdG@$(0%Ti>&x=Mnc_d%0sOQ<4>m_j*2nJ8%ZQGdx8l)4_<3-HZ^3FaPG#wZ zEOx~{JAPY6EKmmUTmI>UJqaS5t>Ie1t-Dr{nr?-83HVh6!SNe0Mdt&uk0x-xCAfZzsl_uTtaOQ=9v!ufIL zpO=MdXW?5PoJMV3Jv*n~a^pwo{!#8e=53w6;CEeRu zWu^$*uMe5`W*oL{PWQ(Fc&|{=4_H#2vb&N%0d|~R>>q_H9rupkQ>1LP2On5h)<)L0Em%Ks3h12W|9ba;N) z*xYQ7F2X;!C>zVBB+;r7_wl~WS@HtE;?(RKTJzkr_nokVs=ct?zAoerG7EGX;BwT1 zWkyzLG;WUmR@^=WlekuG6is87q@^oS(adt?A=XCT(nEVCv-jk`{6!eJF(q&mDq@gV z|GbR8MF<0?rrLZmhl7FRQ?1pgCk2!PUpkqHjANb`6h}+LpedD6tw>x91iz`vbW>1dVlx={Ry^pp}N$x-5(&` zckq0%u-e%uAzc=NNbU+qkJ@6|Y18{neN*8zPEbjki}kn&RlbO@Tg)EMEwe>l%W;~_ z^LA-JkTzq=R!UQobw>V;*1k4brsQ+5v6lN4$;_7Qk>-TV=dqWlW_n@P1-McZXYka* zb?;Cozj+~&PaQ*AbQjr!%lV0O@v`ONjAFM4ynykQ-pvNC^E9D~q1Yem^HS0?SK|3I z8?rzBG%qgZskLrs#)=Q^?~flp{mL_Z7X{Q;JI-SgYXgrGZPPq?xxbE&?@&>_BRn@|| z;*il;CUq2!rsCEQx7nzuLwO}RMLTS>juE%4pjB?iLdA4BiIM=M*i>}M-^rZ{(F`cy zw1Ibz>8hQSz5Lt;-#%Pb`juf(GFVnH&xQE*m0=IOi+=EEL6rHAWvJDHRPAbO?!!MW z--5}(e&K~yln2O`(B2FnfF7}0-vYL;cVOTul^{{d`0^`UEnHlKSOc<6^8>f-$riJj zhzx#vk}5X`eXV|hgv+4Uvp8H(ljNR+xifXaA7F-}UKrt-b9-GoWWqByS4Ey)$G)Ro z2b-P-qyVi)s@9^PDpkIqAf{qEeNd)90?Ze!+>GCba|FLV@wwc@^RR}OBY^U-Wh0A- z-HOv;-mo=>wPd*H3$soe0PX0X_`$yaLq%6ZC@a>SW94{K|dIhu&o5wOFEYCQyMsyv$=;z|=NZGDd zWe}{~>u4rcsy~bk3k3)zXg-zQ`O}`?f|Y~&t~DhGMG=` z#wF?sWeEAD9ZzyA_u%_dM@rLNtBMfMJ~1sY0%^RBf!kF2>pSmp2c?u!DQ`>2T8~gU zABK#|S7>5%9N!p8duXjvWsVcxEFNt7_%LbLC_FYgZOP2h9(pyIrW=4nlA~P}mfk2h zh+<)jOiWIvkc1n1B(O6zp>_BC*eC78UwQuHme{8(O_7tJNL}Oe^zbOFz1kS%(-e@N z9auY)0ZnqoHriE2A3>#rhld>92r?3za+mu@%qFvw^6nM;8S!E=>TJ>ir<6JI)f>N` zo$n4dH$OqaCbPG ziPm2OpT=bJN3r2ysKSC|C}vWS%=~Y6$iGQ%+$D%A5Z>j$>>hr6KOXOZ3%Gubm5|@E 
zqReQyDA<^O=jW~XaOvF5&aWcHd>2U(5Cg71QCMc27KU7IU0)%E3 z?mNLj;#^j?^(5KOxd)a8VJz6_~BU@&8L>R{*8#rQ)mne#8ro#-%0*^c|9F~ z@NQ-A>T~{gBn38ulwN45UIZ=J05}27h>m7MuK?US-*&?`z~=Q{y<-*HVZh1fP>gUU z20ziNZBNVfxvh(F+*P^hdpLEkm3ecGDv(jvesD)4m8IrNnHRR>bv~;QXSbM8e@~}u z$xm)Y^X%mSC*xYgr?xLzHkHZ-Aq9fOUmj)=4pqj6qNWr6`goW1{&>Z5kS*svxBZdE z@_1Ur^o0U$=BKV54R>@RC)ldmkB>)7FW*hO^nhrkQUzGK)7UH|dB(3rFN(A8g-Lh_ zJl1%9yn>S?oFN{@vU?}5JXFrjr!^+)^Cf41`tO;z`5ocU&ka|<@wvW*%bNI6w(#Lz zN>m;3^!oAcVsE*!jRNg`)AmnH1m5zdN%msz_j)VulvKYF6H zepgRA^V(m$JP5t*lBDy6gX3`c)f8fyd_jzu;J!+|)Z!qj%i7t6#Jx)}&!i=;Hkf16 zD9f8?v|L(4D(gfVvYkGQC%Zr0OtdrJ@|AjYZQ<{0cIp%<^&0)YtW|mcMbYnB#yX$9 zFy;gIzOC~mzgw^qbkITV+;5T{od0=#C;7E^dgOK!kyXP9f_wQ{c;U8Hd_^#rk){`(>jJIr=5Rji->ZxJ+J1=&v@3{UgIZ{dE}EgR(Z* z=Pc7doEYYIVc)L-fpPx&oFM5u#2=X2Ubk95KQsa=*|V8r8z5jw-vzdTc|{2RN}trG zQf$UFzymsCggWDAGqs6!7>fcTWQ^IY}x5ANjUYF zk}&EoaCb3P1Ml8*p@veDw<{c4Hfts@FKM!DZ}Xhmn}_PTl`m~j7{zqW_a3vB51Jiu zhCOtX=2i@_p9ZMip~B#WB4#9l;N{_8dYjSPOGpv=Dqi6T`^1*`ihg&%BQk7)-bJWuSY?@-rr97 zG^33)z3vX=n!PRpj=Rb|)<5%BQ|cgu%HB7y|4ZiK5}2me=*dil-w7nRF+PWMH4PNA zRybHnt>=r`$|j~-0V7wHQ+envPDY38^ z&ck(La?n$4Gjf;HqVE~t%0)xqdP9{JvRS`)eDF(Yzl&QUIp`pAI9$9voqE@HQBLm@ z3dkjaG|S3?$+C_bh>7z`wF}&_j?2F|UF@oF8Gx%|HraYuMy)DIZ3H8I^JHC7^kHy1 z)%ZO+O60>pZ8w4uOWJ-O?_WaBUq_h>8VN)7JpHpTpHb%8#Lnp%uof8EmGC}{#GLJ` z(&L?OHs`Bn>E>Z#a=lg*ZJB|>28HI}7BJf4zTHTne6qY z&K^*eOZB{65n2?ULcbt0Dzcm=Nt{mUo0a?)K{l3;WtHRg9yXoQPd>)Dw+QsFrvj5< zZwGL$h0SgLc->@+VUwxJCb1wxkios!x#{t#!HULu!vcg}X2vDFP&>ZSF*xj0;YRK8HM6`?S7TN(Bv6Rd93v>-ZBL-9|ASs9iD zv~FULlAkx4_ImujpD1Bi5L+ZkWxC~6FlgQ>ds zvU6bcT25|%qn4X#NM7xV{XhybP!d~Stp&fUs=7pdp*_xns;?e4|(bf^hZ)BMvrBS6H z^DBx?y+F2;FSfl@FLAtk68<>VcG8Q7y{|=Rzq3@r+^XuT>634Y>0=`$b?_mrRugEQ!#e4W}MhI|l zPge)Bm~hc;nY_%aPka{Y1SIFFqLCiFxky0iOcfZ)(aO&4M?2h4PX8zDcREcjdTjsN z$~)uHW^%e8ax9y~j+@$0{B|({e3lj+mIQM5TCwfy|KaQ{279#Vc>sm(EEY=dEPJY7k;`2_UwJG zb*?zpvDRs}Pa)2(CRaLhP<%a8cdV;A%1R#quo)p<+3(S5$1nZ4!mKO)B$&7sXmEZ> zPkv-gw&7~^gyVv8#;fe`;nD<~$>txwM>@s95xEVyW;*^T<;?H0#g^9>Io^C|AX3Or 
z)=ZiDNIPI)`ZKbhJY|s2h#O+6upYK;UP$`V`MWGdRgF{7v+Op%# zMH656&dl;NQBOwf&rFYX;Cy#LspRv6{i6;9F#SH@-1=V}d*VYUodd68OKZ`o8gp_)V89= z!Hv&%j{>hTP$syrU9EPKDKsCf7ECF36)ZFk6~6JM-B^(*YV>6q=$;crUZDJ0UhTB` z{E+T;Z`RUhV2D%-EF$*yew2LW1F=6qNW}m``qn}+)8VCE+AncJs)T1Ylx=Lwa?F~S}C>SqvE(};0ZGUV9QbygqWR)oL1(Dd9#{SpC6_hW7oGL`X_#q<; zoZb8y^D;3|&)Fr)fNkz|3udVD#LsD3&F!n}F>2@!)kTs5vKIpYdp{f3Dbc5nTP_q; z#?qI48;#|(mnbi0$FcUriGiC`Ic*tg7TO;<9KHmg`6s~lxEnR8nifu4F6u#GO6}sb z{i|`9JV-o3@j9#bkQ&40Vz`9#g4cNY5)H#(ajn`sLlXa$Cb^2Htx?;{$31o?YEE=## z9l4eUb1X8Qp#lEVxAfReCN#ym0royyzwJQbiMBF-A#=4ZUCS?vUetJo7;BDvvN}yqvbP-Mestnab1Kiu(bs`+gydUz5^JR z^X*KRuw1FB+Y|~^KBxRG{1`^TSLxeFTukj;JLAv{Oj?zBs5|og;_?cSAc$<=QO^vg zc!nVw%N`3=>g6)+Jz1^I3&*lf`+tWULv;<;NMCeFX-7U9=0 zfNObBhswO?E(5hvycFOvX2L}I4ac!z0W4iU!|NUAbV3A?f9e;Q#PLh%7 z46rV{`bPyI$N>f9ueSiEBG)qpfX_3V5Gc9#b|Vydn_C+{H8ckL(yv5D33K)quZn*P|fxeFr@Zf zj4NtOAb^4_Q~R@J1W8z*VfvDG{KG2TcX8mR^D0kHO0jN8Z1C7eEPCE&AL-XPSm;WW z(=h4Tm4#lV9K(~(?kO;*j1aOm(aUEs^J!jo*2I#5Pl!kafKvein8 zl^cgbeP{TQ&a4qg8Cvp1`KO;V!#gT3U(^dA*T%X}WJtTw4UD(_bRb87b(%B@{=#5N zAYh0Lnjq^RbcE!Jh50AlHzB@&gdU1dzfSDrp~kd;fkegQ(w&cEvcSsx+W#M?^5pAc*k^D=^8+f9)hyS>N8 z`eGnQ`7PF^+2$~jYI;?uK*VGACeQfdm_)6?&@_K=PFtyXVRnv7$oeaeQPQ>(+Lb_U z8IWPja*N4X!MX`Z0D1cuTC91W+cY5g!vqHV;mAbxe+)MLg`{=1?_?dfg7@vefB*h= z?Mps1Fgu!qNiIKXhn%fNkN*^1DUAmTh2+Ll^n~vDs~UXo7}7-f&Nj2;p2nuqN-7f9qn8qH1KHYZrw-%ZJeaUos)YDfF1`BGX-(mp41dKB%qkd z-Yo0xxf@*pc~e~>WRTCl@C|GtwB5)A@n3y3<{>)pODC@CzwYmrc4tftD6?Zc z&Z!LEF+nSALzZ><+`{BRpncVw9g+Wpmw#|x~m{aDct^QOSkCFk%v4lw z^Qv?fL7-tF2ZZ30Co<;5H^c_5l-oSywqgq3q@0NF4@1xm|GX@_AzCIWZ{vyIJ^PUx z{QmUQbZX|*?eRRMM67=Mq8uF-nf3n?pq`+b{k>0@u6d#ag3V{{N z_vy?&zZD@D`VQm1+}Uq9k&GKN_!>3h==RD87~#Z8Wj0oATRKLh98HU5u) z;C!&~pIdf7w&5iGIVUV28bKhHa&}ggFH)X-Br-bJFeaE@?^Fp6)uZAsy$3i8Fm2}o zI)R^9z@Vs@uK{M|8Qp&h=|-VYB+h@||Kc^UBlbo%JU?@RnNJ5=8HnFLWI8uD_<)@y z{!TVwi&(h={Y`BJ|Z|p(@fxXO)b+mesKT*Q0p%BO-SD3ED%~V~6y-Pm$;a0vz$t-6OG6s81L>;+-*5oWFP5e1MAxQ@{{ih_&POZf z5n{0 zOHaplb=%?ug(YK|3@4pCR}nx$t3jTZPEBY&cmobTlJN7|P6U`2+W+`6p4kAWTkRJa 
z#xY8{!qaE3+&~Bu^ak2~>hkutjwEnbzU@P&je{TZ`0k6xJ+=p>zYEM7?iYq;o?>O| zA-ztB6>}9X{U`tq)4VzQslxxoQo&fvW6o6FQd%u^ZK7O87_pAq%^+EBt2YRH@tlavi0aO_V%IT7TuQ#pO^b(YT zf;<{dp0%_LdNqEE@bfXz5O}pZ%5wrc)m)kcz7a)#7pPXG#Gt8w0wqvdjX5ETxl5J2 zE4WUJA0$Ya1#;ARm}p64V9ymc&3j~_z!$^uxFh?C>w%qIieIJH?Josk#xHgGUE2jA zE*lu_{bOpR(Yc9Q2-YXX-+T(lhO*6@0vZFSvWQI^^#BglbM(e7XId!>l}w7* zzASg&ermP#RpJiYOX$R;Vg43sP-zfvKagaNUk=QS@P*00gDO=CFD)LaWkh|+oFZ(; ze@x=OfDP&u23YMGzmx%qCB>;4*RPP)(xM0P3hEv?f`mUpA5Usfw}v7XBw+ ztB5vY^GaOjYc%aZFGN2{ocPk4G*F1(tD_fsRs()DU<9FQS~RbHA2{QdPB&L=;l%)# z+SP@eE6z`8o1R`A193sceF_Qz)eYuHi4(;_%2Mw?Cq#<9f)aHoTPLIVClY{wm9=DORrEcE2a7Y83{^(}o-yAc}> zDrUBGrluV!`gsoX+M-A^hv@0WUta~3hv+gCQu7+T7nULwJKxW|M|PNIXcYT21OhaR z29O2UhA@b{+(Iv^9w-4^LFuQSF(^CjahMq*Nn_B}O2=ahV+C=KsC2XY4f~#R?JVdc z9W>a0A<29j3AnPrkP`z3Z8x1!K=ap}ocKO*5cW*<7#1p>y$Y>OrE=|p_gK)abg=2g z;W#3f?syBVU=cZs*u4Q!euT>x_UlC+gqR(gEFMYYi=Re1?6iUfz#7It<1cV!Pgre{T|Hw3 z;k+Lby*lX{mXm!~`QqgyMl)~(TS!g9u0Y&4vE@6koiKkBofwLnG?(;ZG^hZl?ppXqoo$ghF8*b*mpd*$#&^j5ESpVai!N1&~5SFAwhwA)gU>-}q0SvgDRD!F%PmKujDV0G3c5pv;Cf2d z4_1C-Da@VZh#QK)t4=r+teoSGPWeGT$+BnVeBcfMn5PdC_*N9vfT@97)d&a8oOoOx z9UOwuU1g}b;^Lpex;^_JMGr0=0LG4lx3n`cSF7qlPg%+Aau49)o^a5Pu1A%8cI)7c zJ>aQy+Lu)w%uYn*=O2jg`46mrvD$on0X(`8W!7~z^)n#8X zu;SO@$*caYHReRl6A(vPO*-89Z2et89oMJKm5xB^#qfF7IFwYBh6#MUcE`!t$;s)e zb7_Bm9?``&>QKQ8oIMx;&OkPdGUMCu0067d4|wr)^h|5RHi7dptI<7Dhn>Q`GELoJ zg4GJKP`4l{{wAm(AW-r?%00NdcTW6Ut{4|OZr}mrZZcjyEYXW#boPFE{aBc}Jg(H5 z#kg}9&>yJA9oM-J)tjhA5O*suAX0x7S8*a^#@~0oRQU2lcg!*h^#fot%ZC0`;Bb-%bS$;9L|^bD+RPgn{?zdtGzk>>qi_?HWC_BT$v&SMY*cb6oY(C&O|1Qn!f@2zh%f-(z;BabE=ff4z` zc*B0Yi<^J&^=W05-I01L{?Cl7qgktiJz1vug~orlayi4UGhfO8+u5+)*miv^JRUSN zfw^`s8-J?&tDofs&3#z6oYNyd>KQk)Wd+W`LT@Y0@G>L{ar;L(0lP&K3!AX(lxwuu8R^A(F(f% zgm{DZu?aqlxg0@+5`yWSIOG1!+F|1x{DbU-FEKC1Jj^q#H@d@7lsgwNTFp%-I_WT) zQ&G0$Om2^>t(^-fwIfjq3Gy|d#Vy5n^4t6D(c##u0EGIof0`Sp00+hq&**rH0Gx*6 z?xhrf?a3JP#PK}R1^tDjI~WqFF?C!+ylaDx+s~&?-Bx*Jh~`(jb}-khe=8tStGV6Y zWuT;d(CNFk4;T%}8c9)6@BS5*3`OA 
z>^a5C;{B?-$r>fqAykI4s2lC>6C630+u}^4-}Xk$;?~_T?uDY>gW#y-(b?L#)hAE$ zLg#kF)Nr1rjQIRJ8nAoskB3vv(&=cG+_LD9wi!esd(&j|efD>uAgsHWP}%bXw1}>N zSNSn+*@g1K%i&hGH{<@7erQ}!un|?5_8I9zy+eK8*5g;a2d|0eW4<8@tqfyQVbs;yre%lF=lQhAb;B!2-Vd(yM1peL19x^s9yZpt0y2z z0$llB+v!NeuIS!Fi$| zb}h2t>yd30PTlH+)0Yc4YvXJRzGXYpDxBar?njT4zT>-l2uFH3LS2dAWX0mr*zB6U zT*5fRTU|1$MeBE&EKYs1x_I*KtK+bNQPbjqum_EFX4 zVD@aG+zkEQ+BnkG<8pfj7TYsqT93Eh)b*?}G!m1bCOFxKS0i;x2u+FVZqveb z@0~-{>hUVcLdRt(-kCJIh#9~vDzio66uL!$2ySIa66^L+YuCDv^(0aidj$Z}>0&&p z?IVu8>5|q5pmGsJrgNIoMnU1j`>BF;H*W!()xa!E1P z9Sy~qAh}FLH^7tdSZ-Xnn1w}z9=~zr%BO+sWArB;r=IYNiN4t5BzeA`jCYC3obIN? zgUzM)oLSeaK>KCOS5D&}a9nhGG)(h_>5|}@ZZre`LM)Hvr;O*qec>#f^G~c6xq5>; zW4Rb()z}{wAv>He`)iFiDxzRGbKq=FM!;DC14MR;_EUog;Zb*4ba!4~RYjS@F$J*? zv}ktgZ0f^TS-r2V_bLEfyg;9;uzG3wc(X7nyfD;4S_LnXO+bufyBzJ|3CH*47F1eA%DUX?Kyy zYxX;7(O4tk zG>-4@48+YR)b4TU?jJ^i1`6jFgu7%$U5VN9S(>ABmWcH+T8PgF97hx)+l6M!Mdl@} z4@L(pKAe-U(gSvJr+uo-68L0EOtxICvfK2k->pBAyBzxd@YkQLQ8YfKt1(xmrjt@C zwOP!fG>zRyIkLk**NJgX6=<02f$7+KBk{-GJ$V!9n(ZJ*>0U|&S@&3W$8)VC=C||K z`NI&x7Z4|~{&|4c02B)z^KjY8qfXQ?51_B>MQL8V1RXI{pjW@7_LTycJUpm5`x(9# zaAoO2ld9LF#M0vaWpFvN@prI1+7b1SSIX=aDBmT1z&}|^yLz|3@32{Y**d_9R|?c z#B~`lnC+7Q7XE=fhE=#zTWoziN#t?-TkV0RR#HBhrIxmhD%i}FqIl#r}4J3OBM37;m>%vniugV zN1gRRqff|3h7r;4F9tlcX)CRlhHyEZr_w7UsWN6dqNQ1C6Q0vlal^m-Y9q86Otb+* za;v`x6+ZBXc}~BS!>5rSdgyIyD&CPt*HiP@G?tJ+5@@a3Wua6qUBTCBIb)692Iz3gAcokRr9N}i615@BstZ%jc z<FO&i?G)ZoO3o6bT?yrL-lzeow~LMATh9nq zZPG1VQmXs1S@Y1%_A=xs5i^k+D`74gMILt>E0H6V7~(n?SQSy( zqt5cOX_AE9^GGf8$PZ5t@(Nw$?l7Lm=KTbPW%a1*3j_E(vO1ehZq43B0+SH}AH@ za?}iW0!9lhKMAU1MW2>Ff$vEAU|>{_zXU-zp2n!Ii}f`9`1Q&}_y2U!q-JD! 
zk*U64c@U^((=|2-TB~%y!7EXN-v@8I$gG#eJ=V_<3d44cTV8NmcHUWQj#;vOC|!*a zC8)qgpb$oFMEr+omcb>RSsr#>q3@AOPQkUi+-z@PoS{w49R9I#fzQy4phZc%P`1UL zcU1S?YZ?&)37vz}wi=-TCHyDyFB=Bd9roNtu1811CJf+`#^G%kF@zcQ zb&u;?llJ|hvEqrVO7&LjDwVDKNppkzwCG~5XJ`R9LdUwrbm`PcIF^fWQvHyn7@r%yNKnxTmi-B8>qa;Dw) zf_#mNThZd1JO6N=2JSzuSpo+xyO0?|P)bXc@Et=nd(!9XlSD0B?;FsZgdWg1*q&b( z!T1&CwE$=EmV#n15%n8@xt_A6JGeSoE{BuRpBE|Q^-mJER-uXe&zQ$O2Da@kFtKN& zInK$ds|UD)^OD3&bOb;B1}bGRrL_taEnW3Ay76Z6GWDfk}(C zD5#k{CNm)z_Bf&60no1TVTMX(uN0W&U%kj=GanR+D&FTHN!V3K^&8)hbyRanHt)1D zH7L9r8^?=MqrKg_y7125VyWyEzDf~ot2MR(RhqPuIkT|BURBJO8EtYGE@Td4) zGS5i+v(Qr9<@o?nNqJ_~*2)D)=AqTG|B}QG-2Q%--%<3(o)-&EOHWzv_0BXWjy3*! zzPG50iCB{72e($&Oh3*S^vkT1!W!HEbYU3#`7AHlZliIYh`06`Qkj+`ljZkY_4)-* z_OpO-XZr^>HEK14^tJWQNK%lq$%U4kh_O-w3iaY2qt51R7tA3(>nhF9qw}H$8d0kY zDqUp3YZ&PWT#aXqloTP%I*sN+KL*2v1O(iQ%*IC&)!H=7mu{dRh6P*G#xxJ~F;?Y# zxK(p>>%I#Ap)y}{h$K=I%7?*}I7)CFMR-j7Tz|GvK41qxb21uumME8OWs_MI2)m5H z)_n*L`{=*UlK$HF;Hhe6EVs?~oqH9Uqpn=aPEJ!I>JS~Qk1f43AJF=A$UIvlU z06_mRa8wfI1t-wtobmXn0@0bowr)S7w%@XqIMz_u-T)LGq8xmQu4pyq93DJ ztMj-D%j;gN3WqvR^k?;a3D+l&nLDR5{*Q|R1-cB8TM2WTNo?cd9L8oWdw#% zgn=kdO%^QmNUROc2}?+O3$MEDZxqc@1-`eO5`&74Gmgn=Co5uaWnZI@}TL7w9V& zQ_9kVfqBg%(uvQd2SyI6CZXQ-_VSp+qdb6}=~=}NaINsUw8*pl=q~$C_w7ZMcVRu5 z(a|O2Vo?zD(iiJ2Yc4rwOvq^wJYrnB6RfH&+SxR`?r1!}F&#;sEN9g=Daj_B*&|hS zV?3zNlXs;y@r0!%5CfC}cb@#hKWCMpwjMGQdlw3v&Y1})Rq%0k|9Z_ieOz`G%!`S^ zw~KQt@#c+jz7Ml(0?GGEl%n%W7VTB4mtS9%V0}3FehBY;+7{@qB6`0x z^mFo+!pB_4>mgCQ?U{K)u%CI0qoJu3Qg(86apm~_#F}a|K`;#Q_REq`(#O!ae92wD zsX2`5KF(NyOPB=FfHdeqf>Ay#xG`234}uS->g z0zmAZBAupq>tOqO`(`~&d-xmbwkVB4n!e!zlHFsuOBWjuJs z{43sIRns3coDf3~0@BJX7?fQ=0CHrtSqb%|!89WM`jd~-MG2fo*mpwd2-_#sV4)6r z;LMOm>#3cuwfS z-~MHrR>CkaT%$zU155cDiK&XIAa6XB^=u063NpeR{IQx}m@SblX(Ct^?1dBxeT{PG z>;QSPk5O$%@AWf_iDzeOs3=|8?(4*2dnATrx|899ax045^-AB3Ip;$s~pc5rBnS-UZSzHIL3Ud`B{NsyQQEp8Ya*A=+z$G zqbW62*|dH0heEk3*dggSsD8{N1iM;4gM5FeCg7{BbrxlYrK5QFm-FQr^~h4MXn8Bz z1dy3Vx8gKavlR6K3;Z&z_Cjha{lG|Zpz+(WPkd1tMJ8Y}z$4>uhCHtMFPG?M3lA?{ 
z&t@s+y408Z0Mrpw$0h(f@D7hujZtHM{iN8sRJL_Q)t@$r!exhh7qUhB(Enlven~d3 z%C^T*yGM!dXEX{Ame16fq|Me0^Sj(W`chy%PEUu!sZc&O=*}A(zFltK_jAwh3{+$~ zRX9cqO=9&MwKQ0zu?gxMwOc8ggakwu`e`O)fZnkuW^?!b?G;Mj{+4K?C4o&J_450I zviV%O!j&|uHizS9P%ja8E#M`&Xf*OjGk#jo9u2M~aNFKYk) z;)H_jdhde5>NMblE=d=m?%ur_C*sxh;1Dh>5vOBg2s&UsCBWzKdA?(&=NknzODbeO-jW~$ z({vUmHmB7-rhT91u5>NA1~NG-XSA@yCt#&jNy+)%f!2L!5JdxzYM3PGB%`AvRN+t| zE0TWV^hpI28&cJ(1aaUxzy8S>&x>5N%nf3g*|WuDD2Ex*fRjt|_x6ET(LE^Dj*?sF zv=}xTTKqe7C100TG4v?u==x`6Y`yETkLCO#iZy0w*yE8Kj%tpCM!Xfwa6Ql~L*4Ft z8iYNAfb*F9j;B@bzrZ;?1g*sLIsPx6jyn-v!iY$Kj1}cYR^Wz-Tpvr3^V5!xu(GpK z2(`*5@{I(kD*S48uj1 z^m%_&;`3WFnJQy7jv_|k-8aZzWWRZHMeM~#`|+e9$Y?l0H@pxMOvG#M^UBPUvSQftE85A;8cv)4)h(MLOj>A68}se? z-2qjEv}NR4G(b2MQ7YJDD?IP(Hp-SSDS}2Q7OW>oUr zOf1PLC6LS`x$~1uxXBnL6mtdUD{=~F`h9}G8va<-M5VjLsyNtFEousNl?lbC-*Dx3 zrp5Pnf^kM9^}AoAW~7g*yo*vv&!cK~wc3t&+#N zPjDv;PihWQZNUkHj9L#_8iTtk&Hs##D>uf%#>P{yc&^aVbwGDKz@Eg{L&m9_Gc2+V zQN6*C&q0#Z1A<4|htoS7q7Q;esGnew(M*(yZFHd|(@W z;T507m#H!0gHsYG#b^kswi+$jD7T|U-1ry3B8RNJdPk4rB_u_?;o3DU>`~-%& zRPgM5drq#ct&M3mUapJwmoNFGjeF%OL;|yrqr7l|O%1d>`X3B|sUA&lw&HWtL%@76 zqY3MuJ6_O$CVyj)i-y?uHW~-z(_BlSy+p$8B+w@^4&fbLjoipP^e#5X*?LIec;yOR!@gr9gFRgvv&sYAF0iaMt$&o*GY+nM6 zEKg}5D0QV5jB~oW$JMnm@}*nw?KCW<+zRH$TIC(cQY4B0VQE5m^nG_ZX~?YL=re!) 
z&ZBTFFH7SIX@xPFFx&eje+qZT2&GOG7~;@AL4091Q*>k2pBfvwF05gn+|uLEWl zne5qqR}Bk;1{Jw~*ILY(?$1<`tuWEHe|H=wM6NJv0E)c0{Kxb0uE{?M$&EWw6ug?| z=5#VV;PWoKL4qM`RY)COUBHv`ij6*{aHfMu6o>a|_<2U$p>!>xhW7*3|DsfaWOf#V zS$TC@0Q3dj0F~XZA3z%;M5w@-rr|wF&MbY~FId%F4OxjdIG#NK)vpwbz=9~N7uOX% zZDsKv%6GlU96q?BW}Yv7bue8iSr6{4mEgU>q{GU;975dp%Om|8ivnq>$igQ(XElI z24NxkuKZyKVa{y)4ki1)46F@KSY1gP&fh=pt257aO~gWT6WTLMNVpixX~c8vC1XeP zCNv~E6BL(;hO~@0SaV}uFWbI52!V&73Y|U{8#34_0H?jkCr}Y3HPe!fHWPOKC{K}S z?SlSCw8b(^w`(Xv&zQVBw_;_uxMj|z==n2!DY8&dKsE~MhrQoE>E2Txl~Q3}A3H$DaRU>2u!wQBq;=y%6&Kqu8oOR<39t8ncI;}KpBvf$} zRqr|z$T8vLrQi#Qu=wxI{2<%<>l;9scy@j%v!CPe{9pZJr>2Z{fIz#ASZc43g% znm9q{9%wvlUixQOrU+L2ZE$U(3;=`#-IeMmsA|{NzuS>`!mUxd*sM)I)!;&Wt)F}M z933fAhI>38)3F_ ztcQZgexakxtl>>0pV`;B-9NsJK8GQWR{=UUn|r>Pa-Pbtr(|VB#BN@EzVl@bqvQQaZ0WD&5Kt8UOWJqx~Y)gsM_@sOa!cH zHvJP`kL&BUePg-B@8Tq=R-zdAT=KESnq>=8S9`n+;tztB^hM1{P>ZPCeP7_S3`(W= zH9ePXP{x_#4lVx88je5(na#`cUa$`1$BsolMP>x@h}Y0F9s> z=eHFgSvq?MM}X-*ZRr-|@29FI>i%trX2d zf~{PP-lIXtm-_1yTyeNl3j;H-VBt-6h|fetYKp6F8G%y+qo7)>e(uo2CC=cnIVn;a zI>L?tQZBnO)<;WfieLu(>!s~crf*OQscgGxUA91-x+NTqr&eOKxC@vIVSYkCJ82I; z1}Q$Z9QXhrMDY;&6Y))bfNIF^T31d+qy8-xSN>mQkW(9+dlN`liG(uRM;DSXP6Ftx zOoXv7uqBc2FQJ9(*m4z>-x{oU6o3w%_R%#i7nxhsVk8z|_>g|VTZH^DXgN+6MaF^I zF+XnS+41W<@ZW^$0H7kANbK8d=kHC& zDU=uwA4V|61n&80W-t4H*AfN8^`Hu2>TbqH09N-4Q>`#s8qn|Mhfr1LbjRWc!<)dw z;e|hd0CO5v7s*he#M2p^ZVZr%8`evRVc-J3H5X0p#QF8C2VqwfVEev6<)oSgMVN($hag}R#uVt@ZuHQx>!qs55p@z z*M6?3cW`b;&;S1Y8Qp=bT3_e&#lhL`ETK}=u<5@C=NTF#MpfIU;VyF<|8CfvTPlniI_Xwt z=%=O(8WI*0(=D2P>6l8D0JY`a%^AG5sk)=dA0BAuq%c*4UI0xvFWLIIMYK#jLkY!I zE_F-O&OCBzjyR_!9}hZ&N~g9Kx)DFWfw+VbWpIl=!ukuFv5kiwvX3I!-P}gKbCUcE^mPO-bV9 zk1DjbmJj^MkLdjx6k3m*l3&SL>a^_CdtVL^E{CTn#Usnc0S)X+?%37V|0) zMZ&+AJbAg^EvP>DAE{6%0a=yZP1E~dvcfgkH6`~oHaB07k=pFnKf3723yWXW58|Y9 zIZPsMp%N6v$O)=wO$9=yA18y|nk? 
z_Kf&)OT-}Yrl=XqUaQa~qa#b>VWCuTTXl8boDQuQZ%81S>q>W>8Ud|yepPrqpG8kb zeiP~_V~Vn+ovP(JHJxk7s9+AN#Z}~6*U0gUSBrX~tCQ7oF-IxeMut@k#*^q`jzEV| zAvQ0`5tHqws6+8-crLNh`~9)$AfIUSO0%KPkk@q|#B+1ze6)>2e$`zS=}0HKpP}2U zF#*?iIw&fn<&%9>YSYye92ETXY1WDrm@R8%vFL@}dX%O9TBpGaiJlC3y5OttrewXWOAIssQHWDLNn~x??o41;Q|}C0=7= zb^ZFpW!bgDmCdFppMUvpi!#BTj`ovS-pt9wXYDn6I9FM0ZHhGJ9wUQPi%AQLqnJKu zJw1*oeRtHwR8A8efY3#k``?|d%Vey1+HtTVTCa$%@3Z}(-^q9^bgb7e+gaWF-evi# zAQy^^)%fv7UgO=|9OdsdvHQcNhs5(ZsQqfpj(|HNWJ=PD9-TbfJH6n+3a3$nPHAhr z6IDpeX`gBRU}aZJLFrq2%KO`Ja}Dxn6Rwn%;VR@LF?nS1XkI*ZU9x{Q%EPYSfjB^U z+gGGLARck}ZYau?>$SsAu7kaM$4<%70x=+Zo!~{HH|h*3B@s;*S|U~4n*Sv zW)!2riR>=(A76GDHR&gJO=2SXAKpO!@Lhel9LD0sJJP!T4l;YqUx6WPyqv0jY6DLN zl|=zFUL=H-I8(h9-OzdcIw7s5riO;bbEG*R++nv~<6wh)+PT7nf5m-9 z(cOxv43;`d5fqv4I(D=jAI5uFD;VVWuO8V^YbP!G>K;qbQ}z(1=G;B|mF|>$q|~Uw zqf4MZz(4>=A;NJ~~u-$q$oRY^?LEJ4&P@y?_sMJ)equ#eCpUarZS4r#Y>vCQgqE! zi8IcR9&hp>kqAB#<7$M>=EvQE;9d5yhZ9>!bN@|Z^Cw2Ubc}o3@y8@qLA+U0fughQ zqsH;*sRcZ#oW&Sn2Jj9pcf6fSx7KkcD-Ti^qVg<~f-;TCwS&utcwRulC8ecU>Hg+1 z$o|LaLD$M~Zz7u8AF@3^vrSJv-t!lrGe^CYlKen!hT6ECs`8p%JD6JB-=fhpQ^~}< z*T&ys??#wwoL+<&Jo_+Zc}9AWA9dLFrnkK$D;CcY5s^& zrb5;6hr&)DamJ^}uwBs&iv0q_Qf98qE?J=J*S^nvgXZJ;;HT?=Jt8sfjo{nxK4)@tV0d>%Ha z=4$EpZS-zAYuWwws+(6-(G9E95}_P-lg$aA95V6GPE@A9p;r7W#@?c3 za4y1h7FBWxn`6mjeiSBVu`KN{mFT7A%e}rQ)F2WvY0S@q&fDt};Uj36qU|>s?_!hY z-F+)@+(m}(%H*JiWVBc!Jl;Zy3gYByg96>%JbG%#bO#aML@ewn{qr{jf)wBmA$TGD z=BFRFAm@8dCG7Db=Zf1c_wY8sRhOCFz*aM@?_}`Kwaq57z!^QChhdk^KilmZYs#S> z;Cg7891Y?Hk!{WXCbb>r)pK2osbjLv-4i{!cv#`GJNbExv0&nW_{W?YdfZ3L?6`j2 zm5`8O&+@Qn9+UFp;}E7-HHOh8jR^DgAlubl_i~RLoxDhsv5J|Ty?VhKe{UD$Ybq@R zX$;C`^E?@esDg&C$3-aJV19z&!%ZBI3!nIFHlE+cY8ZN*RO3gL0KVO0_IFVtIMoNTM^=zSY;`_lGe4miFr{*KZge2M{0(tkG@r;gvdaw!_X6 z>$NZ8yug%y9(%prOiu2nA8*gDY4>|y+1w-W@?-^Et469GM5%}j+nUec$v7q)^ec^a z=RP7G)@wLikfTR+lOl00TfTd}@kh6pA8>e^>i)9z@$u1ZNuC(v%S z6n?*~MJK6-Oba2sXKshfP9pPqz@>L;@Yt~_j}4Q|Ql8}T_D0{bwHyifbz^b z+Y^AN-?rWrKJ)Y^9}_)G$jr>FP^Nz$A(k?n?J4$w{5g5{ZCl0rz@_q1bUk%Xi*vev 
zG@VMExbBX6Gd0sbMV?wsIpDOqOiQ+LxZT;*63=u0Bvj2&dAHb~KTO~{T8a^!*Biop zOry6}VLKd)`$m+cR4pV<04H^E!fe){tA%v-qQf{3}2?7zdd3mDx& zqkQ)h(_THKzje5QQT!alZ1}Am3%A`nNvxXlel~ zh-nDto%es1ZUvvuo58x<@L1S3(@5M5^|IsEp+T74d@SWk zw>`_Ch1JD4XGG+XGb;OTvHghIwe-FrW)?*7sOwQ-0YBerjF-nV!uTC*b;Pb0^Kn*h z8^J*x8pltN-{Q?dw81Mc{(~bp6leyOs@-bjLWX3%L0{)E>dFWl2Ej9P(EEFk1?~VM zSP0-;4*4Gw?0yIZ)V9T3l;^GQ#VY}#XO&B5B)!r562H;K!b)23lCZDz_aDOqlIqEn zjuz%WJALC>jkm2?mE7EslMZ9pHl8f!V;{B_8&P|u_2O_jEhvcGWqqlDz?&2mxmW8# zzIr9i=mXi+AK%+@d?h(1pV&P3PL@IJ`{5)rZUFo85m`{r*=*~T7l`}tUxxp;leoyG zK~c6!$!s9jm}c zLUEg6*i+f&qZWNOtIA-r-muZX1=$TnkT@1_aoXUW@ngTJFKFYKW1QK`r$pdyKo2iY}| zC|mY@N$9=L8R|?t@B4fI{eAM+aps)+zLxLxy_PeJ@Ql7h$P@Mb?#YrY*NQ%NO1>tU z-0JDzvmsrm)aUMHf7X4w;!CBWPD*lgNB0AP;pYC;`eJrHD*9T0zem5;;jd46)rQDH z)o$PE5xstfB@QoMuRLbxX4`y2`Qz;6=2Ks!R0`zps?4=#iRwaa>fnQM7|m+O8wJG& z^k0UAz!D!NXOu-9$x|PDKUNJsQh%%|#jLS#a$<6F(5OoF1QI>CXdmLh+`ZsguL}*JAu65_6A>7A$iX8 zU#B;eb{x7_+Cfs`C1GF4L|IOq-*mgg)Mb9%v6t)i-0m#BWNcz4Qs4R`XIO5uqQ&`^ z*d|+0quzRTf3JM~{29klUFA$k=lSx-ra1$dVdUQKD^Df4p3XZr{Z#>qNkNExlhZTR zwLV>_FmPJ-p# zmj$&kIj$k#PGeOwY63LafgT7LqZ21VHyMc3yve`=r+<5=pY$Y^2a(Rl#M-U%$Mzay z*%1*5!`eU$Jrhv$=!~9bkpREU$y7Lg(?I;5##&WDSL3dLe|XnADk7F+BAWuEm$ctq z#*}O~H7*Or88|rZKa*P*qbF9W{jPs-kU!t`O}rb(Zwh}mDZmBr1y(?{UIj0p1&F19 zcV(LDosn)OWCNBJU^%mYK5*eZ9|-c??ewm_SWFLV9?cF0+x?J1_p>ZFbIQSh0+fv%EaV#rYLw4SxCWmg5gLz z{_c&I!X^OTn-1GyYt=xi?fIe@4#~A!6bSh|Ks*d7q#9I%u`L(!Ir|co4Caqwwt522 zfBfmwr`Ij`3w1(ZoxAc0m*I7MjR6(ovwnOVzOXG+GEcg@RsPi%*P!=NJU5)Z+f6G* zm0tT@nEd%eOb%WGXbXYqDgOx_g8t&o5Y#6B)}2X5w_gSIE#& z)xH7NRqjq;h1MlN@Hz1y5;My_@W;a{TTXG@oKrUX~AdDZMS=!8{{pwu28wd zEq&MQsl9Qdr?vINVLoG#(F*JtA#2+2VmH>K0!{uR&cFl;^SKlw$HSBHA&- zr{7)z24cJ(yC2$J1G1K*`Xe@2ltth-fL2eRJL1OjARZI*zuu1DKLNt{z^!x(72fGk zKk&tGMpe##*;n4|t&iVe_QK9#*Slvug1>L&ttV6<)&NiiVV0;zDzDL zo=!2Z)u3Re(@BhQt~me|k-XOpASKX~sO;-lt{s*N68+&X~dfTr?DNe6+kroBW2t=shXcR-(;0ikMc(<%>gqmaj{$}c)EXfcBw z#QV@c%_67HRBLjQK0Q70USq+06m*Dbn)3IcmzLD}x5PpZ@@!*c;}J=&Jjk|O0*cN0 
zS8{!X?|6E7_4wxOy`v&{h;ppF&h~+8OCKgj3T&FIS_LiZe}FjLIS6OUZ`b$@?%?(s3)j4^3O-P-eA$L;*iUeY^%#j!)hJu=g9Nc2_Q_Non*I8n} zHIax!V${OILX=Xa76r4xuz~9Cn*eIAAJ0{n1fOMOOkBz!TU&;<3~w6#cwRIGUIsjc zn^u~_eHo2N_<;;hEvwwH(164%u%$aqpauivG<%KV@gXHSAXo0= z8!gz=-;)Tw;0k_>|5e4%i%HBL!YAT=BQ}*e%e#dIL|_%Vvuni=uG$REp58_WBWF+4 zmuoyw4*8QEC2R+EAL*F}XIrR*^G{LEwA%6&6!%3=2li3ZG+FN`Il9a$$oeK$6sr>Z zIQjcUwsLEXH+XPbB&2W~w}vTYrsTU4*=txVSIsW2EWheF*+Nl0qJr8S{R0N_wViVh zknl<6WSb(S=GN2L2nHfkgNXl>g-V^k0?!Fbhi{7s7{TKUo_8FHNaY%CKH2!9yI+WE z@#w5VENShVAK2Y&ZFyb<|6^UECIyX{Z{GrsPnYB?_8cyWxT?G@6}t<9ka54FjsB+s zzQ}vhsiMPHpX~o?l1_6kO2=b42(g78Ef4k2DS|vJ(L&?8V_;&;llO>!1u@B%=s1Au z^@;i4VmNI`u^vQH=nsQaXvngidT3^M$sfXWB;l|6Rq>V}+|t5Zuo8nRfV~>|uy%gZ zsqeMAVJ8qC;o>Z?fgGR!-OGeWvR5<|8qq04Hb@8pc;}NJk584 zf_dJEdEOcmPAbI99&BPm>m?W7w)RP%{2-Z@K2 zIxIdhi|3s5lN|_X2)l|6tifkL6rKpUu;<8h+~3HSTPRQo!hs$J~IfFWjr?70^2TW>^AveMtks#!16NBE%AtpX*WuQuj%E3Bywn_H##t!?&!Gs&T1yMq$L{pW+lXiLOTN*2SU6^(5*QD$-t0MtSwo2N^)W6Bg#C-& zDTXU7Q1jd?B;uG%&v-whVus78a4)(N^Oj60^0ZJAA15oZ}d%HGNIL}N5n@JbaYs6@)! 
zl3&ipfYD^97LhdDCanu~v#W^eEDakcOnnLp@fcAXX=p!Z1UoXTfe>!mlhR75>FXx2m^YJbs2_)$)bG3kP>^I;`ptWi;QIm{HCiEMp zzEa-fI}ppqf))}JcSWCT1)GvtQ`i>5Knv-Ro*O=EUm3<;AFrEk-`gH8JTlrJS7Mq) zHl6&73$`mAYLZoE``29t2P%pMCl3#5Lbf1~aq(-$0*{uB>89qo`^vBaDtnbUg&Y(V zQb3_r+TG&-+wLcmP!FuQ-m1;SfX|MPeHI1JJV|LBAW)K~7pEeG%Q%iP%89!1fVtL1 z3={4GkGWY6BByjjPf!>3g^cqp*N_f{VOcC;UvE)42tq{iRO6N@)6Cqi*QBl=l~saf zVfnmp-K>q;&Zt1IR421J8@Bc*u;`ew%{+n`{bd0$rf6%uM3{F8F1scCuZ`d_(42<^ z<~)Qf90&cl5bo$1IyrY=XFCR1HbN+n{;Ay{vPdG%0{is2jc-~>`gLq@mu43M>TlVt zj4pHzJ$On?LZKY|+{}y(;#cBpd zZh%J3nneUTP`-qNx^>#6|PNResO|3wU-rZR!Ye>Yb>+ zk>d{>x&o+c78TWuIQupRcu62WNff!$qVoKb8FN!4&muuEKb!kPh(snxeqiz1&(fEs z5Nl~fg0=e8AEQ@Ti=J29|EdZYE>txRfa$JZpe(KB@BnQ}I>x$)vv#0)!c^27Dy{?; zV`g8tCoOWyZQavd{m{C~fOZ&s36}+S7b;ARvO zd;lpzf_34m^NlV&FwguB@g%pLWR`%yA4ggC8RPTHv&Zo*E@4K0Vzf z-cmLlSt~QeLQi?Gsp%u1^8UQhk267JVAsWkIm)>fr!5wjT9JB!JpB(tPK$G1l7!?Y z7B?@@S{P+hXu4^(1Ph?9v%-c&()_tFbl}fP1m6!;3Eo=pG&xa@*;AOfftpf&nx;k^ z#mc?Imw}bbs4*=AZ>+_I;2+%4N`Tg5+sM@M&u>O?;0BThOzb{Z>Kmr)?b+O7fb7q} z_Jo@Ni)uVhG!vrhfI4J?>+NaeTofl(AFi|X#6DUcazY7a6U>mrMTauuG|fNn3jCPd z2Q{=h&~U<7<){!1IRAa90t$ifLhvi>K^b^=sk5R)QfEgf$-E(jS|cqAL`-37;GH-> z+Lr7!J3D*spMw|skU+wP0-DZJ!e-NO#bQ+IW#h29UMbJM0hetlx6by-`CuPmE|?jG zYeh}w$D9@H-iHXw-BNn^8VvM&2dZxGJ6qB`%zO(OPn|JN3vtI!Xq4`N;8;g-&xHFr zNp7~CV>`$Oh2NVFI{htzQq2QD&cQN+O$%v`^fKWXGF>oxcFHDY=2fzBZ-4*zkr>C! z6q=OD!!XB6;i1Fo{3Nqq2e@?O1z3MF`-_ns|+_`n-V z{ZN+$oZh(0AmG3+1t_w%<2`jQO0yR69Vgq&*N-3iY5Fz#BuJKb$>ChzS#VgRqQPOP_V5%#fb0$7Tri*r4qCG2<$M3l_0U}OD z$wM@nECS)HBrz>5?R5=4({$6q*Oc1+&?N&v7d+?U;o&*o)wEvje5YqaI09SfV!$Aq z1)}O!T9n2gG@8NE66e4)xzpu|!b;ip7eJAM{Ic9`XoCFtvp7x0{PprrJg#)`_92M9 z1^Xln5(KmcTrTaB-Tx@Ol1W!~^&b!ahp2+l{s+?kSI{Gn_a}z`m7k5yMc98!S`x4EIIwCJ9Y%BiKBNCQ(fqe4 e|Nl|=#|EFc3spI*=M`3gKg|O=`_oj9Uj83X$Ld!A literal 0 HcmV?d00001 diff --git a/docs/source/reference/llms.rst b/docs/source/reference/llms.rst index 402e18ffa97..c162cae7c0c 100644 --- a/docs/source/reference/llms.rst +++ b/docs/source/reference/llms.rst @@ -1,19 +1,491 @@ .. 
currentmodule:: torchrl -LLM interface +LLM Interface ============= .. _ref_llms: -TorchRL offers a set of tools for LLM post-training, as well as some examples for training or setup. +TorchRL provides a comprehensive framework for LLM post-training and fine-tuning. The LLM API is built around five core concepts that work +together to create a complete reinforcement learning pipeline for language models: + +1. **Data Representation** (`Data Structures`_): The foundation for handling conversations, text parsing, and LLM + output classes. This includes the :class:`~torchrl.data.llm.History` class for managing conversation context and structured output classes for + tokens, log-probabilities, and text. + +2. **LLM Wrapper API** (`Modules`_): Unified interfaces for different LLM backends, including :class:`~torchrl.modules.llm.TransformersWrapper` for + Hugging Face models and :class:`~torchrl.modules.llm.vLLMWrapper` for vLLM inference. These wrappers provide consistent input/output formats across + different backends and an integrated interface for loss computation, data storage, grading, weight synchronization, etc. + +3. **Environments** (`Environments`_): The orchestration layer that manages data loading, tool execution, reward computation, and formatting. This includes + :class:`~torchrl.envs.llm.ChatEnv` for conversation management, dataset environments, and various transforms for tool integration. + +4. **Objectives** (`Objectives`_): Specialized loss functions for LLM training, including :class:`~torchrl.objectives.llm.GRPOLoss` for Group Relative + Policy Optimization and :class:`~torchrl.objectives.llm.SFTLoss` for supervised fine-tuning. + +5. **Collectors** (`Collectors`_): Collectors are used to collect data from the environment and store it in a format that can be used for training. 
This includes
+  :class:`~torchrl.collectors.llm.LLMCollector` for collecting data from the environment and :class:`~torchrl.collectors.llm.RayLLMCollector` for collecting
+  data in distributed settings using Ray.
+
+These components work together to create a complete pipeline: environments load and format data, LLM wrappers handle inference, data structures maintain
+conversation context, and objectives compute training losses. The modular design allows you to mix and match components based on your specific use case.
+
+A complete example of how to use the LLM API can be found in the `sota-implementations/grpo/` directory. The training orchestration involves three main components:
+
+- The Data Collector: holds a reference to the environment and the inference model or engine. It collects data, puts it in the buffer, and handles weight updates.
+- The Replay Buffer: stores the collected data and executes any pre- or post-processing steps. These may include:
+  - Advantage estimation with a Monte-Carlo based method (using the :class:`~torchrl.objectives.llm.MCAdvantage` transform);
+  - Grading of the outputs;
+  - Logging etc.
+- The trainer: handles the training loop, including the optimization step, serialization, logging and the initialization of weight updates.
+
+.. warning:: The LLM API is still under development and may change in the future. Feedback, issues and PRs are welcome!
+
+Data Structures
+---------------
+
+The data representation layer provides the foundation for handling conversations and LLM outputs in a structured way.
+
+History Class
+~~~~~~~~~~~~~
+
+The :class:`~torchrl.data.llm.History` class is a TensorClass version of the chat format usually found in transformers
+(see the `Hugging Face chat documentation <https://huggingface.co/docs/transformers/main/en/chat_templating>`_).
+It provides a comprehensive API for managing conversation data with features including: + +- **Text parsing and formatting**: Convert between text and structured conversation format using :meth:`~torchrl.data.llm.chat.History.from_text` + and :meth:`~torchrl.data.llm.chat.History.apply_chat_template` +- **Dynamic conversation building**: Append and extend conversations with :meth:`~torchrl.data.llm.chat.History.append` and + :meth:`~torchrl.data.llm.chat.History.extend` methods +- **Multi-model support**: Automatic template detection for various model families (Qwen, DialoGPT, Falcon, DeepSeek, etc.) +- **Assistant token masking**: Identify which tokens were generated by the assistant for reinforcement learning applications +- **Tool calling support**: Handle function calls and tool responses in conversations +- **Batch operations**: Efficient tensor operations for processing multiple conversations simultaneously. + +.. currentmodule:: torchrl.data.llm + +.. autosummary:: + :toctree: generated/ + :template: rl_template.rst + + History + ContentBase + LLMData + +Supported Model Families +^^^^^^^^^^^^^^^^^^^^^^^^ + +We currently support the following model families for string to History parsing or assistant token masking: + +- **Qwen family** (e.g., `Qwen/Qwen2.5-0.5B`): Custom template with full tool calling support +- **DialoGPT family** (e.g., `microsoft/DialoGPT-medium`): Custom template for conversation format +- **Falcon family** (e.g., `tiiuae/falcon-7b-instruct`): Custom template for instruction format +- **DeepSeek family** (e.g., `deepseek-ai/deepseek-coder-6.7b-base`): Custom template with native format + +Other models are supported, but you will need to provide a custom template for them. +LLAMA, Mistral, OPT, GPT, MPT, BLOOM, Pythia, Phi, etc. will use the default `chatml_format` template. + +Usage +^^^^^ + +.. 
code-block:: python
+
+    >>> from torchrl.data.llm import History
+    >>> from transformers import AutoTokenizer
+    >>>
+    >>> # Create a conversation history
+    >>> history = History.from_chats([[
+    ...     {"role": "user", "content": "Hello"},
+    ...     {"role": "assistant", "content": "Hi there!"},
+    ...     {"role": "user", "content": "How are you?"},
+    ...     {"role": "assistant", "content": "I'm doing well, thanks!"}
+    ... ]])
+    >>>
+    >>> # Load any supported tokenizer
+    >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")
+    >>>
+    >>> # Apply chat template with assistant token masking
+    >>> result = history.apply_chat_template(
+    ...     chat_template_name="qwen",
+    ...     add_generation_prompt=False,
+    ...     return_dict=True,
+    ...     return_assistant_tokens_mask=True,
+    ... )
+    >>>
+    >>> # The result contains an assistant_masks tensor
+    >>> assistant_masks = result["assistant_masks"]
+    >>> print(f"Assistant tokens: {assistant_masks.sum().item()}")
+
+Adding Custom Templates
+^^^^^^^^^^^^^^^^^^^^^^^
+
+You can add custom chat templates for new model families using the :func:`torchrl.data.llm.history.add_chat_template` function.
+
+.. autofunction:: torchrl.data.llm.history.add_chat_template
+
+Usage Examples
+^^^^^^^^^^^^^^
+
+Adding a Llama Template
+"""""""""""""""""""""""
+
+.. code-block:: python
+
+    >>> from torchrl.data.llm.history import add_chat_template
+    >>> from torchrl.data.llm import History
+    >>> from transformers import AutoTokenizer
+    >>>
+    >>> # Define the Llama chat template
+    >>> llama_template = '''
+    ... {% for message in messages %}
+    ... {%- if message['role'] == 'user' %}
+    ... {{ '[INST] ' + message['content'] + ' [/INST]' }}
+    ... {%- elif message['role'] == 'assistant' %}
+    ... {% generation %}{{ message['content'] + '</s>' }}{% endgeneration %}
+    ... {%- endif %}
+    ... {% endfor %}
+    ... {%- if add_generation_prompt %}
+    ... {% generation %}{{ ' ' }}{% endgeneration %}
+    ... {%- endif %}
+    ... 
'''
+    >>>
+    >>> # Define the inverse parser for Llama format
+    >>> def parse_llama_text(text: str) -> History:
+    ...     import re
+    ...     # Each turn is "[INST] user [/INST] assistant</s>"; anchor on </s>
+    ...     # so the assistant group is non-empty (a bare trailing (.*?) would
+    ...     # lazily match the empty string).
+    ...     pattern = r'\[INST\]\s*(.*?)\s*\[/INST\]\s*(.*?)</s>'
+    ...     matches = re.findall(pattern, text, re.DOTALL)
+    ...     messages = []
+    ...     for user_content, assistant_content in matches:
+    ...         messages.append(History(role="user", content=user_content.strip()))
+    ...         messages.append(History(role="assistant", content=assistant_content.strip()))
+    ...     return lazy_stack(messages)
+    >>>
+    >>> # Add the template with auto-detection
+    >>> add_chat_template(
+    ...     template_name="llama",
+    ...     template=llama_template,
+    ...     inverse_parser=parse_llama_text,
+    ...     model_family_keywords=["llama", "meta-llama"]
+    ... )
+    >>>
+    >>> # Now you can use it with auto-detection
+    >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+    >>> history = History.from_chats([[
+    ...     {"role": "user", "content": "Hello"},
+    ...     {"role": "assistant", "content": "Hi there!"}
+    ... ]])
+    >>>
+    >>> # Auto-detection will use the llama template
+    >>> result = history.apply_chat_template(
+    ...     tokenizer=tokenizer,
+    ...     add_generation_prompt=False,
+    ...     return_dict=True,
+    ...     return_assistant_tokens_mask=True,
+    ... )
+
+Testing Your Custom Templates
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When adding custom templates, you should test them to ensure they work correctly. Here are the recommended tests:
+
+Assistant Token Masking Test
+""""""""""""""""""""""""""""
+
+Test that your template supports assistant token masking:
+
+.. 
code-block:: python + + import pytest + from torchrl.data.llm.chat import History, add_chat_template + from transformers import AutoTokenizer + + def test_my_model_assistant_masking(): + """Test that your model supports assistant token masking.""" + # Add your template first + add_chat_template( + template_name="my_model", + template="your_template_here", + model_family_keywords=["my_model"] + ) + + tokenizer = AutoTokenizer.from_pretrained("your/model/name") + history = History.from_chats([[ + {'role': 'user', 'content': 'Hello'}, + {'role': 'assistant', 'content': 'Hi there!'} + ]]) + + result = history.apply_chat_template( + tokenizer=tokenizer, + chat_template_name="my_model", + add_generation_prompt=False, + return_dict=True, + return_assistant_tokens_mask=True, + ) + + # Verify assistant mask is present + assert 'assistant_masks' in result + assert result['assistant_masks'].shape[0] == 1, "Should have batch dimension of 1" + assert result['assistant_masks'].shape[1] > 0, "Should have sequence length > 0" + + # Verify some assistant tokens are masked + assistant_token_count = result['assistant_masks'].sum().item() + assert assistant_token_count > 0, "Should have assistant tokens masked" + print(f"✓ {assistant_token_count} assistant tokens masked") + +Template Equivalence Test +""""""""""""""""""""""""" + +Test that your custom template produces the same output as the model's default template (except for masking): + +.. 
code-block:: python + + def test_my_model_template_equivalence(): + """Test that your template matches the model's default template.""" + tokenizer = AutoTokenizer.from_pretrained("your/model/name") + history = History.from_chats([[ + {'role': 'user', 'content': 'Hello'}, + {'role': 'assistant', 'content': 'Hi there!'}, + {'role': 'user', 'content': 'How are you?'}, + {'role': 'assistant', 'content': 'I\'m good, thanks!'}, + ]]) + + # Get output with model's default template + try: + default_out = history.apply_chat_template( + tokenizer=tokenizer, + add_generation_prompt=False, + chat_template=tokenizer.chat_template, + tokenize=False, + ) + except Exception as e: + default_out = None + print(f"[WARN] Could not get default template: {e}") + + # Get output with your custom template + custom_out = history.apply_chat_template( + tokenizer=tokenizer, + add_generation_prompt=False, + chat_template_name="my_model", + tokenize=False, + ) + + if default_out is not None: + # Normalize whitespace for comparison + import re + def norm(s): + return re.sub(r"\s+", " ", s.strip()) + + assert norm(default_out) == norm(custom_out), ( + f"Custom template does not match default!\n" + f"Default: {default_out}\nCustom: {custom_out}" + ) + print("✓ Template equivalence verified") + else: + print("[INFO] Skipped equivalence check (no default template available)") + +Inverse Parsing Test +"""""""""""""""""""" + +If you provided an inverse parser, test that it works correctly: + +.. 
code-block:: python + + def test_my_model_inverse_parsing(): + """Test that your inverse parser works correctly.""" + history = History.from_chats([[ + {'role': 'user', 'content': 'Hello'}, + {'role': 'assistant', 'content': 'Hi there!'} + ]]) + + # Format using your template + formatted = history.apply_chat_template( + tokenizer=tokenizer, + chat_template_name="my_model", + add_generation_prompt=False, + tokenize=False, + ) + + # Parse back using your inverse parser + parsed = History.from_text(formatted, chat_template_name="my_model") + + # Verify the parsing worked + assert parsed.role == history.role + assert parsed.content == history.content + print("✓ Inverse parsing verified") + +LLM Wrapper API +~~~~~~~~~~~~~~~ + +The LLM wrapper API provides unified interfaces for different LLM backends, ensuring consistent input/output formats across training and inference pipelines. The main wrappers are :class:`~torchrl.modules.llm.TransformersWrapper` for Hugging Face models and :class:`~torchrl.modules.llm.vLLMWrapper` for vLLM inference. 
+ +**Data Structure Classes** + +The wrappers use structured :class:`~tensordict.TensorClass` objects to represent different aspects of LLM data: + +- **:class:`~torchrl.modules.llm.policies.Text`**: Contains text data with `prompt`, `response`, and `full` fields +- **:class:`~torchrl.modules.llm.policies.ChatHistory`**: Contains :class:`~torchrl.data.llm.History` objects with `prompt`, `response`, and `full` fields +- **:class:`~torchrl.modules.llm.policies.Tokens`**: Contains tokenized data with `prompt`, `response`, and `full` fields +- **:class:`~torchrl.modules.llm.policies.LogProbs`**: Contains log probabilities with `prompt`, `response`, and `full` fields +- **:class:`~torchrl.modules.llm.policies.Masks`**: Contains attention and assistant masks + +**API Flow** + +The wrappers operate in two distinct modes: + +**Generation Mode (`generate=True`)**: +- **Input**: Reads from `prompt` fields (e.g., `history.prompt`, `text.prompt`, `tokens.prompt`) +- **Output**: Writes to both `response` and `full` fields + - `response`: Contains only the newly generated content + - `full`: Contains the complete sequence (prompt + response) + +**Log-Probability Mode (`generate=False`)**: +- **Input**: Reads from `full` fields (e.g., `history.full`, `text.full`, `tokens.full`) +- **Output**: Writes log probabilities to the corresponding `full` fields + +**LLM-Environment Interaction Loop** + +.. figure:: /_static/img/llm-env.png + :alt: LLM-Environment interaction loop + :align: center + :width: 80% + + LLM-Environment interaction: the LLM generates a response, the environment updates the conversation, and transforms can inject new messages or tools. + +In a typical RL or tool-augmented setting, the LLM and environment interact in a loop: + +1. **LLM Generation**: The LLM wrapper receives a `prompt` (the current conversation history), generates a `response`, and outputs a `full` field + containing the concatenation of the prompt and response. +2. 
**Environment Step**: The environment takes the `full` field and makes it the next `prompt` for the LLM. This ensures that the conversation + context grows with each turn. See :ref:`ref_env_llm_step` for more details. +3. **Transforms**: Before the next LLM step, transforms can modify the conversation—for example, by inserting a new user message, a tool call, + or a reward annotation. +4. **Repeat**: This process repeats for as many turns as needed, enabling multi-turn dialogue, tool use, and RL training. + +This design allows for flexible augmentation of the conversation at each step, supporting advanced RL and tool-use scenarios. + +A typical pseudocode loop: + +.. code-block:: python + + # Get the first prompt out of an initial query + obs = env.reset(TensorDict({"query": "Hello!"}, batch_size=env.batch_size, device=env.device)) + while not done: + # LLM generates a response given the current prompt + llm_output = llm(obs) + # Environment steps: creates a ("next", "history") field with the new prompt (from the previous `"full"` field) + obs = env.step(llm_output) + +**Integration with History** + +When using `input_mode="history"`, the wrapper integrates seamlessly with the :class:`~torchrl.data.llm.History` class: + +- **Input**: Takes a :class:`~torchrl.modules.llm.policies.ChatHistory` object containing a History in the `prompt` field +- **Generation**: Applies chat templates to convert History to tokens, generates response, then parses the full text back into a History object +- **Output**: Returns a ChatHistory with: + - `prompt`: Original conversation history + - `response`: New History object containing only the assistant's response + - `full`: Complete conversation history with the new response appended + +This design allows for natural conversation flow where each generation step extends the conversation history, making it ideal for multi-turn +dialogue systems. + + +Prompt vs. Response and padding +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
figure:: /_static/img/llm-data.svg + :alt: LLM output data format (Tokens, Masks, Padded vs. Sparse) + :align: center + :width: 80% + + Structure of LLM outputs: padded vs. sparse representations for Tokens, LogProbs, and Masks. + +The diagram above illustrates the structure of the main output classes used in TorchRL's LLM API: + +- **Tokens** (and by extension, **LogProbs**): + - *Padded* format: All sequences in a batch are padded to the same length (with a special pad token), making them suitable for tensor operations. The prompt and response are concatenated to form `tokens.full`, and masks indicate valid vs. padded positions. + - *Sparse* format: Each sequence retains its original length (no padding), represented as lists of tensors. This is more memory-efficient for variable-length data. +- **Masks**: Two main masks are shown: + - `mask.attention_mask_all` marks valid (non-pad) tokens. + - `mask.assistant_mask_all` marks which tokens were generated by the assistant (useful for RLHF and SFT training). +- **Text**: Not shown in detail, as it is simply the decoded string representation of the prompt, response, or full sequence. + +This format ensures that all LLM outputs (Tokens, LogProbs, Masks, Text) are consistent and easy to manipulate, regardless of whether you use padded or sparse batching. + +In general, we recommend working with unpadded data, as it is more memory-efficient and easier to manipulate. +For instance, when collecting multiple padded elements from the buffer, it may be hard to clearly understand how to re-pad them +to combine them in a cohesive batch. Working with unpadded data is more straightforward. + +Modules +------- + +The LLM wrapper API provides unified interfaces for different LLM backends, ensuring consistent input/output formats across training and inference pipelines. 
+ +Wrappers +~~~~~~~~ + +The main goal of these primitives is to: + +- Unify the input/output data format across training and inference pipelines +- Unify the input/output data format across backends (to be able to use different backends across losses and collectors) +- Provide appropriate tooling to construct these objects in typical RL settings (resource allocation, async execution, weight update, etc.) + +.. currentmodule:: torchrl.modules.llm + +.. autosummary:: + :toctree: generated/ + :template: rl_template.rst + + LLMWrapperBase + TransformersWrapper + vLLMWrapper + ChatHistory + Text + LogProbs + Masks + Tokens + +Utils +^^^^^ + +.. currentmodule:: torchrl.modules.llm + +.. autosummary:: + :toctree: generated/ + :template: rl_template.rst + + LLMOnDevice + make_vllm_worker + stateless_init_process_group + vLLMWorker Collectors ---------- -TorchRL offers specialized collector classes (:class:`~torchrl.collectors.llm.LLMCollector` and :class:`~torchrl.collectors.llm.RayLLMCollector`) that are tailored for LLM -use cases. We also provide dedicated updaters for some inference engines. +.. _Collectors: + +TorchRL offers specialized collector classes (:class:`~torchrl.collectors.llm.LLMCollector` and :class:`~torchrl.collectors.llm.RayLLMCollector`) +that are tailored for LLM use cases. We also provide dedicated updaters for some inference engines. + +See :ref:`ref_collectors` for more details on the collector API. In brief, the idea of a collector is to isolate the inference part of the pipeline +in a dedicated class. +A collector usually takes as input a policy and an environment, and alternate between running one and the other. +In "classical" settings, the policy is similar to the policy being trained (with some optional extra-exploration). In the context of LLM fine-tuning, +the policy will usually be a specialized inference engine, such as a vLLM server. 
+Collectors are defined by the following parameters and features: + +- **Sync/Async**: Whether the collector should run in sync or async mode. + In sync mode, the collector will run the inference step in alternate with the optimization/training step. + In async mode, the collector will run the inference step in parallel with the optimization/training step. + A replay buffer can be passed to the collector, in such a way that the collector can directly write to it. + In other cases, the collector can be iterated over to collect data. +- **Steps**: A collector is built with a certain number of steps budget, as well as a number of steps to be + included in each batch yield during collection. +- **Weight Updater**: Weight updaters are the classes that update the policy weights. Isolating the weight update + in a dedicated class allows to easily implement different weight update strategies depending on the policy specification. + +Policy Version Tracking +~~~~~~~~~~~~~~~~~~~~~~~ -LLM Collectors allow to track the version of the policy, which is useful for some use cases. +LLM Collectors also allow to track the version of the policy, which is useful for some use cases. This is done by adding a :class:`~torchrl.envs.llm.transforms.PolicyVersion` transform to the environment, which is then incremented by the collector after each weight update. To do this, one either provides the stateful version of the transform, or a boolean to the collector constructor. @@ -43,64 +515,70 @@ transform, or a boolean to the collector constructor. LLMCollector RayLLMCollector +Environments +------------ -Data structures ---------------- +The environment layer orchestrates data loading, tool execution, reward computation, and formatting. When fine-tuning an LLM using TorchRL, the environment is a +crucial component of the inference pipeline, alongside the policy and collector. 
-To handle text-based data structures (such as conversations etc.), we offer a few data structures dedicated to carrying -data for LLM post-training. +ChatEnv +~~~~~~~ -.. currentmodule:: torchrl.data.llm +:class:`~torchrl.envs.llm.ChatEnv` serves as a blank canvas for LLM environments - it's a basic tool designed to be extended with transforms that add +specific functionality. The base ChatEnv provides the fundamental structure for managing conversation state using the +:class:`~torchrl.data.llm.History` format, but it's intentionally minimal to allow maximum flexibility. -.. autosummary:: - :toctree: generated/ - :template: rl_template.rst +Core Functionality +^^^^^^^^^^^^^^^^^^ - History - ContentBase - LLMData +ChatEnv operates in three main modes: +- **History mode**: Uses :class:`~torchrl.data.llm.History` objects for conversation management +- **Text mode**: Uses simple text strings for input/output +- **Tokens mode**: Uses tokenized data for input/output -Environments ------------- +The environment maintains conversation state by: +- **Reset**: Initializes a new conversation with an optional system prompt +- **Step**: Takes the LLM's response and updates the conversation history, preparing the next prompt -When fine-tuning an LLM using TorchRL, the environment is a crucial component of the inference pipeline, alongside the -policy and collector. Environments manage operations that are not handled by the LLM itself, such as interacting with -tools, loading prompts from datasets, computing rewards (when necessary), and formatting data. 
+Transform-Based Architecture +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Therefore, the fundamental structure of an LLM post-training pipeline is: +Transforms are the main way to extend ChatEnv with specific capabilities: -- A policy that wraps the LLM and the LLM only -- An environment that handles the world around the LLM: - - Loading data (through :class:`~torchrl.envs.llm.transforms.DataLoadingPrimer`) - - Formatting data (through :class:`~torchrl.envs.llm.transforms.TemplateTransform`) - - Executing tools (through :class:`~torchrl.envs.llm.transforms.PythonInterpreter` or :class:`~torchrl.envs.llm.transforms.MCPToolTransform`) - - Computing rewards online, if needed (through :class:`~torchrl.envs.llm.transforms.KLRewardTransform`) -- A data collector that takes the policy (the LLM) and the environment, and handles the inference part of the pipeline: - - Running reset, step and gathering actions; - - Yielding the data in a consistent format - or populating a buffer; - - Updating the policy weights (through :class:`~torchrl.collectors.WeightUpdaterBase` classes) -- A replay buffer that stores the data collected using the collector -- A loss that takes the LLM's output and returns a loss (through :class:`~torchrl.objectives.llm.GRPOLoss` for example) +- **Reward computation**: :class:`~torchrl.envs.llm.transforms.KLRewardTransform` for KL divergence rewards +- **Tool execution**: :class:`~torchrl.envs.llm.transforms.PythonInterpreter` for Python code + execution, :class:`~torchrl.envs.llm.transforms.MCPToolTransform` for general tool calling. +- **Data loading**: :class:`~torchrl.envs.llm.transforms.DataLoadingPrimer` for loading prompts from datasets +- **Thinking prompts**: :class:`~torchrl.envs.llm.transforms.AddThinkingPrompt` for chain-of-thought reasoning +- **Policy tracking**: :class:`~torchrl.envs.llm.transforms.PolicyVersion` for version control +- **Step counting**: Built-in step tracking and reset management using :class:`~torchrl.envs.transforms.StepCounter`. 
-These elements are presented in the GRPO scripts in the `sota-implementations/llm` directory. +Integration with LLM Wrappers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The design of environments in TorchRL allows for flexibility and modularity. By framing tasks as environments, users can -easily extend or modify existing environments using transforms. This approach enables the isolation of individual -components within specific :class:`~torchrl.envs.EnvBase` or :class:`~torchrl.envs.Transform` subclasses, making it -simpler to augment or alter the environment logic. +.. _ref_env_llm_step: -Available Environment Classes and Utilities -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +ChatEnv is designed to work seamlessly with both :class:`~torchrl.modules.llm.TransformersWrapper` and :class:`~torchrl.modules.llm.vLLMWrapper`. +The environment handles the conversation state management while the wrapper handles the actual LLM inference, creating a clean separation of concerns. -TorchRL provides various environment classes and utilities for working with LLMs, including: +On each call to `step`, the environment: + +- Takes the LLM's output, specifically the `full` field, which contains the entire conversation so far, including the new response (e.g., `history.full`, `text.full`, `tokens.full`). +- Sets this `full` field as the new `prompt` for the next LLM step (e.g., `td["next", "history"].prompt`, `td["next", "text"].prompt`, `td["next", "tokens"].prompt`). +- Optionally, applies transforms to insert new user messages, tool calls, or other modifications to the conversation before the next LLM step to refine the prompt. + +This mechanism enables seamless multi-turn interactions and supports complex workflows such as tool use and reward shaping. 
+ +Task-Specific Environments +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We provide a few task-specific environments, such as :class:`~torchrl.envs.llm.GSM8KEnv` for the GSM8K dataset, +:class:`~torchrl.envs.llm.IFEvalEnv` for the IFEval dataset, and :class:`~torchrl.envs.llm.MLGymEnv` for MLGym integration. + +These environments wrap a :class:`~torchrl.envs.llm.ChatEnv` and add a :class:`~torchrl.envs.llm.transforms.DataLoadingPrimer` transform +(plus an optional reward parsing transform) in a :class:`~torchrl.envs.TransformedEnv` class. -- Various environment classes (:class:`~torchrl.envs.llm.ChatEnv`, :class:`~torchrl.envs.llm.DatasetChatEnv`, - :class:`~torchrl.envs.llm.GSM8KEnv`, etc.) -- Utility functions (:class:`~torchrl.envs.make_gsm8k_env`, :class:`~torchrl.envs.make_mlgym`, etc.) -- Transforms and other supporting classes (:class:`~torchrl.envs.KLRewardTransform`, - :class:`~torchrl.envs.TemplateTransform`, :class:`~torchrl.envs.Tokenizer`, etc.) -These components can be used to create customized environments tailored to specific use cases and requirements. .. currentmodule:: torchrl.envs.llm @@ -192,73 +670,173 @@ Similarly, environments that load data from a dataset are just special instances augmented with a :class:`~torchrl.envs.llm.transforms.DataLoadingPrimer` transforms (and some dedicated reward parsing transforms). -.. currentmodule:: torchrl.envs.llm.transforms +Designing Reward Transforms +^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autosummary:: - :toctree: generated/ - :template: rl_template.rst +When designing reward transforms for LLM environments, several key considerations must be +addressed to ensure proper integration with the training pipeline. +The examples of :class:`~torchrl.envs.llm.GSM8KRewardParser` and +:class:`~torchrl.envs.llm.IfEvalScorer` provide excellent templates for reward transform design. 
- DataLoadingPrimer - KLRewardTransform - RetrieveLogProb - MCPToolTransform - BrowserTransform - PythonInterpreter - PolicyVersion - TemplateTransform - Tokenizer - as_nested_tensor - as_padded_tensor +**Reward Shape Requirements** -Modules -------- +The reward tensor must have the same number of dimensions as the logits, which is typically +two more dimensions than the environment batch size: -The :ref:`~torchrl.modules.llm` section provides a set of wrappers and utility functions for popular training and -inference backends. The main goal of these primitives is to: +- **Sparse rewards**: Shape ``(*bsz, 1, 1)`` - single reward per sequence +- **Dense rewards**: Shape ``(*bsz, num_tokens, 1)`` - per-token rewards -- Unify the input / output data format across training and inference pipelines; -- Unify the input / output data format across backends (to be able to use different backends across losses and - collectors, for instance) -- Give appropriate tooling to construct these objects in typical RL settings (resource allocation, async execution, - weight update, etc.) +This shape requirement ensures compatibility with the loss computation pipeline. +For example, in the GSM8K reward parser: -Wrappers -~~~~~~~~ +.. code-block:: python -.. currentmodule:: torchrl.modules.llm + # Rewards need to have shape broadcastable to [batch x tokens x 1] + tds = tds.apply(lambda t: t.unsqueeze(-1).unsqueeze(-1)) -.. autosummary:: - :toctree: generated/ - :template: rl_template.rst +**Done State Management** - TransformersWrapper - vLLMWrapper +It is crucial to properly manage the done state to prevent endless generation. Common strategies include: -Utils -~~~~~ +1. **Completion-based termination**: Set done when the response is complete (e.g., ``History.complete=True``) +2. **Content-based termination**: Set done when specific content is detected (e.g., ```` blocks) +3. **Step-based termination**: Use :class:`~torchrl.envs.transforms.StepCounter` for predetermined step limits -.. 
currentmodule:: torchrl.modules.llm +Example from IFEvalScorer: + +.. code-block:: python + + if self.set_done_if_answer and bool(answer_blocks): + next_tensordict.set("done", torch.ones(...)) + next_tensordict.set("terminated", torch.ones(...)) + +**Input Mode Handling** + +Reward transforms must handle different input modes correctly: + +- **History mode**: Extract text from ``("history", "full")`` or ``("history", "response")`` +- **Text mode**: Use text directly from ``("text", "full")`` or ``("text", "response")`` +- **Tokens mode**: Decode tokens from ``("tokens", "full")`` or ``("tokens", "response")`` + +The GSM8K reward parser demonstrates this pattern: + +.. code-block:: python + + if input_mode == "history": + responses = lazy_stack([r[..., -1] for r in responses.unbind(0)]) + if hasattr(responses, "content"): + text_completion = responses.content + elif input_mode == "text": + text_completion = responses + elif input_mode == "tokens": + text_completion = self.tokenizer.decode(responses.flatten(0, 1).tolist()) + +**Specification Management** + +Accurate specification of reward and observation specs is essential for proper environment initialization. Both GSM8K and IFEval provide good examples: + +.. code-block:: python + + def transform_reward_spec(self, reward_spec: Composite) -> Composite: + shape = reward_spec.shape + (1, 1) + reward_spec.update( + Composite( + reward_answer=Unbounded(shape), + reward_think=Unbounded(shape), + reward_right=Unbounded(shape), + reward_contained=Unbounded(shape), + reward=Unbounded(shape), + success=Unbounded(shape, dtype=torch.bool), + ) + ) + return reward_spec + +**Batch Processing Considerations** + +For efficient processing, handle batched data appropriately: + +1. **Flatten batch dimensions**: Use ``tensordict.view(-1)`` for processing +2. **Reshape results**: Restore original batch structure after processing +3. 
**Handle variable-length sequences**: Use proper padding and masking + +**Reward Aggregation Strategies** + +Consider different reward aggregation approaches: + +1. **Simple aggregation**: Sum or average multiple reward components +2. **Weighted aggregation**: Apply different weights to different components +3. **Conditional rewards**: Base rewards on specific conditions or thresholds + +The IFEvalScorer demonstrates a sophisticated aggregation strategy: + +.. code-block:: python + + def default_reward_aggregator(self, score: IFEvalScoreData, ...): + # Format score (max 1.0) + format_score = (format_components * weights).sum(dim=-1, keepdim=True) + + # Structure score (max 1.0) + structure_score = think_score + answer_score + + # Completion bonus (max 0.2) + completion_bonus = float(complete) * 0.2 + + return format_score + structure_score + completion_bonus + +**Post-Processing in Replay Buffers** + +Rewards can also be computed after the fact by appending transforms to the replay buffer. However, done state capture must remain in the environment transform since it needs to occur on-the-fly during data collection. + +**Error Handling and Robustness** + +Implement robust error handling for parsing failures: + +.. code-block:: python + + try: + cot, potential_answer = self.extract_tags(compl) + except ET.ParseError: + cot, potential_answer = ("", "") + +**Performance Considerations** + +1. **Avoid redundant computations**: Cache parsed results when possible +2. **Use efficient text processing**: Leverage regex or XML parsing as appropriate +3. **Minimize memory allocations**: Reuse tensors and avoid unnecessary copies + +By following these design principles, reward transforms can be effectively integrated into the LLM training pipeline while maintaining performance and reliability. + +.. currentmodule:: torchrl.envs.llm.transforms .. 
autosummary:: :toctree: generated/ :template: rl_template.rst - CategoricalSequential - LLMOnDevice - make_vllm_worker - stateless_init_process_group - vLLMWorker + AddThinkingPrompt + BrowserTransform + DataLoadingPrimer + KLComputation + KLRewardTransform + MCPToolTransform + PolicyVersion + PythonInterpreter + RetrieveKL + RetrieveLogProb + TemplateTransform + Tokenizer + as_nested_tensor + as_padded_tensor Objectives ---------- -LLM post training require some appropriate versions of the losses implemented in TorchRL. +LLM post-training requires specialized loss functions that are adapted to the unique characteristics of language models. GRPO ~~~~ The :class:`~torchrl.objectives.llm.GRPOLoss` class is a thin wrapper around the :class:`~torchrl.objectives.PPOLoss` class -that codes the LLM-specific functionnalities. +that codes the LLM-specific functionalities. .. currentmodule:: torchrl.objectives.llm @@ -270,9 +848,8 @@ that codes the LLM-specific functionnalities. GRPOLossOutput MCAdvantage - SFT -~~~ +^^^ .. 
currentmodule:: torchrl.objectives.llm diff --git a/sota-implementations/expert-iteration/ei_utils.py b/sota-implementations/expert-iteration/ei_utils.py index 179ec4d8aa2..ec061b5f318 100644 --- a/sota-implementations/expert-iteration/ei_utils.py +++ b/sota-implementations/expert-iteration/ei_utils.py @@ -104,7 +104,7 @@ def get_train_model( param.data = param.data.to(model_dtype) if chat_template_name is not None: - from torchrl.data.llm.chat import _CHAT_TEMPLATES + from torchrl.data.llm.history import _CHAT_TEMPLATES chat_template = _CHAT_TEMPLATES[chat_template_name] train_tokenizer.chat_template = chat_template diff --git a/sota-implementations/expert-iteration/expert-iteration-async.py b/sota-implementations/expert-iteration/expert-iteration-async.py index e8506a85c99..5cb62b319ba 100644 --- a/sota-implementations/expert-iteration/expert-iteration-async.py +++ b/sota-implementations/expert-iteration/expert-iteration-async.py @@ -15,7 +15,7 @@ from torchrl import torchrl_logger from torchrl.collectors.llm.weight_update.vllm import vLLMUpdater -from torchrl.data.llm.chat import History +from torchrl.data.llm.history import History from torchrl.record.loggers.wandb import WandbLogger try: diff --git a/sota-implementations/expert-iteration/expert-iteration-sync.py b/sota-implementations/expert-iteration/expert-iteration-sync.py index 556074f3469..34565228754 100644 --- a/sota-implementations/expert-iteration/expert-iteration-sync.py +++ b/sota-implementations/expert-iteration/expert-iteration-sync.py @@ -15,7 +15,7 @@ from torchrl import torchrl_logger from torchrl.collectors.llm.weight_update.vllm import vLLMUpdater -from torchrl.data.llm.chat import History +from torchrl.data.llm.history import History from torchrl.record.loggers.wandb import WandbLogger try: diff --git a/sota-implementations/grpo/config/grpo_gsm8k.yaml b/sota-implementations/grpo/config/grpo_gsm8k.yaml index fb8d10e148b..3bd2a235ddf 100644 --- 
a/sota-implementations/grpo/config/grpo_gsm8k.yaml +++ b/sota-implementations/grpo/config/grpo_gsm8k.yaml @@ -13,6 +13,10 @@ env: num_envs: 8 # Reduced from 8 to save memory # Number of times to repeat the same prompt for GRPO. This does not affect the GPU memory usage. repeats: 16 + # Whether to use the reasoning prompt + reasoning: false + # Maximum number of dialog turns per episode. + max_steps: 2 # Base model configuration model: diff --git a/sota-implementations/grpo/config/grpo_ifeval.yaml b/sota-implementations/grpo/config/grpo_ifeval.yaml index 5916dc45168..adf35c4477d 100644 --- a/sota-implementations/grpo/config/grpo_ifeval.yaml +++ b/sota-implementations/grpo/config/grpo_ifeval.yaml @@ -13,6 +13,10 @@ env: num_envs: 4 # Number of times to repeat the same prompt for GRPO. This does not affect the GPU memory usage. repeats: 16 + # Whether to use the reasoning prompt + reasoning: false + # Maximum number of dialog turns per episode. + max_steps: 2 # Base model configuration model: @@ -46,12 +50,12 @@ train: optim_batch_size: 2 # Whether to include the KL coefficient in the loss function. Alternatively, the KL ref-to-train will be added to the reward. 
- kl_coef_in_loss: false + kl_coef_in_loss: false # KL coefficients for the KL divergence to the reference and inference policies - kl_to_ref_coeff: 1e-1 - kl_to_inference_coeff: 1e-1 - entropy_coeff: 0.01 + kl_to_ref_coeff: 1.0 + kl_to_inference_coeff: 0.0 + entropy_coeff: 0.001 # Fields used only by grpo-async.py / grpo-sync.py logging_frequency: 1 # Log metrics every N steps - here at each optimization step diff --git a/sota-implementations/grpo/grpo-async.py b/sota-implementations/grpo/grpo-async.py index 04bdb6a95bd..70323f7836a 100644 --- a/sota-implementations/grpo/grpo-async.py +++ b/sota-implementations/grpo/grpo-async.py @@ -15,7 +15,7 @@ from torchrl import torchrl_logger from torchrl.collectors.llm.weight_update.vllm import vLLMUpdater -from torchrl.data.llm.chat import History +from torchrl.data.llm.history import History from torchrl.record.loggers.wandb import WandbLogger try: @@ -30,9 +30,8 @@ from grpo_utils import ( compute_device_allocation, get_inference_model, - get_ref_model, - get_tokenizer, get_train_model, + make_env, make_weight_updater, ) from omegaconf import DictConfig @@ -49,8 +48,6 @@ from torchrl.collectors.llm import RayLLMCollector from torchrl.data import LazyStackStorage, ReplayBuffer from torchrl.data.replay_buffers.ray_buffer import RayReplayBuffer -from torchrl.envs.llm import GSM8KEnv, KLRewardTransform -from torchrl.envs.llm.datasets.ifeval import IFEvalEnv from torchrl.objectives.llm.grpo import GRPOLoss, MCAdvantage @@ -72,51 +69,6 @@ def setup_environment() -> None: torch.cuda.set_device("cuda:0") -def make_env(cfg: DictConfig, devices: list[int] | None = None): - """Create the environment with proper device allocation. 
- - Args: - cfg: The configuration object - - Returns: - The configured environment - """ - # Create reference model with proper device allocation - # For the collector actor, we want inference_model devices first, then ref_model devices - train_tokenizer = get_tokenizer(cfg) - - # Create a new config with adjusted device assignments - ref_cfg = DictConfig(dict(cfg)) - ref_model = get_ref_model(ref_cfg, train_tokenizer, devices=devices) - - # Setup environment - if cfg.env.dataset == "gsm8k": - env = GSM8KEnv( - repeats=cfg.env.repeats, - tokenizer=train_tokenizer, - num_envs=cfg.env.num_envs, - ) - else: # ifeval - env = IFEvalEnv( - repeats=cfg.env.repeats, - tokenizer=train_tokenizer, - num_envs=cfg.env.num_envs, - ) - - # Pass device directly to KLRewardTransform - Since, for Ray, the local device is always 0 - # we can just use 0 here. - device = torch.device("cuda:0") - env = env.append_transform( - KLRewardTransform( - actor=ref_model, - coef=cfg.train.kl_to_ref_coeff, - add_to_reward=not cfg.train.kl_coef_in_loss, - device=device, - ) - ) - return env - - def train( replay_buffer: ReplayBuffer, cfg: DictConfig, @@ -145,8 +97,13 @@ def train( kl_to_ref_coeff=cfg.train.kl_to_ref_coeff if cfg.train.kl_coef_in_loss else 0.0, kl_to_inference_coeff=cfg.train.kl_to_inference_coeff, entropy_coeff=cfg.train.entropy_coeff, + # use prompt/response masking for regular training, and assistant masking for reasoning + masking_strategy="rlhf" if cfg.env.reasoning else "sft", device=train_device, ) + if cfg.env.reasoning: + # TODO: this is clunky, we should find a way to do this more naturally + loss_fn.set_keys(sample_log_prob=("next", "log_probs", "full")) if cfg.model.compile: loss_fn = torch.compile(loss_fn) @@ -228,7 +185,7 @@ def train( batch = replay_buffer.sample(cfg.train.optim_batch_size).to(train_device) # For logging purposes, we get the last element of the history # and convert it to a string - history: History = batch.view(-1)[0]["next", "history"] + history: 
History = batch.view(-1)[0]["history", "full"] history_str: list[str] | str = history.apply_chat_template( tokenizer=train_tokenizer ) @@ -281,9 +238,13 @@ def train( if (step % cfg.train.logging_frequency) == 0: with torch.no_grad(): rb_content = replay_buffer[:] + step_count = ( + rb_content.get(("next", "step_count")).view(-1).float().mean() + ) batch_policy_version = batch["next", "policy_version"].view(-1).min() batch_policy_age = collector.policy_version - batch_policy_version metrics = { + "step_count from buffer": float(step_count), "reward from buffer": float( torch.cat( rb_content.get(("next", "reward"), as_list=True) @@ -298,7 +259,9 @@ def train( torch.tensor( [ t.numel() - for t in rb_content.get("tokens_response", as_list=True) + for t in rb_content.get( + ("tokens", "response"), as_list=True + ) ], dtype=torch.float, ).mean() @@ -437,7 +400,8 @@ def main(cfg): train_handler_config = dict(cfg.ray.train_handler_config) inference_policy = get_inference_model( - cfg, devices=device_config["inference_model_devices"] + cfg, + devices=device_config["inference_model_devices"], ) torchrl_logger.info(f"Inference policy: {inference_policy}") @@ -474,6 +438,8 @@ def main(cfg): weight_updater=None, # We'll create this after getting the remote LLM track_policy_version=True, remote_config=collector_config, + yield_only_last_steps=cfg.env.reasoning, + verbose=False, ) # Ensure collector is initialized by calling a method that will block until ready ray.get(collector._collector.is_initialized.remote()) diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index 24ca3f3a367..b0f9a081c7f 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -14,7 +14,7 @@ from torchrl import torchrl_logger from torchrl.collectors.llm.weight_update.vllm import vLLMUpdater -from torchrl.data.llm.chat import History +from torchrl.data.llm.history import History from torchrl.record.loggers.wandb import 
WandbLogger try: @@ -31,9 +31,8 @@ from grpo_utils import ( compute_device_allocation, get_inference_model, - get_ref_model, - get_tokenizer, get_train_model, + make_env, make_weight_updater, ) from omegaconf import DictConfig @@ -50,8 +49,6 @@ from torchrl.collectors.llm import RayLLMCollector from torchrl.data import LazyStackStorage, ReplayBuffer, SamplerWithoutReplacement from torchrl.data.replay_buffers.ray_buffer import RayReplayBuffer -from torchrl.envs.llm import GSM8KEnv, KLRewardTransform -from torchrl.envs.llm.datasets.ifeval import IFEvalEnv from torchrl.objectives.llm.grpo import GRPOLoss, MCAdvantage @@ -73,51 +70,6 @@ def setup_environment() -> None: torch.cuda.set_device("cuda:0") -def make_env(cfg: DictConfig, devices: list[int] | None = None): - """Create the environment with proper device allocation. - - Args: - cfg: The configuration object - - Returns: - The configured environment - """ - # Create reference model with proper device allocation - # For the collector actor, we want inference_model devices first, then ref_model devices - train_tokenizer = get_tokenizer(cfg) - - # Create a new config with adjusted device assignments - ref_cfg = DictConfig(dict(cfg)) - ref_model = get_ref_model(ref_cfg, train_tokenizer, devices=devices) - - # Setup environment - if cfg.env.dataset == "gsm8k": - env = GSM8KEnv( - repeats=cfg.env.repeats, - tokenizer=train_tokenizer, - num_envs=cfg.env.num_envs, - ) - else: # ifeval - env = IFEvalEnv( - repeats=cfg.env.repeats, - tokenizer=train_tokenizer, - num_envs=cfg.env.num_envs, - ) - - # Pass device directly to KLRewardTransform - Since, for Ray, the local device is always 0 - # we can just use 0 here. 
- device = torch.device("cuda:0") - env = env.append_transform( - KLRewardTransform( - actor=ref_model, - coef=cfg.train.kl_to_ref_coeff, - add_to_reward=not cfg.train.kl_coef_in_loss, - device=device, - ) - ) - return env - - def train( replay_buffer: ReplayBuffer, cfg: DictConfig, @@ -146,6 +98,8 @@ def train( kl_to_ref_coeff=cfg.train.kl_to_ref_coeff if cfg.train.kl_coef_in_loss else 0.0, kl_to_inference_coeff=cfg.train.kl_to_inference_coeff, entropy_coeff=cfg.train.entropy_coeff, + # use prompt/response masking for regular training, and assistant masking for reasoning + masking_strategy="rlhf" if cfg.env.reasoning else "sft", device=train_device, ) if cfg.model.compile: @@ -228,7 +182,7 @@ def train( ) # For logging purposes, we get the last element of the history # and convert it to a string - history: History = batch.view(-1)[0]["next", "history"] + history: History = batch.view(-1)[0]["next", "history"].prompt history_str: list[str] | str = history.apply_chat_template( tokenizer=train_tokenizer ) @@ -291,6 +245,12 @@ def train( if (global_step % cfg.train.logging_frequency) == 0: with torch.no_grad(): rb_content = replay_buffer[:] + step_count = ( + rb_content.get(("next", "step_count")) + .view(-1) + .float() + .mean() + ) batch_policy_version = ( batch["next", "policy_version"].view(-1).min() ) @@ -298,6 +258,7 @@ def train( collector.policy_version - batch_policy_version ) metrics = { + "step_count from buffer": float(step_count), "reward from buffer": float( torch.cat( rb_content.get(("next", "reward"), as_list=True) @@ -494,7 +455,8 @@ def main(cfg): track_policy_version=True, remote_config=collector_config, sync_iter=cfg.train.sync_iter, - verbose=True, + verbose=False, + yield_only_last_steps=cfg.env.reasoning, ) # Ensure collector is initialized by calling a method that will block until ready ray.get(collector._collector.is_initialized.remote()) diff --git a/sota-implementations/grpo/grpo_utils.py b/sota-implementations/grpo/grpo_utils.py index 
6a99dde7cf0..a0978c5dad9 100644 --- a/sota-implementations/grpo/grpo_utils.py +++ b/sota-implementations/grpo/grpo_utils.py @@ -12,6 +12,8 @@ from torchrl._utils import logger as torchrl_logger from torchrl.collectors.llm.weight_update.vllm import vLLMUpdater +from torchrl.envs.llm import AddThinkingPrompt, GSM8KEnv, KLRewardTransform, RetrieveKL +from torchrl.envs.llm.datasets.ifeval import IFEvalEnv from torchrl.modules.llm import TransformersWrapper, vLLMWrapper from transformers.models.auto.modeling_auto import AutoModelForCausalLM from transformers.tokenization_utils import PreTrainedTokenizer @@ -93,9 +95,11 @@ def get_train_model( policy_training = TransformersWrapper( train_model, tokenizer=train_tokenizer, - from_text=False, + input_mode="tokens" if not cfg.env.reasoning else "history", generate=False, return_log_probs=True, + pad_output=False, + device=torch.device("cuda:0"), ) # Ensure model stays in eval mode after wrapping policy_training.model.eval() @@ -104,7 +108,10 @@ def get_train_model( def get_inference_model( - cfg: DictConfig, devices: list[int] | None = None, make_ray_worker: bool = True + cfg: DictConfig, + devices: list[int] | None = None, + make_ray_worker: bool = True, + tokenizer: PreTrainedTokenizer | None = None, ) -> vLLMWrapper: """Creates the vLLM-based inference model for fast generation. @@ -116,7 +123,9 @@ def get_inference_model( cfg (DictConfig): The hydra configuration object containing model settings. Expected to have inference_model section with vLLM-specific parameters like gpu_memory_utilization and generation settings. - make_ray_worker (bool, optional): Whether to make a ray worker. Default: True + devices (list[int], optional): The devices to use for the inference model. Default: `None`. + make_ray_worker (bool, optional): Whether to make a ray worker. Default: `True`. + tokenizer (PreTrainedTokenizer, optional): The tokenizer to use with the inference model. Default: `None`. 
Returns: vLLMWrapper: The wrapped vLLM model ready for inference. @@ -149,10 +158,20 @@ def get_inference_model( enforce_eager=cfg.inference_model.enforce_eager, ) assert inference_server is not None + if tokenizer is None: + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained(model_name) + if tokenizer.pad_token == tokenizer.eos_token: + tokenizer.pad_token = "PAD" + tokenizer.padding_side = "left" policy = vLLMWrapper( inference_server, - from_text=True, - return_log_probs=True, + input_mode="history", + chat_template_name="qwen", + return_log_probs=not cfg.env.reasoning, + tokenizer=tokenizer, + pad_output=False, generate_kwargs={ "max_tokens": cfg.inference_model.max_tokens, "include_stop_str_in_output": cfg.inference_model.include_stop_str_in_output, @@ -164,7 +183,9 @@ def get_inference_model( def get_ref_model( - cfg: DictConfig, tokenizer: PreTrainedTokenizer, devices: list[int] | None = None + cfg: DictConfig, + tokenizer: PreTrainedTokenizer, + devices: list[int] | None = None, ) -> TransformersWrapper: """Creates the reference model for KL penalty computation. @@ -218,10 +239,12 @@ def get_ref_model( TensorDict.from_module(ref_model).data.to_module(ref_model) ref_model = TransformersWrapper( ref_model, + input_mode="tokens" if not cfg.env.reasoning else "history", tokenizer=tokenizer, - from_text=False, generate=False, return_log_probs=True, + pad_output=False, + device=torch.device("cuda:0"), ) return ref_model @@ -473,3 +496,88 @@ def compute_device_allocation(cfg): "ray_num_gpus": ray_num_gpus, "cuda_visible_devices": cuda_visible_devices, } + + +def make_env(cfg: DictConfig, devices: list[int] | None = None): + """Create the environment with proper device allocation. 
+ + Args: + cfg: The configuration object + + Returns: + The configured environment + """ + # Create reference model with proper device allocation + # For the collector actor, we want inference_model devices first, then ref_model devices + train_tokenizer = get_tokenizer(cfg) + + # Create a new config with adjusted device assignments + ref_cfg = DictConfig(dict(cfg)) + ref_model = get_ref_model( + ref_cfg, + train_tokenizer, + devices=devices, + ) + + # Setup environment + max_steps = cfg.env.max_steps if cfg.env.reasoning else 1 + if cfg.env.dataset == "gsm8k": + # Reward scale is 0.0 to 100 + reward_threshold = 20 + env = GSM8KEnv( + repeats=cfg.env.repeats, + tokenizer=train_tokenizer, + num_envs=cfg.env.num_envs, + max_steps=max_steps, + device=torch.device("cuda:0") if devices is not None else None, + ) + elif cfg.env.dataset == "ifeval": # ifeval + # Reward scale is 0.0 to 2.2 + reward_threshold = 1.0 + env = IFEvalEnv( + repeats=cfg.env.repeats, + tokenizer=train_tokenizer, + num_envs=cfg.env.num_envs, + max_steps=max_steps, + device=torch.device("cuda:0") if devices is not None else None, + ) + else: + raise NotImplementedError(f"Dataset {cfg.env.dataset} not implemented") + if cfg.env.reasoning: + env = env.append_transform( + AddThinkingPrompt( + cond=lambda td, reward_threshol=reward_threshold, max_steps=max_steps: td[ + "reward" + ] + <= reward_threshold + and td["step_count"] < max_steps, + role="assistant", + edit_last_turn=True, + zero_reward=True, + undo_done=True, + random_prompt=True, + ), + ) + env = env.append_transform( + # RetrieveKL will be lazily initialized in the collector. + # We use RetrieveKL instead of KLRewardTransform because the assistant response may change when + # adding the thinking prompt, requiring a second pass in vllm to compute the log-probs. 
+ RetrieveKL( + ref_model=ref_model, + add_to_reward=not cfg.train.kl_coef_in_loss, + coeff=cfg.train.kl_to_ref_coeff, + ) + ) + else: + # Pass device directly to KLRewardTransform - Since, for Ray, the local device is always 0 + # we can just use 0 here. + device = torch.device("cuda:0") + env = env.append_transform( + KLRewardTransform( + ref_model=ref_model, + coef=cfg.train.kl_to_ref_coeff, + add_to_reward=not cfg.train.kl_coef_in_loss, + device=device, + ) + ) + return env diff --git a/test/llm/test_data.py b/test/llm/test_data.py index 082bdc1bc16..9e599b5c6f2 100644 --- a/test/llm/test_data.py +++ b/test/llm/test_data.py @@ -21,7 +21,7 @@ ReplayBuffer, SamplerWithoutReplacement, ) -from torchrl.data.llm.chat import ContentBase +from torchrl.data.llm.history import ContentBase from torchrl.data.llm.topk import TopKRewardSelector _has_transformers = importlib.util.find_spec("transformers") @@ -324,8 +324,12 @@ def test_content_base(self): The result is""", ] - @pytest.mark.parametrize("test_case", TEST_CASES) - def test_history_assistant_mask(self, test_case): + @pytest.mark.parametrize( + "test_case", + TEST_CASES, + ids=["case_1", "case_2", "case_3", "case_4", "case_5", "case_6"], + ) + def test_history_assistant_mask_qwen(self, test_case): from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-3B") @@ -355,13 +359,108 @@ def test_history_assistant_mask(self, test_case): assert type(decoded) is str assert last_item.endswith(decoded), (decoded, last_item) + LLAMA_TEST_CASES = [ + # Case 1: All messages complete + """<|begin_of_text|><|header_start|>system<|header_end|> + +I am a helpful assistant.<|eot|><|header_start|>user<|header_end|> + +What is the capital of France?<|eot|><|header_start|>assistant<|header_end|> + +The capital of France is Paris.<|eot|>""", + # Case 2: Last message incomplete + """<|begin_of_text|><|header_start|>system<|header_end|> + +I am a helpful 
assistant.<|eot|><|header_start|>user<|header_end|> + +What is the capital of France?<|eot|><|header_start|>assistant<|header_end|> + +The capital of France is""", + # Case 3: Multiple messages with mix of endings + """<|begin_of_text|><|header_start|>system<|header_end|> + +I am a helpful assistant.<|eot|><|header_start|>user<|header_end|> + +Tell me about Python.<|eot|><|header_start|>assistant<|header_end|> + +Python is a programming language.<|eot|><|header_start|>user<|header_end|> + +Can you elaborate?<|eot|><|header_start|>assistant<|header_end|> + +Python is known for its simplicity""", + # Case 4: Single incomplete message + """<|header_start|>assistant<|header_end|> + +Let me help you with that""", + # # Case 5: Empty content but complete -- not supported by LLAMA 4 + # """<|begin_of_text|><|header_start|>system<|header_end|> + # <|eot|><|header_start|>user<|header_end|> + # <|eot|>""", + # Case 6: Message with tool calls + """<|begin_of_text|><|header_start|>system<|header_end|> + +I am an assistant that can use tools.<|eot|><|header_start|>user<|header_end|> + +<|eot|><|header_start|>assistant<|header_end|> + +Let me help you with that. 
+ +{"name": "calculator", "arguments": {"expression": "2+2"}} +<|eot|><|header_start|>user<|header_end|> + +4<|eot|><|header_start|>assistant<|header_end|> + +The result is""", + ] + + @pytest.mark.parametrize( + "test_case", + LLAMA_TEST_CASES, + ids=["case_1", "case_2", "case_3", "case_4", "case_6"], + ) + def test_history_assistant_mask_llama(self, test_case): + from transformers import AutoTokenizer + + try: + tokenizer = AutoTokenizer.from_pretrained( + "meta-llama/Llama-4-Scout-17B-16E-Instruct" + ) + except Exception: + pytest.skip("Could not load Llama tokenizer") + + history = History.from_text(test_case, chat_template_name="llama") + proc = history.apply_chat_template( + tokenizer=tokenizer, + chat_template_name="llama", + add_generation_prompt=False, + return_dict=True, + return_assistant_tokens_mask=True, + ) + role_assistant = torch.tensor([r == "assistant" for r in history.role]) + last_item: str = history[role_assistant].apply_chat_template( + tokenizer=tokenizer, + chat_template_name="llama", + add_generation_prompt=False, + ) + + if "assistant" in history.role: + assert proc["assistant_masks"].any() + else: + assert not proc["assistant_masks"].any() + if last_item: + decoded = tokenizer.decode( + proc["input_ids"][proc["assistant_masks"].bool()] + ) + assert type(decoded) is str + assert last_item.endswith(decoded), (decoded, last_item) + def test_history_completion(self): """Test the History class's handling of complete and incomplete messages.""" for i, test_case in enumerate(self.TEST_CASES): history = History.from_text(test_case, chat_template_name="qwen") - # Print details about each message + # torchrl_logger.info details about each message for j, (role, content, is_complete) in enumerate( zip(history.role, history.content, history.is_complete) ): @@ -418,6 +517,455 @@ def test_history_completion(self): ], "Case 5 should have last message incomplete" assert history[2].role == "tool" + @pytest.mark.parametrize( + "model_name, 
expected_template", + [ + ("Qwen/Qwen2.5-0.5B", "qwen"), + ("microsoft/phi-2", "chatml_format"), + ("mosaicml/mpt-7b-instruct", "chatml_format"), + ("facebook/opt-125m", "chatml_format"), + ("gpt2", "chatml_format"), + ("EleutherAI/pythia-70m", "chatml_format"), + ("bigscience/bloom-560m", "chatml_format"), + ("deepseek-ai/deepseek-coder-6.7b-base", "deepseek"), + ], + ) + def test_assistant_mask_model_families(self, model_name, expected_template): + """Test assistant token masking support across different model families.""" + from transformers import AutoTokenizer + + torchrl_logger.info(f"\nTesting {model_name} with {expected_template} template") + tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) + + # Create a simple history + history = History.from_chats( + [ + [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + ] + ) + + # Test with expected template + result = history.apply_chat_template( + tokenizer=tokenizer, + chat_template_name=expected_template, + add_generation_prompt=False, + return_dict=True, + return_assistant_tokens_mask=True, + ) + + # Verify assistant mask is present + assert ( + "assistant_masks" in result + ), f"Model {model_name} should support assistant masking" + assert ( + result["assistant_masks"].shape[0] == 1 + ), "Should have batch dimension of 1" + assert result["assistant_masks"].shape[1] > 0, "Should have sequence length > 0" + + # Verify some assistant tokens are masked + assistant_token_count = result["assistant_masks"].sum().item() + assert ( + assistant_token_count > 0 + ), f"Model {model_name} should have assistant tokens masked" + torchrl_logger.info( + f" ✓ {model_name}: {assistant_token_count} assistant tokens masked" + ) + + @pytest.mark.parametrize( + "template_name", ["qwen", "dialogpt", "falcon", "deepseek"] + ) + def test_assistant_mask_with_custom_templates(self, template_name): + """Test that models with custom templates can still use assistant 
masking.""" + from transformers import AutoTokenizer + + # Test Qwen with its custom template + tokenizer = AutoTokenizer.from_pretrained( + "Qwen/Qwen2.5-0.5B", trust_remote_code=True + ) + + history = History.from_chats( + [ + [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + ] + ) + + # Test with Qwen's custom template + result = history.apply_chat_template( + tokenizer=tokenizer, + chat_template_name=template_name, + add_generation_prompt=False, + return_dict=True, + return_assistant_tokens_mask=True, + ) + + assert "assistant_masks" in result + assert result["assistant_masks"].sum().item() > 0 + + @pytest.mark.parametrize( + "model_name, template_name", + [ + ("Qwen/Qwen2.5-0.5B", "qwen"), + ("microsoft/DialoGPT-medium", "dialogpt"), + ("tiiuae/falcon-7b-instruct", "falcon"), + ("deepseek-ai/deepseek-coder-6.7b-base", "deepseek"), + ], + ) + def test_custom_template_equivalence(self, model_name, template_name): + """Test that our custom templates produce the same output as the model's default template (except for masking).""" + import re + + import transformers + + # Simple multi-turn chat for each model + def norm(s): + if isinstance(s, list): + return [re.sub(r"\s+", " ", x.strip()) for x in s] + elif isinstance(s, str): + return re.sub(r"\s+", " ", s.strip()) + else: + return s + + chat = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + {"role": "user", "content": "How are you?"}, + {"role": "assistant", "content": "I'm good, thanks!"}, + ] + + tokenizer = transformers.AutoTokenizer.from_pretrained( + model_name, trust_remote_code=True + ) + history = History.from_chats([chat]) + + # Output with model's default template + try: + default_out = history.apply_chat_template( + tokenizer=tokenizer, + add_generation_prompt=False, + chat_template=tokenizer.chat_template, # Use model's default + chat_template_name=None, + tokenize=False, + ) + except Exception as e: + 
default_out = None + torchrl_logger.info( + f"[WARN] Could not get default template for {model_name}: {e}" + ) + + # Output with our custom template + custom_out = history.apply_chat_template( + tokenizer=tokenizer, + add_generation_prompt=False, + chat_template_name=template_name, + chat_template=None, + tokenize=False, + ) + + if default_out is not None: + assert norm(default_out) == norm(custom_out), ( + f"Custom template for {model_name} does not match default!\n" + f"Default: {default_out}\nCustom: {custom_out}" + ) + else: + torchrl_logger.info( + f"[INFO] Skipped equivalence check for {model_name} (no default template available)" + ) + + def test_add_chat_template_parameters_used(self): + """Test that add_chat_template actually uses inverse_parser and model_family_keywords parameters with a real tokenizer.""" + import re + import uuid + + from torchrl.data.llm.history import add_chat_template, History + from transformers import AutoTokenizer + + try: + # Track if the inverse parser is called + inverse_parser_called = {"called": False} + + template_name = f"qwen_custom_{uuid.uuid4()}" + + # Create a custom template (trivially different from Qwen) + custom_template = """ + {% for message in messages %} + {%- if message['role'] == 'user' %} + [USER] {{ message['content'] }} + {%- elif message['role'] == 'assistant' %} + {% generation %}[ASSISTANT] {{ message['content'] }}{% endgeneration %} + {%- endif %} + {% endfor %} + """ + + # Custom inverse parser + def custom_inverse_parser(text: str) -> History: + inverse_parser_called["called"] = True + user_msgs = re.findall( + r"\[USER\] (.*?)(?=\[ASSISTANT\]|$)", text, re.DOTALL + ) + assistant_msgs = re.findall( + r"\[ASSISTANT\] (.*?)(?=\[USER\]|$)", text, re.DOTALL + ) + messages = [] + for i, user_content in enumerate(user_msgs): + messages.append(History(role="user", content=user_content.strip())) + if i < len(assistant_msgs): + messages.append( + History(role="assistant", content=assistant_msgs[i].strip()) + ) 
+ return lazy_stack(messages) + + # Register the custom template and parser for Qwen + add_chat_template( + template_name=template_name, + template=custom_template, + inverse_parser=custom_inverse_parser, + model_family_keywords=["qwen"], + ) + + # Use a real Qwen tokenizer + tokenizer = AutoTokenizer.from_pretrained( + "Qwen/Qwen2.5-3B", trust_remote_code=True + ) + history = History.from_chats( + [ + [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + ] + ] + ) + + # This should trigger auto-detection using our custom template + result = history.apply_chat_template( + tokenizer=tokenizer, + add_generation_prompt=False, + tokenize=False, + ) + # The result should use our custom format + if isinstance(result, list): + result_str = result[0] + else: + result_str = result + assert "[USER]" in result_str + assert "[ASSISTANT]" in result_str + + # Test that inverse parser works + parsed = History.from_text(result, chat_template_name=template_name) + assert inverse_parser_called["called"], "Inverse parser was not called" + assert parsed.role == history.role + assert parsed.content == history.content + finally: + from torchrl.data.llm.history import ( + _CHAT_TEMPLATES, + _CUSTOM_INVERSE_PARSERS, + _CUSTOM_MODEL_FAMILY_KEYWORDS, + ) + + if template_name in _CHAT_TEMPLATES: + del _CHAT_TEMPLATES[template_name] + if template_name in _CUSTOM_INVERSE_PARSERS: + del _CUSTOM_INVERSE_PARSERS[template_name] + if template_name in _CUSTOM_MODEL_FAMILY_KEYWORDS: + del _CUSTOM_MODEL_FAMILY_KEYWORDS[template_name] + + chats_round_trip = [ + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the capital of France?"}, + {"role": "assistant", "content": "The capital of France is Paris."}, + ], + [ + {"role": "user", "content": "Tell me a joke."}, + { + "role": "assistant", + "content": "Why did the chicken cross the road? 
To get to the other side!", + }, + ], + [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "Write a Python function to add two numbers."}, + {"role": "assistant", "content": "def add(a, b):\n return a + b"}, + {"role": "user", "content": "What about subtraction?"}, + {"role": "assistant", "content": "def subtract(a, b):\n return a - b"}, + ], + ] + + @pytest.mark.skipif(not _has_transformers, reason="requires transformers library") + @pytest.mark.parametrize( + "tokenizer_name", + [ + "meta-llama/Llama-4-Scout-17B-16E-Instruct", + "Qwen/Qwen2.5-0.5B", + "microsoft/phi-2", + ], + ) + @pytest.mark.parametrize( + "use_tokenizer_chat_template", + [False, True], + ids=["no_use_tokenizer_chat_template", "use_tokenizer_chat_template"], + ) + @pytest.mark.parametrize("chat", chats_round_trip) + def test_history_round_trip( + self, tokenizer_name, use_tokenizer_chat_template, chat + ): + """Test round-trip conversion: History -> string -> History for various templates and tokenizers.""" + import re + + from transformers import AutoTokenizer + + # Example chats + + tokenizer = AutoTokenizer.from_pretrained( + tokenizer_name, trust_remote_code=True + ) + + history = History.from_chats(chat) + if use_tokenizer_chat_template: + if ( + not hasattr(tokenizer, "chat_template") + or tokenizer.chat_template is None + ): + pytest.skip(f"Tokenizer {tokenizer_name} does not have a chat template") + chat_template = tokenizer.chat_template + chat_template_name = None + else: + chat_template = None + chat_template_name = None # Let History auto-detect + + # Serialize + chat_str = history.apply_chat_template( + tokenizer=tokenizer, + add_generation_prompt=False, + chat_template=chat_template, + chat_template_name=chat_template_name, + return_dict=False, + ) + # Parse back + parsed = History.from_text( + text=chat_str, + tokenizer=tokenizer, + chat_template=chat_template, + chat_template_name=chat_template_name, + ) + + # Normalize whitespace for 
comparison + def norm(x): + if isinstance(x, list): + return [re.sub(r"\\s+", " ", str(xx).strip()) for xx in x] + return re.sub(r"\\s+", " ", str(x).strip()) + # Compare roles and content + assert norm(parsed.role) == norm( + history.role + ), f"Roles do not match!\nOriginal: {history.role}\nParsed: {parsed.role}" + assert norm(parsed.content) == norm( + history.content + ), f"Content does not match!\nOriginal: {history.content}\nParsed: {parsed.content}" + + # All messages should be complete + assert all( + parsed.is_complete + ), f"All messages should be complete after round-trip. is_complete: {parsed.is_complete}" + + @pytest.mark.skipif(not _has_transformers, reason="requires transformers library") + @pytest.mark.parametrize( + "tokenizer_name", + [ + "Qwen/Qwen2.5-0.5B", + "microsoft/phi-2", + "meta-llama/Llama-4-Scout-17B-16E-Instruct", + ], + ) + @pytest.mark.parametrize( + "use_tokenizer_chat_template", + [False, True], + ids=["no_use_tokenizer_chat_template", "use_tokenizer_chat_template"], + ) + @pytest.mark.parametrize("chat", chats_round_trip) + def test_history_round_trip_incomplete( + self, tokenizer_name, use_tokenizer_chat_template, chat + ): + """Test that truncated strings are properly parsed with the last message marked as incomplete.""" + if chat[0]["role"] != "system": + pytest.skip("Skipping test for non-system message") + import re + + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained( + tokenizer_name, trust_remote_code=True + ) + + history = History.from_chats(chat) + + if use_tokenizer_chat_template: + if ( + not hasattr(tokenizer, "chat_template") + or tokenizer.chat_template is None + ): + pytest.skip(f"Tokenizer {tokenizer_name} does not have a chat template") + chat_template = tokenizer.chat_template + chat_template_name = None + else: + chat_template = None + chat_template_name = None # Let History auto-detect + + # Serialize + chat_str = history.apply_chat_template( + tokenizer=tokenizer, + 
add_generation_prompt=False, + chat_template=chat_template, + chat_template_name=chat_template_name, + return_dict=False, + ) + + # Truncate the last 10 characters to simulate incomplete response + truncated_chat_str = chat_str[:-10] + + # Parse back the truncated string + parsed = History.from_text( + text=truncated_chat_str, + tokenizer=tokenizer, + chat_template=chat_template, + chat_template_name=chat_template_name, + ) + + # Normalize whitespace for comparison + def norm(x): + if isinstance(x, list): + return [re.sub(r"\\s+", " ", str(xx).strip()) for xx in x] + return re.sub(r"\\s+", " ", str(x).strip()) + + # Check that we have the same number of messages as the original + assert len(parsed.role) == len( + history.role + ), f"Number of messages should match original. Original: {len(history.role)}, Parsed: {len(parsed.role)}" + assert len(parsed.content) == len( + history.content + ), f"Number of content items should match original. Original: {len(history.content)}, Parsed: {len(parsed.content)}" + assert len(parsed.is_complete) == len( + history.is_complete + ), f"Number of completion flags should match original. Original: {len(history.is_complete)}, Parsed: {len(parsed.is_complete)}" + + # Check that all messages except the last one are complete + if len(parsed.is_complete) > 0: + assert all( + parsed.is_complete[:-1] + ), f"All messages except the last should be complete. is_complete: {parsed.is_complete}" + assert not parsed.is_complete[ + -1 + ], f"Last message should be incomplete. is_complete: {parsed.is_complete}" + + # Check that roles match the original (except potentially the last one if it was truncated mid-message) + assert norm(parsed.role[:-1]) == norm( + history.role[:-1] + ), f"All roles except the last should match original. 
Original: {history.role[:-1]}, Parsed: {parsed.role[:-1]}" + class TestTopK: @pytest.mark.parametrize("per_token_reward", [True, False]) diff --git a/test/llm/test_envs.py b/test/llm/test_envs.py index c0237ca73ff..03ade320da3 100644 --- a/test/llm/test_envs.py +++ b/test/llm/test_envs.py @@ -10,6 +10,7 @@ import random import re import time +from functools import partial import pytest import torch @@ -25,7 +26,7 @@ ) from torchrl._utils import logger as torchrl_logger -from torchrl.data.llm.chat import History +from torchrl.data.llm.history import History from torchrl.envs import StepCounter from torchrl.envs.llm import ( as_padded_tensor, @@ -35,13 +36,15 @@ KLRewardTransform, LLMEnv, make_gsm8k_env, + RetrieveKL, ) -from torchrl.modules.llm import TransformersWrapper +from torchrl.modules.llm import TransformersWrapper, vLLMWrapper from transformers import AutoTokenizer _has_transformers = importlib.util.find_spec("transformers") is not None _has_datasets = importlib.util.find_spec("datasets") is not None +_has_vllm = importlib.util.find_spec("vllm") is not None _has_ifeval = ( _has_datasets and (importlib.util.find_spec("langdetect") is not None) @@ -50,6 +53,18 @@ ) +@pytest.fixture(scope="module", autouse=True) +def set_seed(): + seed = 2 + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + yield + + @pytest.fixture(scope="module", autouse=True) def list_to_stack_fixture(): import tensordict @@ -418,49 +433,91 @@ class TestChatEnv: def tokenizer(self): return AutoTokenizer.from_pretrained("Qwen/Qwen2.5-3B") - def test_chat_env(slef, tokenizer): + @pytest.mark.parametrize("input_mode", ["text", "tokens", "history"]) + def test_chat_env(self, tokenizer, input_mode): # Set list to stack for tensordict set_list_to_stack(True).set() # Initialize environment env = ChatEnv( batch_size=(1,), tokenizer=tokenizer, 
- apply_template=True, system_prompt="I'm system, do what I want.", + input_mode=input_mode, ) # Reset environment - td_reset = env.reset( - TensorDict( - text=["I'm the user. I'm going to tell you a little about something."], - batch_size=(1,), - ) + td_reset = TensorDict( + query=["I'm the user. I'm going to tell you a little about something."], + batch_size=(1,), + device=env.device, ) + td_reset = env.reset(td_reset) # Check history after reset - torchrl_logger.info(f'{td_reset["history"].content=}') - assert len(td_reset["history"][0].content) == 2 - assert td_reset["history"][0, 0].content == "I'm system, do what I want." - assert td_reset["history"][0, 1].content.startswith("I'm the user.") - assert td_reset["history"][0].role == ["system", "user"] + if input_mode == "history": + torchrl_logger.info(f'{td_reset["history"].prompt.content=}') + assert len(td_reset["history"][0].prompt.content) == 2 + assert ( + td_reset["history"][0].prompt[0].content + == "I'm system, do what I want." + ) + assert td_reset["history"][0].prompt[1].content.startswith("I'm the user.") + assert td_reset["history"][0].prompt.role == ["system", "user"] + elif input_mode == "tokens": + torchrl_logger.info(f'{td_reset["tokens"].prompt=}') + elif input_mode == "text": + torchrl_logger.info(f'{td_reset["text"].prompt=}') # Check text after reset expected_text = "<|im_start|>system\nI'm system, do what I want.<|im_end|>\n<|im_start|>user\nI'm the user. 
I'm going to tell you a little about something.<|im_end|>\n<|im_start|>assistant\n" - assert td_reset["text"][0] == expected_text + if input_mode in ("text",): + assert td_reset["text"][0].prompt == expected_text # Take step in environment - td_action = td_reset.set( - "text_response", ["This is the action from the assistant!<|im_end|>"] - ) + if input_mode == "history": + td_reset["history"].response = History( + content="This is the action from the assistant!", role="assistant" + ).view(1, 1) + td_reset["history"].full = td_reset["history"].prompt.extend( + td_reset["history"].response, dim=-1 + ) + td_action = td_reset + elif input_mode == "tokens": + td_reset["tokens"][0].response = tokenizer.encode( + "This is the action from the assistant!<|im_end|>" + ) + td_action = td_reset + elif input_mode == "text": + td_reset["text"].response = [ + "This is the action from the assistant!<|im_end|>" + ] + td_reset["text"].full = [ + td_reset["text"][0].prompt + + "This is the action from the assistant!<|im_end|>" + ] + td_action = td_reset td_next = env.step(td_action) - # Check history after step - assert len(td_next["next", "history"].content[0]) == 3 - assert td_next["next", "history"][0, 0].content == "I'm system, do what I want." - assert td_next["next", "history"][0, 1].content.startswith("I'm the user.") - assert ( - td_next["next", "history"][0, 2].content - == "This is the action from the assistant!" - ) - assert td_next["next", "history"][0].role == ["system", "user", "assistant"] - # Check text after step - expected_text = "<|im_start|>system\nI'm system, do what I want.<|im_end|>\n<|im_start|>user\nI'm the user. 
I'm going to tell you a little about something.<|im_end|>\n<|im_start|>assistant\nThis is the action from the assistant!<|im_end|>\n<|im_start|>assistant\n" - assert td_next["next", "text"][0] == expected_text + if input_mode == "history": + # Check history after step + assert len(td_next["next", "history"][0].prompt.content) == 3 + assert ( + td_next["next", "history"][0].prompt[0].content + == "I'm system, do what I want." + ) + assert ( + td_next["next", "history"][0] + .prompt[1] + .content.startswith("I'm the user.") + ) + assert ( + td_next["next", "history"][0].prompt[2].content + == "This is the action from the assistant!" + ) + assert td_next["next", "history"][0].prompt.role == [ + "system", + "user", + "assistant", + ] + if input_mode in ("text",): + # Check text after step + expected_text = "<|im_start|>system\nI'm system, do what I want.<|im_end|>\n<|im_start|>user\nI'm the user. I'm going to tell you a little about something.<|im_end|>\n<|im_start|>assistant\nThis is the action from the assistant!<|im_end|>" + assert td_next["next", "text"][0].prompt == expected_text @pytest.mark.skipif(not _has_datasets, reason="requires datasets") @@ -506,45 +563,6 @@ def test_env_reward(self, n_envs): assert ("next", "reward") in r assert r["next", "reward"].shape == (n_envs, 3, 1, 1) - @pytest.mark.skipif(not _has_transformers, reason="requires transformers library") - @pytest.mark.parametrize("n_envs", [1, 4]) - def test_kl_bonus(self, n_envs, ref_model): - torch.manual_seed(0) - ref_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - with torch.device(ref_device): - model, tokenizer = ref_model - ref_model = TransformersWrapper( - model, - return_log_probs=True, - generate=False, - # In practice, we should have the tokens available - from_text=False, - tokenizer=tokenizer, - ) - policy = TransformersWrapper( - model, - return_log_probs=True, - generate=True, - from_text=True, - tokenizer=tokenizer, - generate_kwargs={"max_new_tokens": 
20}, - tokenizer_kwargs={"add_special_tokens": False}, - ) - - env = make_gsm8k_env(num_envs=n_envs, tokenizer=tokenizer) - env.append_transform( - KLRewardTransform( - actor=ref_model, - coef=0.1, - device=ref_device, - ) - ) - r = env.rollout(3, policy) - r = r.view(-1) - for _r in r.unbind(0): - assert _r["tokens_response"].shape + (1,) == _r["next", "reward"].shape - def test_gsm8kenv(self): import transformers @@ -553,34 +571,22 @@ def test_gsm8kenv(self): # env.check_env_specs(break_when_any_done="both") r = env.reset() assert "history" in r - assert r["history"].shape == (1, 2) - assert "text" in r + assert r["history"].prompt.shape == (1, 2) r = r.clone() response = "First, calculate the total number of snakes in the breeding balls. There are 3 breeding balls with 8 snakes each, so 3 * 8 = 24 snakes. Next, calculate the number of snakes in the additional pairs. There are 6 pairs of snakes, and each pair has 2 snakes, so 6 * 2 = 12 snakes. Finally, add the number of snakes from the breeding balls and the additional pairs: 24 + 12 = 36 snakes. Mary saw a total of 36 snakes.<|im_end|>" - r["text_response"] = [response] + text = ( + r["history"] + .prompt[0] + .apply_chat_template(tokenizer=tokenizer, add_generation_prompt=True) + + response + ) + history_full = History.from_text(text).unsqueeze(0) + assert history_full.shape[-1] == 3 + r["history"].full = history_full s = env.step(r) assert s["next", "reward"] >= 10 assert s["next", "done"].all() - def test_gsm8kenv_batch(self): - import transformers - - tokenizer = transformers.AutoTokenizer.from_pretrained("Qwen/Qwen2.5-3B") - env = GSM8KEnv(tokenizer=tokenizer, apply_template=True, num_envs=4) - # env.check_env_specs(break_when_any_done="both") - r = env.reset() - assert "history" in r - assert r["history"].shape == (4, 2) - assert "text" in r - r = r.clone() - response = "First, calculate the total number of snakes in the breeding balls. There are 3 breeding balls with 8 snakes each, so 3 * 8 = 24 snakes. 
Next, calculate the number of snakes in the additional pairs. There are 6 pairs of snakes, and each pair has 2 snakes, so 6 * 2 = 12 snakes. Finally, add the number of snakes from the breeding balls and the additional pairs: 24 + 12 = 36 snakes. Mary saw a total of 36 snakes.<|im_end|>" - r["text_response"] = [response] * 4 - s = env.step(r) - assert (s["next", "reward"] >= 10).all() - assert s["next", "done"].all() - - env.rollout(10, break_when_any_done=False) - @pytest.mark.skipif(not _has_ifeval, reason="requires IFEval libs") class TestIFEvalEnv: @@ -592,13 +598,14 @@ def test_ifeval(self): torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-3B") - env = IFEvalEnv(apply_template=True, tokenizer=tokenizer) + env = IFEvalEnv(apply_template=True, tokenizer=tokenizer, input_mode="history") torchrl_logger.info(env.reset()) r = env.reset() - r.set( - "text_response", - [ - """ + r["history"].full = History.from_text( + r["history"] + .prompt[0] + .apply_chat_template(tokenizer=tokenizer, add_generation_prompt=True) + + """ The task requires crafting a riddle about a 'house' that's not traditionally considered one. The answer must be included, and the response should be at least 400 words with a title wrapped in double angular brackets. Let's start by brainstorming what could be considered a 'house' in a non-traditional sense. Ideas include natural shelters, abstract concepts, or objects that serve a similar purpose to a house. One potential concept is a "womb," as it provides shelter and housing for a developing being. However, we need to ensure our riddle is engaging, meets the word count requirement, and includes the necessary elements like a title. Let's construct a narrative around the chosen concept, ensuring it's detailed and follows the required structure. @@ -640,8 +647,7 @@ def test_ifeval(self): By embracing such metaphors, we're encouraged to look beyond the obvious and appreciate the myriad ways 'shelter' manifests in our lives. 
And so, the riddle serves not just as a puzzle to be solved but as a reflection on the profound connections that bind us to the very essence of existence. <|im_end|> """ - ], - ) + ).unsqueeze(0) td = env.step(r) assert td["next", "ifeval_score"].all() assert td.get(("next", "reward")) is not None @@ -660,26 +666,32 @@ def test_python_interpreter_single_batch(self): base_env = ChatEnv( batch_size=(1,), system_prompt="I'm the system, do as I say", - apply_template=True, tokenizer=tokenizer, + input_mode="history", ) env = base_env.append_transform(PythonInterpreter()) - r = env.reset(TensorDict(text=["This is the user prompt"], batch_size=(1,))) + r = env.reset( + TensorDict( + {base_env.data_key: ["This is the user prompt"]}, batch_size=(1,) + ) + ) rc = r.clone() - h = r["history"] + h = r["history"].prompt history_from_text = h.apply_chat_template(tokenizer=tokenizer) assert history_from_text == [ "<|im_start|>system\nI'm the system, do as I say<|im_end|>\n<|im_start|>user\nThis is the user prompt<|im_end|>\n<|im_start|>assistant\n" ] - r["text_response"] = [ - """Here is a python code to execute: -```python -print(1 + 1) -```<|im_end|>\n -""" - ] + r["history"].full = h.extend( + History( + role="assistant", + content="Here is a python code to execute:\n```python\nprint(1 + 1)\n```", + ).view(1, 1), + dim=-1, + ) s = env.step(r) - history_str = s["next", "history"].apply_chat_template(tokenizer=tokenizer) + history_str = s["next", "history"].prompt.apply_chat_template( + tokenizer=tokenizer, add_generation_prompt=True + ) assert history_str == [ "<|im_start|>system\n" "I'm the system, do as I say<|im_end|>\n" @@ -690,7 +702,7 @@ def test_python_interpreter_single_batch(self): "```python\n" "print(1 + 1)\n" "```<|im_end|>\n" - "<|im_start|>user\n" + " <|im_start|>user\n" "\n" "Code block 1 executed successfully:\n" "2\n" @@ -719,22 +731,35 @@ def test_python_interpreter_single_batch(self): ).all() # Check what happens if there is no tool response r = rc.clone() - 
r["text_response"] = [ - """Here is a response without a python code to execute.<|im_end|>""" - ] + r["history"].full = h.extend( + History( + role="assistant", + content="Here is a response without a python code to execute.", + ).view(1, 1), + dim=-1, + ) s = env.step(r) - history_str = s["next", "history"].apply_chat_template(tokenizer=tokenizer) + history_str = s["next", "history"].prompt.apply_chat_template( + tokenizer=tokenizer, add_generation_prompt=True + ) assert history_str == [ "<|im_start|>system\n" "I'm the system, do as I say<|im_end|>\n" "<|im_start|>user\n" "This is the user prompt<|im_end|>\n" "<|im_start|>assistant\n" + "Here is a python code to execute:\n" + "```python\n" + "print(1 + 1)\n" + "```<|im_end|>\n" + " <|im_start|>assistant\n" "Here is a response without a python code to execute.<|im_end|>\n" - "<|im_start|>assistant\n" + " <|im_start|>assistant\n" ] def test_python_interpreter_persistent(self): + pass + from torchrl.envs.llm.transforms import PythonInterpreter from transformers import AutoTokenizer @@ -742,29 +767,35 @@ def test_python_interpreter_persistent(self): env = ChatEnv( batch_size=(1,), system_prompt="I'm the system, do as I say", - apply_template=True, tokenizer=tokenizer, + input_mode="history", ) env = env.append_transform(PythonInterpreter(persistent=True)) - r = env.reset(TensorDict(text=["This is the user prompt"], batch_size=(1,))) - r["text_response"] = [ - """Here is a python code to execute: -```python -a=1 -```<|im_end|>\n -""" - ] + r = env.reset( + TensorDict({env.data_key: ["This is the user prompt"]}, batch_size=(1,)) + ) + r["history"].full = r["history"].prompt.extend( + History( + role="assistant", + content="Here is a python code to execute:\n```python\na=1\n```", + ).view(1, 1), + dim=-1, + ) s, s_ = env.step_and_maybe_reset(r) - s_["text_response"] = [ - """Here is a python code to execute: -```python -a+=1 -assert a == 2 -```<|im_end|>\n -""" - ] + s_["history"].full = s_["history"].prompt.extend( + 
History( + role="assistant", + content="Here is a python code to execute:\n```python\na+=1\nassert a == 2\n```", + ).view(1, 1), + dim=-1, + inplace=False, + ) s, s_ = env.step_and_maybe_reset(s_) - assert s_["history"].apply_chat_template(tokenizer=tokenizer) == [ + response = s_["history"].prompt.apply_chat_template( + tokenizer=tokenizer, add_generation_prompt=True + ) + + assert response == [ "<|im_start|>system\n" "I'm the system, do as I say<|im_end|>\n" "<|im_start|>user\n" @@ -774,7 +805,7 @@ def test_python_interpreter_persistent(self): "```python\n" "a=1\n" "```<|im_end|>\n" - "<|im_start|>user\n" + " <|im_start|>user\n" "\n" "Code block 1 executed successfully:\n" "\n" @@ -785,7 +816,7 @@ def test_python_interpreter_persistent(self): "a+=1\n" "assert a == 2\n" "```<|im_end|>\n" - "<|im_start|>user\n" + " <|im_start|>user\n" "\n" "Code block 1 executed successfully:\n" "\n" @@ -801,30 +832,33 @@ def test_python_interpreter_persistent_error(self): env = ChatEnv( batch_size=(1,), system_prompt="I'm the system, do as I say", - apply_template=True, tokenizer=tokenizer, + input_mode="history", ) env = env.append_transform(PythonInterpreter(persistent=True)) - r = env.reset(TensorDict(text=["This is the user prompt"], batch_size=(1,))) - r["text_response"] = [ - """Here is a python code to execute: -```python -raise ValueError("This is an error") -```<|im_end|>\n -""" - ] + r = env.reset( + TensorDict({env.data_key: ["This is the user prompt"]}, batch_size=(1,)) + ) + r["history"].full = r["history"].prompt.extend( + History( + role="assistant", + content="Here is a python code to execute:\n```python\nraise ValueError('This is an error')\n```", + ).view(1, 1), + dim=-1, + ) s, s_ = env.step_and_maybe_reset(r) - s_["text_response"] = [ - """Here is a python code to execute: -```python -a=1 -assert a == 1 -```<|im_end|>\n -""" - ] + s_["history"].full = s_["history"].prompt.extend( + History( + role="assistant", + content="Here is a python code to 
execute:\n```python\na=1\nassert a == 1\n```", + ).view(1, 1), + dim=-1, + ) s, s_ = env.step_and_maybe_reset(s_) assert re.match( - s_["history"].apply_chat_template(tokenizer=tokenizer)[0], + s_["history"].prompt.apply_chat_template( + tokenizer=tokenizer, add_generation_prompt=True + )[0], r"<|im_start|>system\n" "I'm the system, do as I say<|im_end|>\n" "<|im_start|>user\n" @@ -834,7 +868,7 @@ def test_python_interpreter_persistent_error(self): "```python\n" 'raise ValueError("This is an error")\n' "```<|im_end|>\n" - "<|im_start|>user\n" + " <|im_start|>user\n" "\n" "Code block 1 failed:\n" "Error: This is an error\n" @@ -853,7 +887,7 @@ def test_python_interpreter_persistent_error(self): "a=1\n" "assert a == 1\n" "```<|im_end|>\n" - "<|im_start|>user\n" + " <|im_start|>user\n" "\n" "Code block 1 executed successfully:\n" "\n" @@ -869,34 +903,35 @@ def test_python_interpreter_persistent_reset(self): env = ChatEnv( batch_size=(1,), system_prompt="I'm the system, do as I say", - apply_template=True, tokenizer=tokenizer, ) env = env.append_transform(PythonInterpreter(persistent=True)) - r = env.reset(TensorDict(text=["This is the user prompt"], batch_size=(1,))) - r["text_response"] = [ - """Here is a python code to execute: -```python -a = [0] -```<|im_end|>\n -""" - ] + r = env.reset( + TensorDict({env.data_key: ["This is the user prompt"]}, batch_size=(1,)) + ) + r["history"].full = r["history"].prompt.extend( + History( + role="assistant", + content="Here is a python code to execute:\n```python\na = [0]\n```", + ).view(1, 1), + dim=-1, + ) s, s_ = env.step_and_maybe_reset(r) - r = env.reset(TensorDict(text=["This is the user prompt"], batch_size=(1,))) - r["text_response"] = [ - """Here is a python code to execute: -```python -# check if a is still defined -if "a" in globals(): - raise RuntimeError("a is still defined") -else: - print("a is not defined") -```<|im_end|>\n -""" - ] + r = env.reset( + TensorDict({env.data_key: ["This is the user prompt"]}, 
batch_size=(1,)) + ) + r["history"].full = r["history"].prompt.extend( + History( + role="assistant", + content="Here is a python code to execute:\n```python\n# check if a is still defined\nif 'a' in globals():\n raise RuntimeError('a is still defined')\nelse:\n print('a is not defined')\n```", + ).view(1, 1), + dim=-1, + ) s, s_ = env.step_and_maybe_reset(r) assert re.match( - s_["history"].apply_chat_template(tokenizer=tokenizer)[0], + s_["history"].prompt.apply_chat_template( + tokenizer=tokenizer, add_generation_prompt=True + )[0], "<|im_start|>system\n" "I'm the system, do as I say<|im_end|>\n" "<|im_start|>user\n" @@ -910,7 +945,7 @@ def test_python_interpreter_persistent_reset(self): "else:\n" ' print("a is not defined")\n' "```<|im_end|>\n" - "<|im_start|>user\n" + " <|im_start|>user\n" "\n" "Code block 1 executed successfully:\n" "a is not defined\n" @@ -956,41 +991,50 @@ def calculator(operation: str, a: float, b: float) -> dict: # Create environment and transform tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-3B") - env = ChatEnv( + base_env = ChatEnv( batch_size=(1,), system_prompt="You are a helpful assistant that uses a calculator.", - apply_template=True, tokenizer=tokenizer, ) transform = MCPToolTransform(tools, schemas) - env = env.append_transform(transform) + env = base_env.append_transform(transform) # Test single tool call - td = TensorDict({"text": ["Let me calculate 2 + 3"]}, batch_size=(1,)) + td = TensorDict( + {base_env.data_key: ["Let me calculate 2 + 3"]}, batch_size=(1,) + ) td = env.reset(td) - td["text_response"] = [ - 'I will help you calculate 2 + 3:\ncalculator\n{"operation": "add", "a": 2, "b": 3}<|im_end|>' - ] + td["history"].full = td["history"].prompt.extend( + History( + role="assistant", + content='I will help you calculate 2 + 3:\ncalculator\n{"operation": "add", "a": 2, "b": 3}<|im_end|>', + ).view(1, 1), + dim=-1, + ) result = env.step(td) # Check that the tool was executed and returned correct result - history = 
result["next", "history"] + history = result["next", "history"].prompt assert len(history[0]) == 4 # system, user, assistant, tool response assert history[0, -1].role == "tool" assert "result': 5" in history[0, -1].content # Test multiple tool calls in one response - td = TensorDict({"text": ["Calculate 2 + 3 and 4 * 5"]}, batch_size=(1,)) + td = TensorDict( + {base_env.data_key: ["Calculate 2 + 3 and 4 * 5"]}, batch_size=(1,) + ) td = env.reset(td) - td["text_response"] = [ - "I will help you calculate both:\n" - 'calculator\n{"operation": "add", "a": 2, "b": 3}\n' - 'calculator\n{"operation": "multiply", "a": 4, "b": 5}<|im_end|>' - ] + td["history"].full = td["history"].prompt.extend( + History( + role="assistant", + content='I will help you calculate both:\ncalculator\n{"operation": "add", "a": 2, "b": 3}\ncalculator\n{"operation": "multiply", "a": 4, "b": 5}<|im_end|>', + ).view(1, 1), + dim=-1, + ) result = env.step(td) # Check that both tools were executed and returned correct results - history = result["next", "history"] + history = result["next", "history"].prompt assert ( len(history[0]) == 5 ) # system, user, assistant, tool response 1, tool response 2 @@ -1000,30 +1044,38 @@ def calculator(operation: str, a: float, b: float) -> dict: assert "result': 20" in history[0, -1].content # 4 * 5 = 20 # Test error handling - td = TensorDict({"text": ["Calculate 2 ? 3"]}, batch_size=(1,)) + td = TensorDict({base_env.data_key: ["Calculate 2 ? 
3"]}, batch_size=(1,)) td = env.reset(td) - td["text_response"] = [ - 'I will try to calculate:\ncalculator\n{"operation": "invalid", "a": 2, "b": 3}<|im_end|>' - ] + td["history"].full = td["history"].prompt.extend( + History( + role="assistant", + content='I will try to calculate:\ncalculator\n{"operation": "invalid", "a": 2, "b": 3}<|im_end|>', + ).view(1, 1), + dim=-1, + ) result = env.step(td) # Check that error was handled gracefully - history = result["next", "history"] + history = result["next", "history"].prompt assert len(history[0]) == 4 assert history[0, -1].role == "tool" assert "failed" in history[0, -1].content assert "Unknown operation: invalid" in history[0, -1].content # Test invalid JSON - td = TensorDict({"text": ["Calculate something"]}, batch_size=(1,)) + td = TensorDict({base_env.data_key: ["Calculate something"]}, batch_size=(1,)) td = env.reset(td) - td["text_response"] = [ - "Let me calculate:\ncalculator\ninvalid json<|im_end|>" - ] + td["history"].full = td["history"].prompt.extend( + History( + role="assistant", + content="Let me calculate:\ncalculator\ninvalid json<|im_end|>", + ).view(1, 1), + dim=-1, + ) result = env.step(td) # Check that JSON error was handled gracefully - history = result["next", "history"] + history = result["next", "history"].prompt assert len(history[0]) == 4 assert history[0, -1].role == "tool" assert "failed" in history[0, -1].content @@ -1066,7 +1118,6 @@ def make_env(cls): env = ChatEnv( batch_size=(1,), system_prompt="I'm a calculator assistant", - apply_template=True, tokenizer=tokenizer, ) tools = {"calculator": cls.delayed_calculator} @@ -1086,20 +1137,30 @@ def test_async_mcp_tools(self): try: # Reset both environments tdreset = TensorDict( - text=[["Let me calculate 2 + 3"], ["Let me calculate 4 * 5"]], + query=[["Let me calculate 2 + 3"], ["Let me calculate 4 * 5"]], batch_size=(2, 1), ) td = env_pool.reset(tdreset) # Send async steps to both environments - td["text_response"] = [ + td["history"].full 
= torch.stack( [ - 'Let me calculate 2 + 3:\ncalculator\n{"operation": "add", "a": 2, "b": 3}<|im_end|>' - ], - [ - 'Let me calculate 4 * 5:\ncalculator\n{"operation": "multiply", "a": 4, "b": 5}<|im_end|>' - ], - ] + td[0]["history"].prompt.extend( + History( + role="assistant", + content='Let me calculate 2 + 3:\ncalculator\n{"operation": "add", "a": 2, "b": 3}<|im_end|>', + ).view(1, 1), + dim=-1, + ), + td[1]["history"].prompt.extend( + History( + role="assistant", + content='Let me calculate 4 * 5:\ncalculator\n{"operation": "multiply", "a": 4, "b": 5}<|im_end|>', + ).view(1, 1), + dim=-1, + ), + ] + ) env_pool.async_step_send(td) # Get results as they complete @@ -1116,7 +1177,7 @@ def test_async_mcp_tools(self): all_results = torch.stack(list(results) + list(remaining)) # Verify results - history = all_results["next", "history"] + history = all_results["next", "history"].prompt assert len(history[0, 0]) == 4 # system, user, assistant, tool response assert history[0, 0, -1].role == "tool" assert any( @@ -1174,14 +1235,18 @@ def test_thinking_prompt_wrong_answer( ) ) reset = env.reset() - assert reset[0]["history"][-1].content.startswith( - "Natalia sold clips to 48 of her friends in April" + assert ( + reset[0]["history"] + .prompt[-1] + .content.startswith("Natalia sold clips to 48 of her friends in April") ) - policy_anser = ( + policy_answer = ( "Let me solve this step by step. Natalia sold clips to 48 friends in April. Then she sold half as many in May. Half of 48 is 24. So in May she sold 24 clips. " "To find the total, I need to add April and May: 48 + 24 = 72. 
Therefore, Natalia sold 72 clips altogether in April and May.\n322 clips<|im_end|>" ) - reset["text_response"] = [policy_anser] + reset["history"].full = reset["history"].prompt.extend( + History(role="assistant", content=policy_answer).view(1, 1), dim=-1 + ) s = env.step(reset) if zero_reward: assert (s["next", "reward"] == 0).all() @@ -1192,13 +1257,13 @@ def test_thinking_prompt_wrong_answer( else: assert (s["next", "done"] != 0).all() if edit_last_turn: - assert s["next", "history"].shape == (1, 3) + assert s["next", "history"].prompt.shape == (1, 3) else: - assert s["next", "history"].shape == (1, 4) + assert s["next", "history"].prompt.shape == (1, 4) if role == "assistant": - assert s[0]["next", "history", "role"][-1] == "assistant" + assert s[0]["next", "history"].prompt[-1].role == "assistant" else: - assert s[0]["next", "history", "role"][-1] == "user" + assert s[0]["next", "history"].prompt[-1].role == "user" @pytest.mark.skipif(not _has_transformers, reason="requires transformers") @pytest.mark.skipif(not _has_datasets, reason="requires gsm8k") @@ -1237,19 +1302,269 @@ def test_thinking_prompt_correct_answer( ) ) reset = env.reset() - assert reset[0]["history"][-1].content.startswith( - "Natalia sold clips to 48 of her friends in April" + assert ( + reset[0]["history"] + .prompt[-1] + .content.startswith("Natalia sold clips to 48 of her friends in April") ) - policy_anser = ( + policy_answer = ( "Let me solve this step by step. Natalia sold clips to 48 friends in April. Then she sold half as many in May. Half of 48 is 24. So in May she sold 24 clips. " "To find the total, I need to add April and May: 48 + 24 = 72. 
Therefore, Natalia sold 72 clips altogether in April and May.\n72<|im_end|>" ) - reset["text_response"] = [policy_anser] + reset["history"].full = reset["history"].prompt.extend( + History(role="assistant", content=policy_answer).view(1, 1), dim=-1 + ) s = env.step(reset) assert (s["next", "reward"] != 0).all(), s["next", "reward"] - assert s[0]["next", "history", "role"][-1] == "assistant" + assert s[0]["next", "history"].prompt[-1].role == "assistant" assert s["next", "done"].all() - assert len(s[0]["next", "history", "content"]) == 3 + assert len(s[0]["next", "history"].prompt) == 3 + + +class TestChatEnvIntegration: + @pytest.fixture(scope="module") + def transformers_instance(self): + """Create transformers model and tokenizer for testing.""" + if not _has_transformers: + pytest.skip("transformers not available") + from transformers import AutoModelForCausalLM, AutoTokenizer + + model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B") + tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B") + tokenizer.pad_token = tokenizer.eos_token + return model, tokenizer + + @pytest.fixture(scope="module") + def vllm_instance(self): + """Create vLLM model and tokenizer for testing.""" + if not _has_vllm: + pytest.skip("vllm not available") + + import vllm.envs as envs + from transformers import AutoTokenizer + from vllm import LLM + + envs.VLLM_HOST_IP = "0.0.0.0" or "127.0.0.1" + + try: + model = LLM("Qwen/Qwen2.5-0.5B") + tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B") + tokenizer.pad_token = tokenizer.eos_token + return model, tokenizer + except Exception as e: + pytest.skip(f"Failed to load vLLM model: {e}") + + @pytest.mark.skipif(not _has_vllm, reason="vllm not available") + @pytest.mark.skipif(not _has_datasets, reason="datasets not available") + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + @pytest.mark.parametrize( + "input_mode,compute_reward", + [["history", True], ["history", False], 
["text", False], ["tokens", False]], + ids=[ + "history_compute_reward", + "history_no_compute_reward", + "text_no_compute_reward", + "tokens_no_compute_reward", + ], + ) + def test_chat_env_integration_ifeval(self, compute_reward, pad_output, input_mode): + """Test that the wrapper works correctly with the ChatEnv.""" + import vllm.envs as envs + from torchrl.envs.llm import IFEvalEnv + + envs.VLLM_HOST_IP = "0.0.0.0" or "127.0.0.1" + + policy = vLLMWrapper( + model="Qwen/Qwen2.5-0.5B", + tokenizer="Qwen/Qwen2.5-0.5B", + input_mode=input_mode, + pad_output=pad_output, + generate=True, + ) + env = IFEvalEnv( + max_steps=1, + compute_reward=compute_reward, + input_mode=input_mode, + tokenizer=policy.tokenizer, + ) + r = env.reset() + prompt = None + if input_mode == "history": + assert r["history", "prompt"].shape == (1, 2) + elif input_mode == "text": + prompt = r["text", "prompt"][0] + r = policy(r) + if input_mode == "history": + assert r["history", "response"].shape == (1, 1) + assert r["history", "full"].shape == (1, 3) + elif input_mode == "text": + assert r["text", "full"][0].startswith(prompt) + r, r_ = env.step_and_maybe_reset(r) + if input_mode == "history": + assert r["next", "history", "prompt"].shape == (1, 3) + assert r_["history", "prompt"] is not None + assert r_.get(("history", "response"), as_list=True) is None + assert r_.get(("history", "full"), as_list=True) is None + assert r["next", "done"].all() + r = policy(r_) + r, r_ = env.step_and_maybe_reset(r) + + @pytest.mark.skipif(not _has_vllm, reason="vllm not available") + @pytest.mark.skipif(not _has_datasets, reason="datasets not available") + @pytest.mark.parametrize( + "compute_reward", [False, True], ids=["no_compute_reward", "compute_reward"] + ) + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + @pytest.mark.parametrize( + "input_mode", ["history", "text", "tokens"], ids=["history", "text", "tokens"] + ) + def test_chat_env_integration_gsm8k(self, 
compute_reward, pad_output, input_mode): + """Test that the wrapper works correctly with the ChatEnv.""" + import vllm.envs as envs + from torchrl.envs.llm import GSM8KEnv + + envs.VLLM_HOST_IP = "0.0.0.0" or "127.0.0.1" + + policy = vLLMWrapper( + model="Qwen/Qwen2.5-0.5B", + tokenizer="Qwen/Qwen2.5-0.5B", + input_mode=input_mode, + pad_output=pad_output, + generate=True, + ) + env = GSM8KEnv( + max_steps=1, + compute_reward=compute_reward, + input_mode=input_mode, + tokenizer=policy.tokenizer, + ) + r = env.reset() + prompt = None + if input_mode == "history": + assert r["history", "prompt"].shape == (1, 2) + elif input_mode == "text": + prompt = r["text", "prompt"][0] + r = policy(r) + if input_mode == "history": + assert r["history", "response"].shape == (1, 1) + assert r["history", "full"].shape == (1, 3) + elif input_mode == "text": + assert r["text", "full"][0].startswith(prompt) + r, r_ = env.step_and_maybe_reset(r) + if input_mode == "history": + assert r["next", "history", "prompt"].shape == (1, 3) + assert r_["history", "prompt"] is not None + assert r_.get(("history", "response"), as_list=True) is None + assert r_.get(("history", "full"), as_list=True) is None + assert r["next", "done"].all() + r = policy(r_) + r, r_ = env.step_and_maybe_reset(r) + + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + @pytest.mark.parametrize("ref_input_mode", ["tokens"], ids=["tokens"]) + @pytest.mark.parametrize( + "env_class", ["GSM8KEnv", "IFEvalEnv"], ids=["gsm8k", "ifeval"] + ) + def test_chat_env_kl( + self, + transformers_instance, + vllm_instance, + pad_output, + ref_input_mode, + env_class, + ): + """Test that the wrapper works correctly with the ChatEnv.""" + import vllm.envs as envs + from torchrl.envs.llm import GSM8KEnv, IFEvalEnv + + envs.VLLM_HOST_IP = "0.0.0.0" or "127.0.0.1" + + vllm_model, vllm_tokenizer = vllm_instance + tf_model, tf_tokenizer = transformers_instance + + # a policy + policy = vLLMWrapper( + vllm_model, 
+ tokenizer=vllm_tokenizer, + input_mode="history", + generate=True, + pad_output=pad_output, + ) + ref_model = TransformersWrapper( + tf_model, + tokenizer=tf_tokenizer, + input_mode="tokens", + # TODO: check that generate=True causes an error + generate=False, + return_log_probs=True, + pad_output=pad_output, + ) + + if env_class == "GSM8KEnv": + env = GSM8KEnv(max_steps=10, num_envs=3, input_mode="history") + elif env_class == "IFEvalEnv": + env = IFEvalEnv(max_steps=10, num_envs=3, input_mode="history") + else: + raise ValueError(f"Invalid environment class: {env_class}") + env = env.append_transform(KLRewardTransform(ref_model)) + r = env.rollout(1, policy) + reward = r.get(("next", "reward"), as_list=not pad_output) + assert reward is not None + if pad_output: + assert reward.shape[0] == 3 + assert reward.shape[1] == 1 + assert reward.shape[2] > 1 + assert reward.shape[3] == 1 + else: + assert len(reward) == 3 + for r in reward: + assert r.shape[0] == 1 + assert r.shape[1] > 1 + assert r.shape[2] == 1 + + @pytest.mark.parametrize( + "env_class", ["GSM8KEnv", "IFEvalEnv"], ids=["gsm8k", "ifeval"] + ) + def test_retrievekl_transform( + self, transformers_instance, vllm_instance, env_class + ): + """Test that the RetrieveKL transform works correctly.""" + from torchrl.collectors.llm.base import LLMCollector + from torchrl.envs.llm import GSM8KEnv, IFEvalEnv + + model, tokenizer = transformers_instance + vllm_model, vllm_tokenizer = vllm_instance + ref_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode="history", + generate=False, + pad_output=True, + ) + if env_class == "GSM8KEnv": + env = GSM8KEnv(max_steps=1, num_envs=3) + elif env_class == "IFEvalEnv": + env = IFEvalEnv(max_steps=1, num_envs=3) + else: + raise ValueError(f"Invalid environment class: {env_class}") + env = env.append_transform(RetrieveKL("from_collector", ref_model)) + c = LLMCollector( + env, + policy_factory=partial( + vLLMWrapper, + vllm_model, + 
tokenizer=vllm_tokenizer, + input_mode="history", + generate=True, + pad_output=True, + ), + dialog_turns_per_batch=6, + ) + for d in c: + assert ("history", "full") in d + assert ("next", "history", "prompt") in d + break + return if __name__ == "__main__": diff --git a/test/llm/test_modules.py b/test/llm/test_modules.py deleted file mode 100644 index faaa1ff3fa2..00000000000 --- a/test/llm/test_modules.py +++ /dev/null @@ -1,616 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -from __future__ import annotations - -import argparse -import importlib.util - -import pytest -import torch - -from mocking_classes_llm import DummyStrDataLoader -from tensordict import ( - lazy_stack, - LazyStackedTensorDict, - NonTensorStack, - set_list_to_stack, - TensorDict, -) -from torchrl.collectors.llm import LLMCollector -from torchrl.data.llm import LLMData -from torchrl.envs.llm import LLMEnv -from torchrl.modules.llm import TransformersWrapper, vLLMWrapper -from transformers import OPTForCausalLM - -_has_transformers = importlib.util.find_spec("transformers") -_has_vllm = importlib.util.find_spec("vllm") - - -@pytest.mark.skipif(not _has_transformers, reason="missing transformers dependencies") -@pytest.mark.skipif(not _has_vllm, reason="missing vllm dependencies") -class TestLLMActor: - @pytest.fixture(scope="module") - def vllm_instance(self): - try: - import vllm - except ImportError: - pytest.skip(reason="missing vllm") - - llm_model = vllm.LLM("facebook/opt-125m") - tokenizer = llm_model.get_tokenizer() - tokenizer.pad_token = tokenizer.eos_token - return llm_model - - @pytest.fixture(scope="module") - def transformers_instance(self): - from transformers import AutoTokenizer - - # tokenizer = AutoTokenizer.from_pretrained("gpt2") - # model = GPT2LMHeadModel(GPT2Config()).eval() - # tokenizer = 
AutoTokenizer.from_pretrained("facebook/opt-125m") - # model = OPTModel(OPTConfig("facebook/opt-125m")) - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") - model = OPTForCausalLM.from_pretrained("facebook/opt-125m") - - tokenizer.pad_token = tokenizer.eos_token - tokenizer.padding_side = "left" - - return model, tokenizer - - @pytest.fixture(scope="module") - def transformers_instance_pretrained(self): - from transformers import AutoTokenizer, OPTForCausalLM - - # tokenizer = AutoTokenizer.from_pretrained("gpt2") - # model = GPT2LMHeadModel(GPT2Config()) - # tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") - # model = OPTModel(OPTConfig("facebook/opt-125m")) - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") - model = OPTForCausalLM.from_pretrained("facebook/opt-125m") - - tokenizer.pad_token = tokenizer.eos_token - tokenizer.padding_side = "left" - - return model, tokenizer - - @pytest.mark.parametrize( - "from_text, generate, return_log_probs, tokens, attention_mask", - [ - (True, True, True, None, None), - (True, True, False, None, None), - (True, False, None, None, None), - ( - False, - True, - True, - torch.randint(1024, (1, 10)), - torch.ones(1, 10, dtype=torch.int64), - ), - (False, True, True, torch.randint(1024, (1, 10)), None), - ( - False, - True, - False, - torch.randint(1024, (1, 10)), - torch.ones(1, 10, dtype=torch.int64), - ), - (False, True, False, torch.randint(1024, (1, 10)), None), - ], - ) - def test_transformers_wrapper( - self, - from_text, - generate, - return_log_probs, - tokens, - attention_mask, - transformers_instance, - ): - torch.manual_seed(0) - - model, tokenizer = transformers_instance - - m = TransformersWrapper( - model, - tokenizer=tokenizer, - from_text=from_text, - generate=generate, - return_log_probs=return_log_probs, - ) - self._run_check( - m, - tokens, - attention_mask, - generate, - return_log_probs, - from_text, - has_logits=True, - ) - - @pytest.mark.skip_if_nightly - 
@pytest.mark.parametrize( - "from_text, generate, return_log_probs, tokens, attention_mask", - [ - (True, True, True, None, None), - (True, True, False, None, None), - (True, False, None, None, None), - ( - False, - True, - True, - torch.randint(1024, (1, 10)), - torch.ones(1, 10, dtype=torch.int64), - ), - (False, True, True, torch.randint(1024, (1, 10)), None), - ( - False, - True, - False, - torch.randint(1024, (1, 10)), - torch.ones(1, 10, dtype=torch.int64), - ), - (False, True, False, torch.randint(1024, (1, 10)), None), - ], - ) - def test_vllm_wrapper( - self, - from_text, - generate, - return_log_probs, - tokens, - attention_mask, - vllm_instance, - ): - torch.manual_seed(0) - - model = vllm_instance - m = vLLMWrapper( - model, - from_text=from_text, - generate=generate, - return_log_probs=return_log_probs, - ) - self._run_check( - m, - tokens, - attention_mask, - generate, - return_log_probs, - from_text, - has_logits=False, - ) - - def _make_data( - self, - m, - tokens, - attention_mask, - generate, - from_text, - has_logits, - batch_size=1, - text_response=None, - tokens_response=None, - ): - lp_kwargs = {} - if from_text: - if not generate: - text_response = ( - NonTensorStack(" and another text that follows") - if text_response is None - else text_response - ) - if not isinstance(text_response, NonTensorStack): - if isinstance(text_response, list): - text_response = NonTensorStack(*text_response) - else: - text_response = NonTensorStack(text_response) - lp_kwargs.update({"text_response": text_response}) - tdin = LLMData( - text=NonTensorStack("Somewhere, I lost"), - **lp_kwargs, - batch_size=batch_size, - ) - else: - if not generate: - if tokens_response is None: - shape_response = tokens.shape - shape_response = shape_response[:-1] + (shape_response[-1] * 2,) - tokens_response = torch.randint(1024, shape_response) - lp_kwargs.update({"tokens_response": tokens_response}) - tdin = LLMData( - tokens=tokens, - attention_mask=attention_mask, - 
**lp_kwargs, - batch_size=batch_size, - ) - return tdin - - def _run_check( - self, - m, - tokens, - attention_mask, - generate, - return_log_probs, - from_text, - has_logits, - ): - tdin = self._make_data( - m, tokens, attention_mask, generate, from_text, has_logits - ) - if from_text and generate: - assert tdin.text_response is None - elif from_text and not generate: - assert tdin.text_response is not None - - tdin.copy() - td = m(tdin) - assert td is tdin - assert isinstance(td, LLMData) - if from_text and generate: - assert td.text_response is not None - - # TODO: vLLM may produce an attention mask when hf does not - explore consistency! - # if generate and (from_text or tdincopy.attention_mask is not None): - # assert td.attention_mask is not None, (generate, from_text, tdincopy.attention_mask is not None) - # if isinstance(td.attention_mask, torch.Tensor): - # assert td.attention_mask.shape == td.tokens.shape - # else: - # assert td.attention_mask is None, (generate, from_text) - - if not generate: - # logprobs are computed on text response of tokens_response - assert td.text_response is not None or td.tokens_response is not None - assert td.log_probs is not None - if has_logits: - assert td.logits is not None - if generate: - if return_log_probs: - assert td.log_probs is not None - assert td.log_probs.shape[-1] == td.tokens_response.shape[-1] - else: - assert td.log_probs is None - - # Test the shapes - assert td.tokens_response is not None, (generate, has_logits, from_text) - - # If from text and not generating, the tokens are not returned for now - if not (from_text and not generate): - assert td.tokens_response is not None - assert td.tokens is not None - assert td.tokens_response.shape[:-1] == td.tokens.shape[:-1] - # The convention is that the response only has new tokens - assert ( - td.tokens_response[..., : td.tokens.shape[-1]] - != td.tokens[..., : td.tokens_response.shape[-1]] - ).any(), (generate, from_text) - - @pytest.mark.parametrize( - 
"from_text, tokens, attention_mask", - [ - ( - False, - torch.randint(1024, (1, 10)), - torch.ones(1, 10, dtype=torch.int64), - ), - (False, torch.randint(1024, (1, 10)), None), - (True, None, None), - ], - ) - def test_transformers_logprobs( - self, from_text, tokens, attention_mask, transformers_instance - ): - torch.manual_seed(0) - model, tokenizer = transformers_instance - - m_generate = TransformersWrapper( - model, - tokenizer=tokenizer, - from_text=from_text, - generate=True, - return_log_probs=True, - ) - m_logprobs = TransformersWrapper( - model, tokenizer=tokenizer, from_text=from_text, generate=False - ) - self._check_lps( - m_generate, - m_logprobs, - tokens, - attention_mask, - from_text, - has_logits=False, - ) - - @pytest.mark.skip_if_nightly - @pytest.mark.parametrize( - "pad_output, from_text, tokens, attention_mask", - [ - (True, True, None, None), - (False, True, None, None), - ( - True, - False, - torch.randint(1024, (1, 10)), - torch.ones(1, 10, dtype=torch.int64), - ), - (True, False, torch.randint(1024, (1, 10)), None), - ], - ) - def test_vllm_logprobs( - self, from_text, tokens, attention_mask, pad_output, vllm_instance - ): - torch.manual_seed(0) - - model = vllm_instance - m_generate = vLLMWrapper( - model, - from_text=from_text, - generate=True, - return_log_probs=True, - pad_output=pad_output, - ) - m_logprobs = vLLMWrapper( - model, from_text=from_text, generate=False, pad_output=pad_output - ) - self._check_lps( - m_generate, - m_logprobs, - tokens, - attention_mask, - from_text, - has_logits=False, - tol=1e-1, - ) - - def _check_lps( - self, - model_generate, - model_logprobs, - tokens, - attention_mask, - from_text, - has_logits, - tol=1e-2, - ): - # Checks that the log-probs gathered with generate=False equate those with generate=True - tdin_genetate = self._make_data( - model_generate, tokens, attention_mask, True, from_text, has_logits - ) - td_generate = model_generate(tdin_genetate) - tdin_logprobs = self._make_data( - 
model_logprobs, - tokens, - attention_mask, - False, - from_text, - has_logits, - tokens_response=td_generate.tokens_response, - text_response=td_generate.text_response, - ) - td_logprobs = model_logprobs(tdin_logprobs) - assert td_generate.tokens_response.shape == td_logprobs.tokens_response.shape - assert (td_generate.tokens_response == td_logprobs.tokens_response).all(), ( - td_generate.tokens_response == td_logprobs.tokens_response - ) - assert td_generate.log_probs.shape == td_generate.tokens_response.shape - assert td_logprobs.log_probs.shape == td_logprobs.tokens_response.shape - assert td_logprobs.log_probs.shape == td_generate.tokens_response.shape - torch.testing.assert_close( - td_generate.log_probs, td_logprobs.log_probs, rtol=tol, atol=tol - ) - - @pytest.mark.skip_if_nightly - @pytest.mark.parametrize("pad", [True, False]) - @pytest.mark.parametrize("generate", [True, False]) - @pytest.mark.parametrize("use_tensorclass", [True, False]) - def test_vllm_batch_run(self, pad, generate, use_tensorclass, vllm_instance): - # Test generate - padding combinations - policy = vLLMWrapper( - vllm_instance, - from_text=True, - generate=generate, - return_log_probs=True, - pad_output=pad, - generate_kwargs={"max_tokens": 10000}, - ) - if generate: - data = LazyStackedTensorDict( - *TensorDict( - text=NonTensorStack("a string", "another very long string"), - batch_size=[2], - ).unbind(0) - ) - else: - data = LazyStackedTensorDict( - *TensorDict( - text=NonTensorStack("a string", "another very long string"), - text_response=NonTensorStack( - " is a string", " is still a very long string" - ), - batch_size=[2], - ).unbind(0) - ) - if use_tensorclass: - data = LLMData.from_tensordict(data) - output = policy(data) - try: - log_probs = output.get("log_probs") - except Exception: - log_probs = output.get("log_probs", as_list=True) - if pad: - assert isinstance(log_probs, torch.Tensor) - else: - assert isinstance(log_probs, list) - text = output.get("text", as_list=True) - 
# TODO: this is not ideal... - if use_tensorclass: - assert isinstance(text, list) - else: - assert isinstance(text, NonTensorStack) - text_response = output.get("text_response", as_list=True) - if use_tensorclass: - assert isinstance(text_response, list) - else: - assert isinstance(text_response, NonTensorStack) - try: - tokens_response = output.get("tokens_response") - except Exception: - tokens_response = output.get("tokens_response", as_list=True) - if pad: - assert isinstance(tokens_response, torch.Tensor) - else: - assert isinstance(tokens_response, list) - try: - tokens = output.get("tokens") - except Exception: - tokens = output.get("tokens", as_list=True) - if not generate: - assert tokens is None - elif pad: - assert isinstance(tokens, torch.Tensor), tokens - else: - assert isinstance(tokens, list) - - @pytest.mark.skip_if_nightly - @pytest.mark.parametrize("from_text", [True]) - def test_vllm_collection(self, vllm_instance, from_text): - policy = vLLMWrapper( - vllm_instance, - return_log_probs=True, - generate_kwargs={"max_tokens": 32}, - from_text=from_text in (True, None), - ) - tokenizer = vllm_instance.get_tokenizer() - self._run_check_collector(policy, from_text=from_text, tokenizer=tokenizer) - - def test_transformers_collection(self): - ... 
- - @classmethod - def env_constructor(cls, **kwargs): - def make(): - # if kwargs.get("from_text", True): - dl = DummyStrDataLoader(batch_size=32) - # else: - # dl = DummyTensorDataLoader(batch_size=32) - env = LLMEnv.from_dataloader( - dl, - batch_size=4, - repeats=4, - **kwargs, - ) - assert env.batch_size == (16,) - return env - - return make - - def _run_check_collector(self, policy, from_text, tokenizer): - if from_text is None: - kwargs = {"eos_token_id": tokenizer.eos_token_id} - else: - kwargs = { - "from_text": from_text, - "tokenizer": tokenizer, - "eos_token_id": tokenizer.eos_token_id, - } - collector = LLMCollector( - self.env_constructor(**kwargs), - policy=policy, - dialog_turns_per_batch=32, - total_dialog_turns=128, - ) - t = 0 - for data in collector: - assert isinstance(data, LazyStackedTensorDict) - assert isinstance(data.reshape(-1).get("text_response"), NonTensorStack) - # action - assert "text_response" in data - assert "tokens_response" in data - # obs - assert "text" in data - assert ("next", "text") in data - # tokens - assert "tokens" in data - - t += data.numel() - assert collector._frames == t - assert t < 512, t # assert ("next", "tokens") in data - - @pytest.mark.skip_if_nightly - def test_vllm_generate_multiple_trajs(self, vllm_instance): - policy = vLLMWrapper( - vllm_instance, - return_log_probs=True, - generate_kwargs={"n": 10, "max_tokens": 1024}, - inplace=False, - ) - data = TensorDict( - text=NonTensorStack("a string", "another very long string"), batch_size=2 - ) - data = policy(data) - - @set_list_to_stack(True) - @pytest.mark.parametrize("from_text", [True, False]) - @pytest.mark.parametrize("generate", [True, False]) - def test_transformers_long_sequences( - self, from_text, generate, transformers_instance_pretrained - ): - torch.manual_seed(42) - model, tokenizer = transformers_instance_pretrained - prompts = [ - "The quick brown fox jumps over the lazy dog.", # Likely to finish soon - "Once upon a time in a land far, 
far away, there was a", # Likely to continue longer - "In the beginning, the universe was created. This has made a lot of people very angry and been widely regarded as a bad move.", - ] - data = lazy_stack([TensorDict() for _ in range(len(prompts))]) - data["text"] = prompts - eos_token_id = tokenizer.convert_tokens_to_ids(",") - if not from_text: - data["tokens"] = tokenizer(data["text"])["input_ids"] - data["attention_mask"] = ( - 0 * data.get("tokens", as_nested_tensor=True, layout=torch.strided) + 1 - ) - if not generate: - # we need responses - responses = prompts[1:] + [" et dolore magna aliqua."] - data["text_response"] = responses - if not from_text: - data["tokens_response"] = tokenizer(data["text_response"])["input_ids"] - # make sure dimensions are ragged for tokens entries - if "tokens" in data: - assert data.get_item_shape("tokens")[-1] == -1 - if "tokens_response" in data: - assert data.get_item_shape("tokens_response")[-1] == -1 - generate_kwargs = {} - if generate: - generate_kwargs = { - "max_new_tokens": 128, # Set a reasonable number of new tokens to generate - "min_length": 20, # Ensure a minimum length for the generated sequence - "pad_token_id": tokenizer.pad_token_id, # Use the tokenizer's pad token - "forced_eos_token_id": eos_token_id, # Use comma as an EOS token - } - policy = TransformersWrapper( - model, - tokenizer=tokenizer, - from_text=from_text, - generate=generate, - return_log_probs=True, - # TODO: use n trajs - generate_kwargs=generate_kwargs, - ) - data_policy = policy(data) - if "tokens" in data_policy: - assert data_policy.get_item_shape("tokens")[-1] == -1 - if "tokens_response" in data_policy: - assert ( - data_policy.get_item_shape("tokens_response")[-1] == -1 - ) # TODO: this fails - - -if __name__ == "__main__": - args, unknown = argparse.ArgumentParser().parse_known_args() - pytest.main([__file__, "--capture", "no", "--exitfirst"] + unknown) diff --git a/test/llm/test_objectives.py b/test/llm/test_objectives.py index 
baf301e5f33..9dd0ffb9367 100644 --- a/test/llm/test_objectives.py +++ b/test/llm/test_objectives.py @@ -10,19 +10,17 @@ import numpy as np import pytest import torch -from mocking_classes_llm import DummyStrDataLoader -from tensordict import lazy_stack, set_capture_non_tensor_stack, TensorDict -from torchrl.data import History, LazyStackStorage, ReplayBuffer, Unbounded -from torchrl.envs import Transform -from torchrl.envs.llm import LLMEnv +from tensordict import lazy_stack, TensorDict +from torchrl.data import History, LazyStackStorage, ReplayBuffer from torchrl.envs.llm.transforms.kl import RetrieveLogProb -from torchrl.modules.llm import TransformersWrapper -from torchrl.objectives import ClipPPOLoss -from torchrl.objectives.llm.grpo import GRPOLoss, GRPOLossOutput, MCAdvantage +from torchrl.modules.llm import Text, TransformersWrapper, vLLMWrapper +from torchrl.modules.llm.policies.common import ChatHistory, Masks, Tokens +from torchrl.objectives.llm.grpo import MCAdvantage from torchrl.objectives.llm.sft import SFTLoss _has_transformers = importlib.util.find_spec("transformers") is not None +_has_vllm = importlib.util.find_spec("vllm") is not None prompts = [ "Lorem ipsum dolor sit amet,", "consectetur adipiscing elit,", @@ -55,7 +53,7 @@ def make_silly_trajectory(n_steps=None): rewards = [torch.randn(n_tokens, 1)] prompt = np.random.choice(prompts) td = TensorDict( - text=prompt, + text=Text(prompt=prompt), next=TensorDict( reward=rewards, done=torch.zeros(1, dtype=torch.bool) ), @@ -89,80 +87,6 @@ def test_grpo(): ... 
-class TestPPO4LLMs: - @pytest.mark.skipif( - not _has_transformers, reason="transformers lib required to test PPO with LLMs" - ) - @set_capture_non_tensor_stack(False) - @pytest.mark.parametrize("from_text", [True, False]) - @pytest.mark.parametrize("cls", [ClipPPOLoss, GRPOLoss]) - def test_hf(self, from_text, cls): - from transformers import AutoTokenizer, OPTConfig, OPTForCausalLM - - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") - tokenizer.pad_token = tokenizer.eos_token - - model = OPTForCausalLM(OPTConfig()).eval() - policy_inference = TransformersWrapper( - model, - tokenizer=tokenizer, - generate=True, - from_text=from_text, - return_log_probs=True, - ) - policy_train = TransformersWrapper( - model, tokenizer=tokenizer, generate=False, from_text=False - ) - for p in policy_train.parameters(): - assert p.requires_grad - # Create some fake data - dl = DummyStrDataLoader(batch_size=32) - llm_env = LLMEnv.from_dataloader( - dl, - tokenizer=tokenizer if not from_text else None, - batch_size=(32,), - from_text=True, - eos_token_id=tokenizer.eos_token_id, - ) - - class RewardTransform(Transform): - def _step(self, td, next_td): - next_td["reward"] = torch.randn_like( - td["tokens_response"], dtype=torch.float - ).unsqueeze(-1) - return next_td - - def transform_reward_spec(self, reward_spec): - return reward_spec.set( - "reward", Unbounded((*reward_spec.shape, -1, 1), dtype=torch.float) - ) - - llm_env = llm_env.append_transform(RewardTransform()) - with torch.no_grad(): - data = llm_env.rollout(3, policy_inference) - data = data.view(-1) - assert data["tokens_response"].shape[-1] == 20 - # Make some fake advantages: - data["advantage"] = torch.randn_like(data["next", "reward"]) - - loss = cls( - actor_network=policy_train, - ) - loss_vals = loss(data) - if cls is ClipPPOLoss: - assert "loss_objective" in loss_vals - assert "loss_entropy" in loss_vals - assert loss_vals["loss_objective"].requires_grad - assert 
loss_vals["loss_entropy"].requires_grad - assert "clip_fraction" in loss_vals - assert "kl_approx" in loss_vals - assert "entropy" in loss_vals - assert "ESS" in loss_vals - assert "loss_critic" not in loss_vals - else: - assert isinstance(loss_vals, GRPOLossOutput) - - class TestSFT: @pytest.fixture(scope="class") def data(self): @@ -190,20 +114,21 @@ def data(self): text = history[:, :-1].apply_chat_template( tokenizer=tokenizer, chat_template_name="qwen", add_generation_prompt=True ) - text_response = history.apply_chat_template( + full_text = history.apply_chat_template( tokenizer=tokenizer, chat_template_name="qwen", add_generation_prompt=False ) text_response = [ - txt[len(txt_start) :] for txt, txt_start in zip(text_response, text) + txt[len(txt_start) :] for txt, txt_start in zip(full_text, text) ] td = TensorDict( - text=text, - text_response=text_response, - history=history, + text=Text(prompt=text, response=text_response, full=full_text), + history=ChatHistory( + full=history, prompt=history[..., :-1], response=history[..., -1:] + ), next=TensorDict( reward=torch.randn(2, 1), done=torch.zeros(2, dtype=torch.bool), - history=history, + history=ChatHistory(prompt=history), ), batch_size=(2,), ) @@ -227,8 +152,9 @@ def policy_train(self): model, tokenizer=tokenizer, generate=False, - from_text=True, chat_template_name="qwen", + input_mode="history", + pad_output=False, ) return policy_train, tokenizer @@ -249,8 +175,6 @@ def test_sft( data, policy_train, ): - pass - policy_train, tokenizer = policy_train loss = SFTLoss( actor_network=policy_train, @@ -269,20 +193,21 @@ def test_sft( policy_train.model, tokenizer=tokenizer, generate=False, - from_text=True, return_log_probs=True, chat_template_name="qwen", + input_mode="history", + pad_output=False, ) transform = RetrieveLogProb( policy_ref, assistant_only=True, tokenizer_kwargs={"chat_template_name": "qwen"}, tokenizer=tokenizer, + log_probs_key=("ref_log_prob", "full"), ) with torch.no_grad(): # Compute 
ref log-probs transform(td) - loss_vals = loss(td) if kl_to_ref_coeff is not None and loss_function != "minor_sft": assert loss_vals.loss_kl_to_ref.shape == () @@ -296,7 +221,7 @@ def test_sft( assert loss_vals.sum(reduce=True).shape == () def test_sft_assistant_only(self, data): - from torchrl.data.llm.chat import _CHAT_TEMPLATES + from torchrl.data.llm.history import _CHAT_TEMPLATES from transformers import AutoTokenizer, OPTConfig, OPTForCausalLM tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") @@ -308,14 +233,12 @@ def test_sft_assistant_only(self, data): model, tokenizer=tokenizer, generate=False, - from_text=True, chat_template_name="qwen", ) policy_ref = TransformersWrapper( model, tokenizer=tokenizer, generate=False, - from_text=True, return_log_probs=True, chat_template_name="qwen", ) @@ -324,6 +247,7 @@ def test_sft_assistant_only(self, data): assistant_only=True, tokenizer_kwargs={"chat_template_name": "qwen"}, tokenizer=tokenizer, + log_probs_key=("ref_log_prob", "full"), ) td = transform(data) assert td is data @@ -338,6 +262,181 @@ def test_sft_assistant_only(self, data): loss(td) +class TestGRPOLossIntegration: + """Test GRPOLoss integration with the new distribution methods.""" + + @pytest.fixture(scope="module") + def transformers_instance(self): + """Create transformers model and tokenizer for testing.""" + if not _has_transformers: + pytest.skip("transformers not available") + from transformers import AutoModelForCausalLM, AutoTokenizer + + model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B") + tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B") + tokenizer.pad_token = tokenizer.eos_token + return model, tokenizer + + @pytest.fixture(scope="module") + def vllm_instance(self): + """Create vLLM model and tokenizer for testing.""" + if not _has_vllm: + pytest.skip("vllm not available") + + import vllm.envs as envs + from transformers import AutoTokenizer + from vllm import LLM + + envs.VLLM_HOST_IP = "0.0.0.0" 
or "127.0.0.1" + + try: + model = LLM("Qwen/Qwen2.5-0.5B") + tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B") + tokenizer.pad_token = tokenizer.eos_token + return model, tokenizer + except Exception as e: + pytest.skip(f"Failed to load vLLM model: {e}") + + @pytest.fixture(scope="module") + def sample_tokens(self, vllm_instance): + """Create sample tokens for testing.""" + model, tokenizer = vllm_instance + text = [ + "Are you happy? Say yes or no.", + "Explain the difference between a cat and a dog. Be very detailed.", + ] + tokenized = tokenizer( + text, return_tensors="pt", padding=True, padding_side="left" + ) + return tokenized["input_ids"], tokenized["attention_mask"] + + @pytest.fixture(scope="module") + def sample_text(self): + """Create sample text for testing.""" + return [ + "Are you happy? Say yes or no.", + "Explain the difference between a cat and a dog. Be very detailed.", + ] + + @pytest.fixture(scope="module") + def sample_history(self): + """Create sample conversation history for testing.""" + chats = [ + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Are you happy? Say yes or no."}, + ], + [ + { + "role": "system", + "content": "You are a very helpful assistant, but more handsome.", + }, + { + "role": "user", + "content": "Explain the difference between a cat and a dog. Be very detailed.", + }, + ], + ] + return History.from_chats(chats) + + @pytest.fixture(scope="module") + def sample_history_assistant(self): + """Create sample conversation history for testing.""" + chats = [ + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Are you happy? Say yes or no."}, + {"role": "assistant", "content": "Yes."}, + ], + [ + { + "role": "system", + "content": "You are a very helpful assistant, but more handsome.", + }, + { + "role": "user", + "content": "Explain the difference between a cat and a dog. 
Be very detailed.", + }, + { + "role": "assistant", + "content": "A cat is a small animal that meows, while a dog is a larger animal that barks.", + }, + ], + ] + return History.from_chats(chats) + + @pytest.mark.skipif(not _has_vllm, reason="vllm not available") + @pytest.mark.parametrize("masking_strategy", ["sft", "rlhf"]) + def test_grpo_loss_with_transformers( + self, + vllm_instance, + transformers_instance, + sample_history, + sample_tokens, + masking_strategy, + ): + """Test GRPOLoss with vLLM wrapper and different masking strategies.""" + from torchrl.objectives.llm.grpo import GRPOLoss + + model, tokenizer = transformers_instance + vllm_model, vllm_tokenizer = vllm_instance + + # Use tokens input mode for SFT, history for RLHF/generic + if masking_strategy == "sft": + input_mode = "tokens" + input_ids, attention_mask = sample_tokens + input_data = { + "tokens": Tokens(prompt=input_ids), + "masks": Masks(all_attention_mask=attention_mask), + } + else: + input_mode = "history" + input_data = {"history": ChatHistory(prompt=sample_history)} + + wrapper_gen = vLLMWrapper( + vllm_model, + tokenizer=vllm_tokenizer, + input_mode=input_mode, + generate=True, + return_log_probs=True, + pad_output=True, + generate_kwargs={"max_tokens": 10}, + ) + + # Create test data with advantage and correct batch size + td = TensorDict(input_data, batch_size=(2,)).to_lazystack(0) + td = wrapper_gen(td) + # use a shape that can be broadcast + td["advantage"] = torch.randn(2, 1, 1) + + wrapper = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode=input_mode, + generate=False, + return_log_probs=True, + pad_output=True, + ) + + # Create GRPOLoss with specified masking strategy + loss_fn = GRPOLoss( + actor_network=wrapper, + masking_strategy=masking_strategy, + ) + + # This should work without shape mismatch errors + try: + result = loss_fn(td) + assert result is not None + except ValueError as e: + if "Shape mismatch" in str(e): + # This is expected if the advantage 
shape doesn't match the log-prob shape + # due to different masking strategies + assert masking_strategy in str(e) + else: + raise + + if __name__ == "__main__": args, unknown = argparse.ArgumentParser().parse_known_args() pytest.main([__file__, "--capture", "no", "--exitfirst"] + unknown) diff --git a/test/llm/test_wrapper.py b/test/llm/test_wrapper.py new file mode 100644 index 00000000000..49197952972 --- /dev/null +++ b/test/llm/test_wrapper.py @@ -0,0 +1,1710 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +from __future__ import annotations + +import argparse +import importlib.util + +import os +from functools import partial + +import pytest +import torch +from tensordict import lazy_stack, set_list_to_stack, TensorDict + +from tensordict.utils import _zip_strict +from torchrl.data.llm import History +from torchrl.envs.llm.transforms.kl import KLComputation, RetrieveKL, RetrieveLogProb +from torchrl.modules.llm.policies.common import ( + ChatHistory, + LogProbs, + Masks, + Text, + Tokens, +) +from torchrl.modules.llm.policies.transformers_wrapper import TransformersWrapper +from torchrl.modules.llm.policies.vllm_wrapper import vLLMWrapper +from transformers import AutoTokenizer + + +# Set environment variable for vLLM V0 engine +os.environ["VLLM_USE_V1"] = "0" + +_has_transformers = importlib.util.find_spec("transformers") is not None +_has_vllm = importlib.util.find_spec("vllm") is not None +_has_datasets = importlib.util.find_spec("datasets") is not None + +TransformersWrapperMaxTokens = partial( + TransformersWrapper, generate_kwargs={"max_new_tokens": 10, "do_sample": True} +) + + +@pytest.fixture(scope="function", autouse=True) +def set_seed(): + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed(0) + torch.cuda.manual_seed_all(0) + torch.backends.cudnn.deterministic = True + 
torch.backends.cudnn.benchmark = False + yield + + +@pytest.fixture(scope="module", autouse=True) +def set_list_to_stack_fixture(): + with set_list_to_stack(True): + yield + + +@pytest.fixture(scope="module") +def vllm_instance(): + """Create vLLM model and tokenizer for testing.""" + if not _has_vllm: + pytest.skip("vllm not available") + + import vllm.envs as envs + from vllm import LLM + + envs.VLLM_HOST_IP = "0.0.0.0" or "127.0.0.1" + + assert os.environ.get("VLLM_USE_V1") == "0" + + try: + model = LLM("Qwen/Qwen2.5-0.5B") + tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B") + tokenizer.pad_token = tokenizer.eos_token + return model, tokenizer + except Exception as e: + pytest.skip(f"Failed to load vLLM model: {e}") + + +@pytest.fixture(scope="module") +def transformers_instance(): + """Create transformers model and tokenizer for testing.""" + if not _has_transformers: + pytest.skip("transformers not available") + from transformers import AutoModelForCausalLM, AutoTokenizer + + model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B") + tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B") + tokenizer.pad_token = tokenizer.eos_token + return model, tokenizer + + +@pytest.fixture +def sample_history(): + """Create sample conversation history for testing.""" + chats = [ + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Are you happy? Say yes or no."}, + ], + [ + { + "role": "system", + "content": "You are a very helpful assistant, but more handsome.", + }, + { + "role": "user", + "content": "Explain the difference between a cat and a dog. Be very detailed.", + }, + ], + ] + return History.from_chats(chats) + + +@pytest.fixture +def sample_history_assistant(): + """Create sample conversation history for testing.""" + chats = [ + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Are you happy? 
Say yes or no."}, + {"role": "assistant", "content": "Yes."}, + ], + [ + { + "role": "system", + "content": "You are a very helpful assistant, but more handsome.", + }, + { + "role": "user", + "content": "Explain the difference between a cat and a dog. Be very detailed.", + }, + { + "role": "assistant", + "content": "A cat is a small animal that meows, while a dog is a larger animal that barks.", + }, + ], + ] + return History.from_chats(chats) + + +@pytest.fixture +def sample_text(): + """Create sample text for testing.""" + return [ + "Are you happy? Say yes or no.", + "Explain the difference between a cat and a dog. Be very detailed.", + ] + + +@pytest.fixture +def sample_tokens(vllm_instance): + """Create sample tokens for testing.""" + model, tokenizer = vllm_instance + text = [ + "Are you happy? Say yes or no.", + "Explain the difference between a cat and a dog. Be very detailed.", + ] + tokenized = tokenizer(text, return_tensors="pt", padding=True, padding_side="left") + return tokenized["input_ids"], tokenized["attention_mask"] + + +def check_output_shapes(out, pad_output, requested_log_probs=False): + if pad_output: + # We can get all tensors or they are none + log_probs = out.get("log_probs") + masks = out.get("masks") + tokens = out.get("tokens") + text = out.get("text") + history = out.get("history") + + # Test the all_ tensors + if log_probs is not None: + assert isinstance(log_probs, LogProbs) + all_logprobs = log_probs.full + else: + all_logprobs = None + if masks is not None: + assert isinstance(masks, Masks) + all_attention_masks = masks.all_attention_mask + all_assistant_masks = masks.all_assistant_mask + else: + all_attention_masks = None + all_assistant_masks = None + if tokens is not None: + assert isinstance(tokens, Tokens) + all_tokens = tokens.full + else: + all_tokens = None + if text is not None: + assert isinstance(text, Text) + text.full + else: + pass + if history is not None: + assert isinstance(history, ChatHistory) + history.full + 
else: + pass + + shapes = set() + if all_logprobs is not None: + shapes.add(all_logprobs.shape) + if all_attention_masks is not None: + shapes.add(all_attention_masks.shape) + if all_assistant_masks is not None: + shapes.add(all_assistant_masks.shape) + if all_tokens is not None: + shapes.add(all_tokens.shape) + assert len(shapes) <= 1, ("all_tensors shapes differ", out) + + # Check the response tensors + shapes = set() + if log_probs is not None and log_probs.response is not None: + shapes.add(log_probs.response.shape) + if tokens is not None and tokens.response is not None: + shapes.add(tokens.response.shape) + assert len(shapes) <= 1, (shapes, out) + + # Check the prompt tensors + shapes = set() + if log_probs is not None and log_probs.prompt is not None: + shapes.add(log_probs.prompt.shape) + if tokens is not None and tokens.prompt is not None: + shapes.add(tokens.prompt.shape) + + if ( + log_probs is not None + and log_probs.response is not None + and log_probs.prompt is not None + ): + assert ( + log_probs.response.shape[-1] + log_probs.prompt.shape[-1] + == log_probs.full.shape[-1] + ) + if ( + tokens is not None + and tokens.response is not None + and tokens.prompt is not None + ): + assert ( + tokens.response.shape[-1] + tokens.prompt.shape[-1] + == tokens.full.shape[-1] + ) + + assert len(shapes) <= 1, shapes + + # Check that if 'full' is defined, either both 'prompt' and 'response' must be set or neither of them + if requested_log_probs: + for obj_name, obj in [ + ("log_probs", log_probs), + ("tokens", tokens), + ("text", text), + ]: + if obj is not None and obj.get("full", as_list=True) is not None: + has_prompt = obj.get("prompt", as_list=True) is not None + has_response = obj.get("response", as_list=True) is not None + assert (has_prompt and has_response) or ( + not has_prompt and not has_response + ), ( + f"{obj_name}: if 'full' is defined, either both 'prompt' and 'response' must be set or neither of them. 
" + f"prompt={has_prompt}, response={has_response}, full={obj.full is not None}" + ) + else: + # we can simply iterate over out + for _out in out.unbind(0): + check_output_shapes( + _out, pad_output=not _out.ndim, requested_log_probs=requested_log_probs + ) + + +@pytest.mark.skipif(not _has_vllm, reason="vllm not available") +class TestWrappers: + """Comprehensive tests for vLLMWrapper and TransformersWrapper covering all modalities and configurations.""" + + # ================================================ + # History Input Mode Tests + # ================================================ + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + @pytest.mark.parametrize("generate", [True, False], ids=["generate", "no_generate"]) + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + def test_history_input_mode( + self, + wrapper_class, + vllm_instance, + transformers_instance, + sample_history, + sample_history_assistant, + generate, + pad_output, + ): + """Test history input mode with various configurations.""" + + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="history", + generate=generate, + pad_output=pad_output, + ) + + # Check input keys + assert ( + wrapper.in_keys == [("history", "prompt")] + if generate + else [("history", "full")] + ) + + # Check output keys - always return everything + expected_out_keys = ["text", "masks", "tokens", "log_probs", "history"] + assert wrapper.out_keys == expected_out_keys + + # Create input data + if generate: + data = TensorDict( + history=ChatHistory(prompt=sample_history), + batch_size=(2,), + ) + else: + data = TensorDict( + history=ChatHistory(full=sample_history_assistant), + batch_size=(2,), + ) + + # Run wrapper + result = wrapper(data) + check_output_shapes(result, 
pad_output, requested_log_probs=not generate) + + # Check output structure + for key in expected_out_keys: + assert key in result + assert hasattr(result[key], "__class__") + + # Check specific outputs - always check everything + text_obj = result["text"] + assert hasattr(text_obj, "prompt") + assert hasattr(text_obj, "response") + assert hasattr(text_obj, "full") + + if generate: + assert text_obj.response is not None + assert isinstance(text_obj.response, list) + assert isinstance(text_obj.response[0], str) + + tokens_obj = result["tokens"] + if pad_output: + assert hasattr(tokens_obj, "prompt") + assert hasattr(tokens_obj, "response") + assert hasattr(tokens_obj, "full") + assert hasattr(tokens_obj, "padded") + assert all(tokens_obj.padded) == pad_output + + if generate: + if pad_output: + assert tokens_obj.response is not None + else: + assert tokens_obj.get("response", as_list=True) is not None + if not pad_output: + response_tokens = result["tokens"].get("response", as_list=True) + assert isinstance(response_tokens, list) + else: + assert isinstance(tokens_obj.response, torch.Tensor) + + masks_obj = result["masks"] + if pad_output: + assert hasattr(masks_obj, "all_attention_mask") + assert hasattr(masks_obj, "all_assistant_mask") + assert hasattr(masks_obj, "padded") + assert all(masks_obj.padded) == pad_output + + log_probs_obj = result["log_probs"] + if pad_output: + assert hasattr(log_probs_obj, "prompt") + assert hasattr(log_probs_obj, "response") + assert hasattr(log_probs_obj, "full") + assert hasattr(log_probs_obj, "padded") + assert all(log_probs_obj.padded) == pad_output + + # ================================================ + # Text Input Mode Tests + # ================================================ + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + @pytest.mark.parametrize("generate", [True, False], ids=["generate", "no_generate"]) + 
@pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + def test_text_input_mode( + self, + wrapper_class, + vllm_instance, + transformers_instance, + sample_text, + generate, + pad_output, + ): + """Test text input mode with various configurations.""" + model, tokenizer = vllm_instance + + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="text", + generate=generate, + pad_output=pad_output, + ) + + # Check input keys + if generate: + assert wrapper.in_keys == [("text", "prompt")] + else: + assert wrapper.in_keys == [("text", "full")] + + # Create input data + if generate: + data = TensorDict(text=Text(prompt=sample_text), batch_size=(2,)) + else: + data = TensorDict(text=Text(full=sample_text), batch_size=(2,)) + + # Run wrapper + result = wrapper(data) + check_output_shapes(result, pad_output, requested_log_probs=not generate) + + # Check output structure - always return everything + expected_keys = ["text", "masks", "tokens", "log_probs"] + for key in expected_keys: + assert key in result + + # Check text output + text_obj = result["text"] + if generate: + assert text_obj.prompt == sample_text + else: + assert text_obj.full == sample_text + if generate: + assert text_obj.response is not None + + # Check tokens output + tokens_obj = result["tokens"] + if generate: + if not pad_output: + response_tokens = tokens_obj.get("response", as_list=True) + assert isinstance(tokens_obj.get("response", as_list=True), list) + else: + assert isinstance(tokens_obj.response, torch.Tensor) + + # ================================================ + # Tokens Input Mode Tests + # ================================================ + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + @pytest.mark.parametrize("generate", [True, False], 
ids=["generate", "no_generate"]) + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + def test_tokens_input_mode( + self, + wrapper_class, + vllm_instance, + transformers_instance, + sample_tokens, + generate, + pad_output, + ): + """Test tokens input mode with various configurations.""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + + input_ids, attention_mask = sample_tokens + + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="tokens", + attention_mask_key="attention_mask", + generate=generate, + pad_output=pad_output, + ) + + # Check input keys + assert ( + wrapper.in_keys == [("tokens", "prompt")] + if generate + else [("tokens", "full")] + ) + + # Create input data + data = TensorDict( + tokens=Tokens(prompt=input_ids) if generate else Tokens(full=input_ids), + attention_mask=attention_mask, + batch_size=(2,), + ) + + # Run wrapper + result = wrapper(data) + check_output_shapes(result, pad_output, requested_log_probs=not generate) + + # Check output structure + expected_keys = ["masks", "tokens", "log_probs"] + for key in expected_keys: + assert key in result + + # Check tokens output + tokens_obj = result["tokens"] + if generate: + if not pad_output: + response_tokens = result["tokens"].get("response", as_list=True) + assert isinstance(response_tokens, list) + else: + assert isinstance(tokens_obj.response, torch.Tensor) + + # ================================================ + # Error Handling Tests + # ================================================ + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_invalid_input_mode( + self, wrapper_class, vllm_instance, transformers_instance + ): + """Test that invalid input_mode raises an error.""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = 
transformers_instance + + with pytest.raises(ValueError, match="input_mode must be one of"): + wrapper_class( + model, + tokenizer=tokenizer, + input_mode="invalid_mode", + ) + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_missing_input_key( + self, wrapper_class, vllm_instance, transformers_instance, sample_history + ): + """Test that missing input key raises an error.""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="history", + input_key="history", + ) + + # Create data without the required key + data = TensorDict(batch_size=(2,)) + + with pytest.raises(ValueError, match="Expected 'history' key"): + wrapper(data) + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_invalid_history_type( + self, wrapper_class, vllm_instance, transformers_instance + ): + """Test that invalid history type raises an error.""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="history", + ) + + # Create data with wrong type + data = TensorDict( + history=ChatHistory(prompt="not a history object"), batch_size=(2,) + ) + + with pytest.raises(TypeError, match="Expected History object"): + wrapper(data) + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_generate_false_without_log_probs( + self, wrapper_class, vllm_instance, transformers_instance + ): + """Test that generate=False without return_log_probs=True raises an error.""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = 
transformers_instance + + with pytest.raises(ValueError, match="return_log_probs must be True"): + wrapper_class( + model, + tokenizer=tokenizer, + generate=False, + return_log_probs=False, + ) + + # ================================================ + # Batch Size Tests + # ================================================ + + @pytest.mark.parametrize( + "batch_size", [1, 2, 3], ids=["batch_size_1", "batch_size_2", "batch_size_3"] + ) + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_batch_sizes( + self, + wrapper_class, + vllm_instance, + transformers_instance, + batch_size, + pad_output, + ): + """Test wrapper with different batch sizes.""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + + # Create history with specified batch size + chats = [ + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": f"Question {i}?"}, + ] + for i in range(batch_size) + ] + history = History.from_chats(chats) + + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="history", + generate=True, + return_log_probs=True, + pad_output=pad_output, + ) + + data = TensorDict(history=ChatHistory(prompt=history), batch_size=(batch_size,)) + result = wrapper(data) + check_output_shapes( + result, pad_output=wrapper.pad_output, requested_log_probs=False + ) + + # Check that all expected keys are present + expected_keys = ["text", "masks", "tokens", "log_probs"] + for key in expected_keys: + assert key in result + + # Check batch size consistency + if pad_output: + # For padded output, tensors should have the correct batch dimension + assert len(result["text"].response) == batch_size + assert len(result["tokens"].response) == batch_size + else: + # For unpadded output, use as_list=True to get 
lists + response_text = result["text"].get("response", as_list=True) + response_tokens = result["tokens"].get("response", as_list=True) + assert len(response_text) == batch_size + assert len(response_tokens) == batch_size + + # ================================================ + # Custom Input Key Tests + # ================================================ + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_custom_input_key( + self, wrapper_class, vllm_instance, transformers_instance, sample_history + ): + """Test wrapper with custom input key.""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="history", + input_key=("custom_history_key", "prompt"), + generate=True, + return_log_probs=True, + ) + + # Check input keys + assert wrapper.in_keys == [("custom_history_key", "prompt")] + + # Create data with custom key + data = TensorDict( + custom_history_key=ChatHistory(prompt=sample_history), batch_size=(2,) + ) + result = wrapper(data) + check_output_shapes( + result, pad_output=wrapper.pad_output, requested_log_probs=False + ) + + # Check that wrapper works correctly + expected_keys = ["text", "masks", "tokens", "log_probs"] + for key in expected_keys: + assert key in result + + # ================================================ + # Selective Output Tests + # ================================================ + + @pytest.mark.parametrize( + "return_log_probs", [True, False], ids=["log_probs", "no_log_probs"] + ) + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_selective_outputs( + self, + wrapper_class, + vllm_instance, + transformers_instance, + sample_history, + return_log_probs, + ): + """Test wrapper with selective output configurations.""" + 
if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="history", + generate=True, + return_log_probs=return_log_probs, + ) + + # Check output keys + expected_out_keys = [] + if wrapper.return_text: + expected_out_keys.append("text") + if wrapper.return_masks: + expected_out_keys.append("masks") + if wrapper.return_tokens: + expected_out_keys.append("tokens") + if return_log_probs: + expected_out_keys.append("log_probs") + if wrapper.return_history: + expected_out_keys.append("history") + + assert wrapper.out_keys == expected_out_keys + + # Run wrapper + data = TensorDict(history=ChatHistory(prompt=sample_history), batch_size=(2,)) + result = wrapper(data) + check_output_shapes( + result, pad_output=wrapper.pad_output, requested_log_probs=False + ) + + # Check that only expected keys are present + for key in expected_out_keys: + assert key in result + + # Check that unexpected keys are not present + all_possible_keys = ["text", "masks", "tokens", "log_probs"] + for key in all_possible_keys: + if key not in expected_out_keys: + assert key not in result + + # ================================================ + # Log-probs Only Mode Tests + # ================================================ + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_log_probs_only_mode( + self, + wrapper_class, + vllm_instance, + transformers_instance, + sample_history_assistant, + ): + """Test wrapper in log-probs only mode (generate=False).""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="history", + generate=False, # Only compute log-probs + return_log_probs=True, # Must be True when generate=False + ) + + data = 
TensorDict( + history=ChatHistory(full=sample_history_assistant), batch_size=(2,) + ) + result = wrapper(data) + check_output_shapes( + result, pad_output=wrapper.pad_output, requested_log_probs=True + ) + + # Check that log_probs are present + assert "log_probs" in result + + # Check that response_text is None (no generation) + assert result["text"].response is None + + # Check that prompt_logprobs are present + log_probs_obj = result["log_probs"] + assert log_probs_obj.get("full", as_list=True) is not None + + # ================================================ + # TensorClass Structure Tests + # ================================================ + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_tensorclass_structure( + self, wrapper_class, vllm_instance, transformers_instance, sample_history + ): + """Test that TensorClass objects have the correct structure.""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + pad_output = False + + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="history", + generate=True, + return_log_probs=True, + ) + + data = TensorDict(history=ChatHistory(prompt=sample_history), batch_size=(2,)) + result = wrapper(data) + + # Test Text TensorClass + text_obj = result["text"] + assert hasattr(text_obj, "prompt") + assert hasattr(text_obj, "response") + assert hasattr(text_obj, "full") + + # Test Tokens TensorClass + tokens_obj = result["tokens"] + if pad_output: + # if not padded, we will fail to stack + assert hasattr(tokens_obj, "prompt") + assert hasattr(tokens_obj, "response") + assert hasattr(tokens_obj, "full") + assert hasattr(tokens_obj, "padded") + else: + assert ( + tokens_obj.get("response", as_list=True) is not None + ) # if not padded, we will fail to stack + + # Test LogProbs TensorClass + log_probs_obj = result["log_probs"] + if pad_output: + # 
if not padded, we will fail to stack + assert hasattr(log_probs_obj, "prompt") + assert hasattr(log_probs_obj, "response") + assert hasattr(log_probs_obj, "full") + assert hasattr(log_probs_obj, "padded") + else: + assert ( + log_probs_obj.get("response", as_list=True) is not None + ) # if not padded, we will fail to stack + + # Test Masks TensorClass + masks_obj = result["masks"] + if pad_output: + # if not padded, we will fail to stack + assert hasattr(masks_obj, "all_attention_mask") + assert hasattr(masks_obj, "all_assistant_mask") + assert hasattr(masks_obj, "padded") + + # ================================================ + # Unpadded Output Tests (with as_list=True) + # ================================================ + + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_unpadded_output_with_as_list( + self, wrapper_class, vllm_instance, transformers_instance, sample_history + ): + """Test unpadded output using as_list=True to avoid stacking issues.""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode="history", + generate=True, + return_log_probs=True, + pad_output=False, # Unpadded output + ) + + data = TensorDict(history=ChatHistory(prompt=sample_history), batch_size=(2,)) + result = wrapper(data) + check_output_shapes( + result, pad_output=wrapper.pad_output, requested_log_probs=False + ) + + # Use as_list=True to get lists instead of trying to stack + text_list = result.get("text", as_list=True) + tokens_list = result.get("tokens", as_list=True) + masks_list = result.get("masks", as_list=True) + log_probs_list = result.get("log_probs", as_list=True) + + # Check that we get lists + assert isinstance(text_list.response, list) + assert isinstance(tokens_list.get("response", as_list=True), list) + assert 
isinstance(log_probs_list.get("response", as_list=True), list) + + # Check list lengths + assert len(text_list.response) == 2 + assert len(tokens_list.get("response", as_list=True)) == 2 + assert len(log_probs_list.get("response", as_list=True)) == 2 + + # Check that individual elements are tensors + assert isinstance(text_list.response[0], str) + assert isinstance(tokens_list.get("response", as_list=True)[0], torch.Tensor) + assert isinstance(log_probs_list.get("response", as_list=True)[0], torch.Tensor) + + @pytest.mark.parametrize("num_samples", [2], ids=["num_samples_2"]) + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + @pytest.mark.parametrize( + "return_log_probs", [True, False], ids=["log_probs", "no_log_probs"] + ) + @pytest.mark.parametrize( + "input_mode", ["history", "text", "tokens"], ids=["history", "text", "tokens"] + ) + @pytest.mark.parametrize( + "wrapper_class", + [vLLMWrapper, TransformersWrapperMaxTokens], + ids=["vllm", "transformers"], + ) + def test_num_samples( + self, + wrapper_class, + vllm_instance, + transformers_instance, + sample_history, + sample_text, + sample_tokens, + num_samples, + pad_output, + return_log_probs, + input_mode, + ): + """Test wrapper with num_samples.""" + if wrapper_class == vLLMWrapper: + model, tokenizer = vllm_instance + else: + model, tokenizer = transformers_instance + + wrapper = wrapper_class( + model, + tokenizer=tokenizer, + input_mode=input_mode, + generate=True, + return_log_probs=return_log_probs, + pad_output=pad_output, + num_samples=num_samples, + ) + if input_mode == "history": + data = TensorDict( + history=ChatHistory(prompt=sample_history), batch_size=(2,) + ) + elif input_mode == "text": + data = TensorDict(text=Text(prompt=sample_text), batch_size=(2,)) + elif input_mode == "tokens": + data = TensorDict(tokens=Tokens(prompt=sample_tokens[0]), batch_size=(2,)) + else: + raise ValueError(f"Invalid input mode: {input_mode}") + result = wrapper(data) + assert 
result.batch_size == (2, num_samples) + check_output_shapes( + result, pad_output=wrapper.pad_output, requested_log_probs=False + ) + + +class TestKLTransforms: + """Comprehensive tests for KL-related transforms with different input modes and configurations.""" + + @pytest.mark.skipif(not _has_transformers, reason="transformers not available") + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + @pytest.mark.parametrize( + "assistant_only", [True, False], ids=["assistant_only", "all_tokens"] + ) + @pytest.mark.parametrize( + "input_mode", ["history", "text", "tokens"], ids=["history", "text", "tokens"] + ) + def test_retrieve_log_prob_input_modes( + self, + transformers_instance, + sample_history_assistant, + sample_text, + sample_tokens, + pad_output, + assistant_only, + input_mode, + ): + """Test RetrieveLogProb with different input modes and assistant_only settings.""" + model, tokenizer = transformers_instance + + # Skip invalid combinations + if assistant_only and input_mode != "history": + pytest.skip("assistant_only=True requires input_mode='history'") + + # Create test data based on input mode + if input_mode == "history": + history = sample_history_assistant + data = TensorDict(history=ChatHistory(full=history), batch_size=(2,)) + elif input_mode == "text": + history = None # Not used in text mode + prompts = sample_text + data = TensorDict(text=Text(full=prompts), batch_size=(2,)) + elif input_mode == "tokens": + history = None # Not used in tokens mode + prompts = sample_tokens + data = TensorDict( + tokens=Tokens(full=prompts[0]), + masks=Masks(all_attention_mask=prompts[1]), + batch_size=(2,), + ) + else: + raise ValueError(f"Invalid input_mode: {input_mode}") + + # Create reference model with appropriate input mode + ref_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode=input_mode, + generate=False, + pad_output=pad_output, + ) + + # Create RetrieveLogProb transform + transform = 
RetrieveLogProb( + ref_model, + assistant_only=assistant_only, + tokenizer=tokenizer, + ) + + # Apply transform + result = transform(data) + + # The log-probs key should be based on the model's log_probs_key + log_probs_key = (ref_model.log_probs_key, "full") + assert log_probs_key in result + + # Check log-probs structure + if pad_output: + log_probs = result.get(log_probs_key) + assert isinstance(log_probs, torch.Tensor) + assert log_probs.shape[0] == 2 # batch size + else: + # For unpadded output, we get a list of tensors + log_probs = result.get(log_probs_key, as_list=True) + assert isinstance(log_probs, list) + assert len(log_probs) == 2 # batch size + + @pytest.mark.skipif(not _has_transformers, reason="transformers not available") + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + @pytest.mark.parametrize( + "assistant_only", [True, False], ids=["assistant_only", "all_tokens"] + ) + @pytest.mark.parametrize( + "input_mode", ["history", "text", "tokens"], ids=["history", "text", "tokens"] + ) + def test_retrieve_kl_input_modes( + self, + transformers_instance, + sample_history_assistant, + sample_text, + sample_tokens, + pad_output, + assistant_only, + input_mode, + ): + """Test RetrieveKL with different input modes and assistant_only settings.""" + model, tokenizer = transformers_instance + + # Skip invalid combinations + if assistant_only and input_mode != "history": + pytest.skip("assistant_only=True requires input_mode='history'") + + # Create test data based on input mode + if input_mode == "history": + history = sample_history_assistant + data = TensorDict(history=ChatHistory(full=history), batch_size=(2,)) + elif input_mode == "text": + history = None # Not used in text mode + prompts = sample_text + data = TensorDict(text=Text(full=prompts), batch_size=(2,)) + elif input_mode == "tokens": + history = None # Not used in tokens mode + prompts = sample_tokens + data = TensorDict( + tokens=Tokens(full=prompts[0]), + 
masks=Masks(all_attention_mask=prompts[1]), + batch_size=(2,), + ) + else: + raise ValueError(f"Invalid input_mode: {input_mode}") + + # Create generation and reference models with appropriate input mode + gen_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode=input_mode, + generate=False, + pad_output=pad_output, + log_probs_key="gen_log_probs", + ) + + ref_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode=input_mode, + generate=False, + pad_output=pad_output, + log_probs_key="ref_log_probs", + ) + + # Create RetrieveKL transform + transform = RetrieveKL( + gen_model=gen_model, + ref_model=ref_model, + assistant_only=assistant_only, + tokenizer=tokenizer, + ) + + # Apply transform + data = data.to_lazystack(0) + result = transform(data) + + # Check that KL is present + # Check that both log-probs and KL are present + assert ("gen_log_probs", "full") in result + assert ("ref_log_probs", "full") in result + assert "kl" in result + + # Check KL structure + if pad_output: + kl = result.get("kl") + assert isinstance(kl, torch.Tensor) + assert kl.shape[0] == 2 # batch size + else: + kl = result.get("kl", as_list=True) + # For unpadded output, we get a list of tensors + assert isinstance(kl, list) + assert len(kl) == 2 # batch size + + @pytest.mark.skipif(not _has_transformers, reason="transformers not available") + def test_retrieve_log_prob_assistant_only_validation( + self, transformers_instance, sample_text + ): + """Test that assistant_only=True with non-history input_mode raises an error.""" + model, tokenizer = transformers_instance + + # Create reference model with text input mode + ref_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode="text", + generate=False, + return_log_probs=True, + pad_output=True, + ) + + # This should raise an error + with pytest.raises( + ValueError, match="The model must have `input_mode='history'` when" + ): + RetrieveLogProb( + ref_model, + assistant_only=True, # 
This should fail with text input_mode + tokenizer=tokenizer, + ) + + @pytest.mark.skipif(not _has_transformers, reason="transformers not available") + def test_retrieve_kl_assistant_only_validation( + self, transformers_instance, sample_text + ): + """Test that assistant_only=True with non-history input_mode raises an error.""" + model, tokenizer = transformers_instance + + # Create models with text input mode + gen_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode="text", + generate=False, + return_log_probs=True, + pad_output=True, + log_probs_key="gen_log_probs", + ) + + ref_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode="text", + generate=False, + return_log_probs=True, + pad_output=True, + log_probs_key="ref_log_probs", + ) + + # This should raise an error + with pytest.raises( + ValueError, match="The model must have `input_mode='history'` when" + ): + RetrieveKL( + gen_model=gen_model, + ref_model=ref_model, + assistant_only=True, # This should fail with text input_mode + tokenizer=tokenizer, + ) + + @pytest.mark.skipif(not _has_transformers, reason="transformers not available") + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + def test_retrieve_kl_pad_output_consistency( + self, transformers_instance, sample_history_assistant, pad_output + ): + """Test that RetrieveKL enforces pad_output consistency between models.""" + model, tokenizer = transformers_instance + + # Create models with different pad_output settings + gen_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode="history", + generate=False, + return_log_probs=True, + pad_output=pad_output, + log_probs_key="gen_log_probs", + ) + + ref_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode="history", + generate=False, + return_log_probs=True, + pad_output=not pad_output, # Different pad_output setting + log_probs_key="ref_log_probs", + ) + + # This should raise an error + 
with pytest.raises(ValueError, match="pad_output mismatch"): + RetrieveKL( + gen_model=gen_model, + ref_model=ref_model, + assistant_only=False, + tokenizer=tokenizer, + ) + + @pytest.mark.skipif(not _has_transformers, reason="transformers not available") + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + def test_kl_computation_transform( + self, transformers_instance, sample_history_assistant, pad_output + ): + """Test the KLComputation transform directly.""" + model, tokenizer = transformers_instance + + # Create models + gen_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode="history", + generate=False, + return_log_probs=True, + pad_output=pad_output, + log_probs_key="gen_log_probs", + ) + + ref_model = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode="history", + generate=False, + return_log_probs=True, + pad_output=pad_output, + log_probs_key="ref_log_probs", + ) + + # Create data + data = TensorDict( + history=ChatHistory(full=sample_history_assistant), batch_size=(2,) + ) + + # Get log-probs from both models + data = data.to_lazystack(0) + gen_result = gen_model(data) + ref_result = ref_model(data) + + # Create next tensordict with log-probs and reward + next_td = TensorDict(batch_size=(2,)).to_lazystack(0) + next_td.update(gen_result, keys_to_update=[("gen_log_probs", "full")]) + next_td.update(ref_result, keys_to_update=[("ref_log_probs", "full")]) + next_td.update({"reward": torch.randn(2, 1, 1)}) + + # Create KLComputation transform + kl_transform = KLComputation( + gen_log_probs_full_key=("gen_log_probs", "full"), + ref_log_probs_full_key=("ref_log_probs", "full"), + kl_key="kl", + add_to_reward=True, + coeff=1.0, + ) + + # Apply transform + result = kl_transform(data.set("next", next_td)) + + # Check that KL is computed + result = result["next"] + assert "kl" in result + + if pad_output: + kl = result.get("kl") + assert isinstance(kl, torch.Tensor) + assert kl.shape[0] == 2 # 
batch size + else: + kl = result.get("kl", as_list=True) + assert isinstance(kl, list) + assert len(kl) == 2 # batch size + + # Check that reward is modified + assert "reward" in result + reward = result.get("reward") + assert reward is not None + + +class TestLogProbsComparison: + """Test log-probability consistency between vLLM and Transformers wrappers.""" + + @pytest.mark.skipif(not _has_vllm, reason="vllm not available") + @pytest.mark.skipif(not _has_transformers, reason="transformers not available") + @pytest.mark.parametrize( + "input_mode", ["history", "text", "tokens"], ids=["history", "text", "tokens"] + ) + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + def test_log_probs_consistency( + self, + vllm_instance, + transformers_instance, + input_mode, + pad_output, + sample_history, + sample_text, + sample_tokens, + ): + """Test that log-probabilities are consistent between vLLM and Transformers wrappers.""" + vllm_model, vllm_tokenizer = vllm_instance + tf_model, tf_tokenizer = transformers_instance + + # Create test data based on input mode + if input_mode == "history": + history = sample_history + data = TensorDict(history=history, batch_size=(2,)) + input_key = "history" + elif input_mode == "text": + history = None # Not used in text mode + prompts = sample_text + data = TensorDict(text=prompts, batch_size=(2,)) + input_key = "text" + elif input_mode == "tokens": + history = None # Not used in tokens mode + prompts = sample_tokens + data = TensorDict( + input_ids=prompts[0], + attention_mask=prompts[1], + batch_size=(2,), + ) + input_key = "input_ids" + else: + raise ValueError(f"Invalid input_mode: {input_mode}") + + # Create vLLM wrapper for generation + vllm_gen_wrapper = vLLMWrapper( + vllm_model, + tokenizer=vllm_tokenizer, + input_mode=input_mode, + input_key=input_key, + generate=True, + pad_output=pad_output, + generate_kwargs={"max_tokens": 5, "temperature": 0.0}, # Deterministic + ) + + # Create 
Transformers wrapper for generation + tf_gen_wrapper = TransformersWrapper( + tf_model, + tokenizer=tf_tokenizer, + input_mode=input_mode, + input_key=input_key, + generate=True, + pad_output=pad_output, + generate_kwargs={ + "max_new_tokens": 5, + "do_sample": False, + "temperature": 0.0, + }, # Deterministic + ) + + # Step 1: Generate tokens with both wrappers + vllm_gen_result = vllm_gen_wrapper(data.copy()) + tf_gen_wrapper(data.copy()) + + # Step 2: Extract generated tokens and create new input for log-probs computation + if input_mode == "history": + # For history mode, we need to create new history with generated responses + generated_texts = vllm_gen_result["text"].response + new_chats = [] + assert history is not None # Type assertion for linter + for chat, gen_text in _zip_strict(history.unbind(0), generated_texts): + new_chat = chat.copy().append( + History(role="assistant", content=gen_text) + ) + new_chats.append(new_chat) + new_history = lazy_stack(new_chats) + new_data = TensorDict(history=new_history, batch_size=(2,)) + elif input_mode == "text": + # For text mode, concatenate original text with generated text + original_texts = data["text"] + generated_texts = vllm_gen_result["text"].response + new_texts = [ + orig + gen for orig, gen in zip(original_texts, generated_texts) + ] + new_data = TensorDict(text=new_texts, batch_size=(2,)) + elif input_mode == "tokens": + # For tokens mode, concatenate original tokens with generated tokens + original_tokens = data["input_ids"] + generated_tokens = vllm_gen_result["tokens"].response + if pad_output: + # Remove padding from generated tokens + mask = generated_tokens != vllm_tokenizer.pad_token_id + new_tokens = [] + for i in range(len(original_tokens)): + valid_tokens = generated_tokens[i][mask[i]] + combined = torch.cat([original_tokens[i], valid_tokens]) + new_tokens.append(combined) + new_tokens = torch.stack(new_tokens) + else: + new_tokens = [] + for i in range(len(original_tokens)): + combined = 
torch.cat([original_tokens[i], generated_tokens[i]]) + new_tokens.append(combined) + new_data = TensorDict(input_ids=new_tokens, batch_size=(2,)) + else: + raise ValueError(f"Invalid input_mode: {input_mode}") + + # Step 3: Create log-probs only wrappers + vllm_lp_wrapper = vLLMWrapper( + vllm_model, + tokenizer=vllm_tokenizer, + input_mode=input_mode, + input_key=input_key, + generate=False, + pad_output=pad_output, + ) + + tf_lp_wrapper = TransformersWrapper( + tf_model, + tokenizer=tf_tokenizer, + input_mode=input_mode, + input_key=input_key, + generate=False, + pad_output=pad_output, + ) + + # Step 4: Compute log-probs for the full sequence (original + generated) + vllm_lp_result = vllm_lp_wrapper(new_data.copy()) + tf_lp_result = tf_lp_wrapper(new_data.copy()) + + from tensordict import assert_close + + assert_close( + vllm_lp_result, tf_lp_result, atol=1e-1, rtol=1e-1, intersection=True + ) + + +class TestDistributionMethods: + """Test the new distribution methods and masking strategies.""" + + @pytest.mark.skipif(not _has_vllm, reason="vllm not available") + @pytest.mark.parametrize("masking_strategy", ["sft", "rlhf", "generic"]) + def test_vllm_distribution_methods( + self, vllm_instance, sample_history_assistant, sample_tokens, masking_strategy + ): + """Test that vLLM wrapper distribution methods work correctly.""" + model, tokenizer = vllm_instance + + # vLLM doesn't support get_dist methods + wrapper = vLLMWrapper( + model, + tokenizer=tokenizer, + input_mode="history", + generate=False, + return_log_probs=True, + ) + + # Create test data + td = TensorDict({"history": sample_history_assistant}, batch_size=(2,)) + + # Test that all distribution methods raise NotImplementedError + with pytest.raises(NotImplementedError, match="vLLM does not return logits"): + wrapper.get_dist(td) + + with pytest.raises(NotImplementedError, match="vLLM does not return logits"): + wrapper._get_sft_dist(td) + + with pytest.raises(NotImplementedError, match="vLLM does not 
return logits"): + wrapper._get_rlhf_dist(td) + + with pytest.raises(NotImplementedError, match="vLLM does not return logits"): + wrapper._get_generic_dist(td) + + @pytest.mark.skipif(not _has_transformers, reason="transformers not available") + @pytest.mark.parametrize("masking_strategy", ["sft", "rlhf", "generic"]) + @pytest.mark.parametrize("pad_output", [True, False], ids=["padded", "unpadded"]) + def test_transformers_distribution_methods( + self, + transformers_instance, + sample_history_assistant, + sample_tokens, + masking_strategy, + pad_output, + ): + """Test that Transformers wrapper distribution methods work correctly.""" + model, tokenizer = transformers_instance + + # Use tokens input mode for SFT, history for RLHF/generic + if masking_strategy == "sft": + input_mode = "tokens" + input_ids, attention_mask = sample_tokens + assistant_mask = attention_mask.bool().clone() + assistant_mask[:, : attention_mask.shape[-1] // 2] = False + input_data = { + "tokens": Tokens(full=input_ids), + "masks": Masks( + all_attention_mask=attention_mask.bool(), + all_assistant_mask=assistant_mask, + ), + } + + # Create test data with correct batch size + td = TensorDict(input_data, batch_size=(2,)).to_lazystack(0) + if not pad_output: + for _td in td.unbind(0): + _td["tokens"].full = _td["tokens"].full[ + _td["masks"].all_attention_mask + ] + _td["masks"].all_assistant_mask = _td["masks"].all_assistant_mask[ + _td["masks"].all_attention_mask + ] + _td["masks"].all_attention_mask = _td["masks"].all_attention_mask[ + _td["masks"].all_attention_mask + ] + else: + input_mode = "history" + input_data = {"history": ChatHistory(full=sample_history_assistant)} + + # Create test data with correct batch size + td = TensorDict(input_data, batch_size=(2,)).to_lazystack(0) + + wrapper = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode=input_mode, + generate=False, + pad_output=pad_output, + ) + + # Test the appropriate distribution method + if masking_strategy == 
"sft": + dist = wrapper._get_sft_dist(td) + elif masking_strategy == "rlhf": + dist = wrapper._get_rlhf_dist(td) + elif masking_strategy == "generic": + dist = wrapper._get_generic_dist(td) + + # Verify that we get a distribution + assert dist is not None + assert hasattr(dist, "log_prob") + assert hasattr(dist, "sample") + + # Test that logits are available in the output + td_out = wrapper(td.copy()) + + # Test log_prob computation + if masking_strategy == "sft": + # For SFT, we need tokens to compute log_prob + tokens = td_out.get( + ("tokens", "full"), + as_padded_tensor=True, + padding_side="left", + padding_value=tokenizer.pad_token_id, + ) + if tokens is not None: + log_probs = dist.log_prob(tokens.long()) + assert log_probs.shape == tokens.shape + else: + # For RLHF/generic, we can test with dummy tokens + logits = td_out.get("logits") + if logits is not None: + dummy_tokens = torch.randint(0, logits.shape[-1], logits.shape[:-1]) + log_probs = dist.log_prob(dummy_tokens) + assert log_probs.shape == dummy_tokens.shape + + @pytest.mark.skipif(not _has_transformers, reason="transformers not available") + def test_transformers_custom_masking( + self, transformers_instance, sample_history_assistant + ): + """Test custom masking functionality.""" + model, tokenizer = transformers_instance + + wrapper = TransformersWrapper( + model, + tokenizer=tokenizer, + input_mode="history", + generate=False, + return_log_probs=True, + pad_output=True, + ) + + td = TensorDict( + {"history": ChatHistory(full=sample_history_assistant)}, batch_size=(2,) + ) + + # Get the actual logits shape from the wrapper + result = wrapper(td) + lp = result["log_probs"].get("full") + + # Create a custom mask matching the logits shape + custom_mask = torch.zeros_like(lp, dtype=torch.bool) + custom_mask[:, :5] = True # Only first 5 tokens + + dist = wrapper._get_dist_with_custom_mask(td, custom_mask) + + assert dist is not None + assert hasattr(dist, "log_prob") + + +if __name__ == "__main__": + 
args, unknown = argparse.ArgumentParser().parse_known_args() + pytest.main([__file__, "--capture", "no", "--exitfirst"] + unknown) diff --git a/test/test_specs.py b/test/test_specs.py index d984db64c3a..44a10ea0a3e 100644 --- a/test/test_specs.py +++ b/test/test_specs.py @@ -3938,6 +3938,16 @@ def test_sample(self): assert nts.rand((2,)).data == "example_data" assert nts.zero((2,)).data == "example_data" + def test_feature_dims(self): + nts = NonTensor(shape=(3, 4), example_data="example_data") + assert nts.feature_dims == 2 + nts = NonTensor(shape=(3, 4), example_data="example_data", feature_dims=1) + assert nts.feature_dims == 1 + assert isinstance(nts.zeros(), NonTensorStack) + assert isinstance(nts.zeros(2), NonTensorStack) + assert isinstance(nts.zeros()[0], NonTensorData) + assert nts.rand((2,)).shape == (2, 3, 4) + def test_example_data_ineq(self): nts0 = NonTensor(shape=(3, 4), example_data="example_data") nts1 = NonTensor(shape=(3, 4), example_data="example_data 2") diff --git a/torchrl/collectors/collectors.py b/torchrl/collectors/collectors.py index 0d76124b73f..d3ad80aa829 100644 --- a/torchrl/collectors/collectors.py +++ b/torchrl/collectors/collectors.py @@ -56,7 +56,6 @@ WeightUpdaterBase, ) from torchrl.data import ReplayBuffer -from torchrl.data.tensor_specs import TensorSpec from torchrl.data.utils import CloudpickleWrapper, DEVICE_TYPING from torchrl.envs.common import _do_nothing, EnvBase from torchrl.envs.env_creator import EnvCreator @@ -176,7 +175,6 @@ def weight_updater(self, value: WeightUpdaterBase | None): def _get_policy_and_device( self, policy: Callable[[Any], Any] | None = None, - observation_spec: TensorSpec = None, policy_device: Any = NO_DEFAULT, env_maker: Any | None = None, env_maker_kwargs: dict[str, Any] | None = None, @@ -187,7 +185,6 @@ def _get_policy_and_device( Args: policy (TensorDictModule, optional): a policy to be used - observation_spec (TensorSpec, optional): spec of the observations policy_device (torch.device, 
f"Buffer write count: {self.replay_buffer.write_count}. Yielding."
) yield else: diff --git a/torchrl/collectors/llm/base.py b/torchrl/collectors/llm/base.py index 830eff36b85..6d6bcb50a19 100644 --- a/torchrl/collectors/llm/base.py +++ b/torchrl/collectors/llm/base.py @@ -189,6 +189,9 @@ def __init__( extend_buffer=True, postproc=postproc, ) + if hasattr(self.policy, "register_collector"): + self.policy.register_collector(self) + if yield_only_last_steps is None: yield_only_last_steps = False diff --git a/torchrl/data/llm/__init__.py b/torchrl/data/llm/__init__.py index b7a5d1323f2..4ecf4d61098 100644 --- a/torchrl/data/llm/__init__.py +++ b/torchrl/data/llm/__init__.py @@ -3,7 +3,6 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from .chat import ContentBase, History from .common import LLMData from .dataset import ( create_infinite_iterator, @@ -11,6 +10,7 @@ TensorDictTokenizer, TokenizedDatasetLoader, ) +from .history import add_chat_template, ContentBase, History from .prompt import PromptData, PromptTensorDictTokenizer from .reward import PairwiseDataset, RewardData from .topk import TopKRewardSelector @@ -24,6 +24,7 @@ "LLMData", "PairwiseDataset", "PromptData", + "add_chat_template", "PromptTensorDictTokenizer", "RewardData", "RolloutFromModel", diff --git a/torchrl/data/llm/chat.py b/torchrl/data/llm/history.py similarity index 54% rename from torchrl/data/llm/chat.py rename to torchrl/data/llm/history.py index 5391b883c11..8cfe713f386 100644 --- a/torchrl/data/llm/chat.py +++ b/torchrl/data/llm/history.py @@ -21,7 +21,13 @@ from tensordict.utils import _maybe_correct_neg_dim from torchrl._utils import logger as torchrl_logger +try: + import transformers +except ImportError: + transformers = None + +# Global storage for custom templates and their metadata _CHAT_TEMPLATES = { "chatml_format": """{% for message in messages %} {%- if message['role'] == 'assistant' %} @@ -40,7 +46,7 @@ {%- if messages[0]['role'] == 'system' %} {{- 
messages[0]['content'] }} {%- else %} - {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }} + {{- 'You are a helpful assistant.' }} {%- endif %} {{- "\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n" }} {%- for tool in tools %} @@ -52,7 +58,7 @@ {%- if messages[0]['role'] == 'system' %} {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }} {%- else %} - {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }} + {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }} {%- endif %} {%- endif %} {%- for message in messages %} @@ -92,8 +98,176 @@ {% generation %}{{- '<|im_start|>assistant\\n' }}{% endgeneration %} {%- endif %} """, + "dialogpt": """{% for message in messages %}{% if message['role'] == 'assistant' %}{% generation %}{{ message['content'] }}{% endgeneration %}{{ eos_token }}{% elif message['role'] == 'user' %}{{ message['content'] }}{{ eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{% generation %}{{ ' ' }}{% endgeneration %}{% endif %}""", + "falcon": """{% for message in messages %}{% if message['role'] == 'assistant' %}{% generation %}{{ 'Assistant: ' + message['content'] }}{% endgeneration %}\n\n{% elif message['role'] == 'user' %}{{ 'User: ' + message['content'] }}\n\n{% elif message['role'] == 'system' %}{{ message['content'] }}\n\n{% endif %}{% endfor %}{% if add_generation_prompt %}{% generation %}{{ 'Assistant: ' }}{% endgeneration %}{% endif %}""", + "deepseek": """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{% generation %}{{ 'Assistant: ' + message['content'] + eos_token }}{% endgeneration %}{% elif 
message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{% generation %}{{ 'Assistant:' }}{% endgeneration %}{% endif %}""", + "llama": """{{- bos_token }} +{%- if messages[0]['role'] == 'system' %} + {%- set system_message = messages[0]['content']|trim %} + {%- set messages = messages[1:] %} +{%- else %} + {%- set system_message = "" %} +{%- endif %} +{%- if system_message %} + {{- "<|header_start|>system<|header_end|>\n\n" }} + {{- system_message }} + {{- "<|eot|>" }} +{%- endif %} +{%- for message in messages %} + {%- if message['role'] == 'assistant' %} + {% generation %}{{- '<|header_start|>' + message['role'] + '<|header_end|>\n\n' }} + {%- if message['content'] is string %} + {{- message['content'] }} + {%- else %} + {%- for content in message['content'] %} + {%- if content['type'] == 'text' %} + {{- content['text'] | trim }} + {%- endif %} + {%- endfor %} + {%- endif %} + {{- "<|eot|>" }}{% endgeneration %} + {%- else %} + {{- '<|header_start|>' + message['role'] + '<|header_end|>\n\n' }} + {%- if message['content'] is string %} + {{- message['content'] }} + {%- else %} + {%- for content in message['content'] %} + {%- if content['type'] == 'text' %} + {{- content['text'] | trim }} + {%- endif %} + {%- endfor %} + {%- endif %} + {{- "<|eot|>" }} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {% generation %}{{- '<|header_start|>assistant<|header_end|>\n\n' }}{% endgeneration %} +{%- endif %}""", } +# Global storage for custom template metadata +_CUSTOM_INVERSE_PARSERS = {} +_CUSTOM_MODEL_FAMILY_KEYWORDS = {} + + +def add_chat_template( + template_name: str, + template: str, + inverse_parser: callable | None = None, + model_family_keywords: list[str] | None = None, +) -> None: + r"""Add a custom chat template to the global template dictionary. 
+ + This function allows you to add custom chat templates for new model families + that support assistant token masking via the `{% generation %}` keyword. + + Args: + template_name (str): The name of the template (e.g., "llama", "mistral"). + This name will be used in the `chat_template_name` parameter of + `History.apply_chat_template()` and `History.from_text()`. + template (str): The Jinja2 template string. Must include `{% generation %}` + blocks around assistant message content to enable token masking. + inverse_parser (callable, optional): A function that parses formatted text back + into a History object. Should have signature `(text: str) -> History`. + If None, a basic parser will be used. + model_family_keywords (list[str], optional): Keywords to detect this model family + in the auto-detection logic. For example, ["llama", "meta-llama"] for Llama models. + If provided, the template will be automatically selected for models containing + these keywords in their name. + + Example: + >>> from torchrl.data.llm.chat import add_chat_template, History + >>> from transformers import AutoTokenizer + >>> + >>> # Add a custom template for Llama models + >>> llama_template = ''' + ... {% for message in messages %} + ... {%- if message['role'] == 'user' %} + ... {{ '[INST] ' + message['content'] + ' [/INST]' }} + ... {%- elif message['role'] == 'assistant' %} + ... {% generation %}{{ message['content'] + '' }}{% endgeneration %} + ... {%- endif %} + ... {% endfor %} + ... {%- if add_generation_prompt %} + ... {% generation %}{{ ' ' }}{% endgeneration %} + ... {%- endif %} + ... ''' + >>> + >>> def parse_llama_text(text: str) -> History: + ... # Custom parser for Llama format + ... import re + ... pattern = r'\[INST\]\s*(.*?)\s*\[/INST\]\s*(.*?)' + ... matches = re.findall(pattern, text, re.DOTALL) + ... messages = [] + ... for user_content, assistant_content in matches: + ... messages.append(History(role="user", content=user_content.strip())) + ... 
messages.append(History(role="assistant", content=assistant_content.strip())) + ... return lazy_stack(messages) + >>> + >>> # Add the template with auto-detection + >>> add_chat_template( + ... template_name="llama", + ... template=llama_template, + ... inverse_parser=parse_llama_text, + ... model_family_keywords=["llama", "meta-llama"] + ... ) + >>> + >>> # Now you can use it with auto-detection + >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf") + >>> history = History.from_chats([[ + ... {"role": "user", "content": "Hello"}, + ... {"role": "assistant", "content": "Hi there!"} + ... ]]) + >>> + >>> # Auto-detection will use the llama template + >>> result = history.apply_chat_template( + ... tokenizer=tokenizer, + ... add_generation_prompt=False, + ... return_dict=True, + ... return_assistant_tokens_mask=True, + ... ) + >>> + >>> # Or use it explicitly + >>> result = history.apply_chat_template( + ... tokenizer=tokenizer, + ... chat_template_name="llama", + ... add_generation_prompt=False, + ... return_dict=True, + ... return_assistant_tokens_mask=True, + ... ) + + .. note: + - The template must include `{% generation %}` blocks around assistant message + content to enable assistant token masking. + - The inverse parser should handle the specific format of your template. + - Model family keywords are case-insensitive and matched against the tokenizer's + `name_or_path` attribute. + - Templates are stored globally and persist for the duration of the Python session. + """ + global _CHAT_TEMPLATES, _CUSTOM_INVERSE_PARSERS, _CUSTOM_MODEL_FAMILY_KEYWORDS + + # Validate template contains generation blocks + if "{% generation %}" not in template: + raise ValueError( + f"Template '{template_name}' must include '{{% generation %}}' blocks " + "around assistant message content to enable token masking." 
+ ) + + # Add template to dictionary + _CHAT_TEMPLATES[template_name] = template + + # Store inverse parser if provided + if inverse_parser is not None: + _CUSTOM_INVERSE_PARSERS[template_name] = inverse_parser + + # Store model family keywords if provided + if model_family_keywords is not None: + _CUSTOM_MODEL_FAMILY_KEYWORDS[template_name] = model_family_keywords + + torchrl_logger.info( + f"Added custom chat template '{template_name}' with assistant token masking support" + ) + # We need the 'shadow' flag to avoid having tensordict complaining about 'type'/'size' etc. fields class ContentBase(TensorClass["nocast", "shadow"]): @@ -197,12 +371,93 @@ class History(TensorClass["nocast"]): - Efficient methods to append, extend, and reshape history elements, enabling dynamic construction of conversation trajectories, especially useful in reinforcement learning environments. - Interoperability with the `transformers` API, allowing for easy tokenization and preparation of input data. + - **Assistant token masking support** across multiple model families for reinforcement learning applications. + + **Recent Changes:** + - **ChatHistory Integration**: History objects are now used within :class:`~torchrl.modules.llm.policies.ChatHistory` + containers for structured conversation management in LLM environments. + - **Modular Wrapper Support**: Both vLLMWrapper and TransformersWrapper now use History objects when `input_mode="history"` + is specified, providing consistent conversation state management. + - **Environment Integration**: ChatEnv and related environments use History objects for state management and conversation tracking. .. note:: The `""` role is used to indicate that the element is a placeholder, for example when the tool call was not executed but a stack requires a certain number of elements per batch to have congruent shapes. The :meth:`~torchrl.data.llm.chat.History.apply_chat_template` method will remove the `` role from the history. 
+ **Assistant Token Masking Support:** + + The class supports assistant token masking across multiple model families, allowing you to identify which tokens + in a conversation were generated by the assistant. This is crucial for reinforcement learning applications. + + **Supported Model Families:** + + - **Qwen family** (e.g., `Qwen/Qwen2.5-0.5B`): Custom template with full tool calling support + - **DialoGPT family** (e.g., `microsoft/DialoGPT-medium`): Custom template for conversation format + - **Falcon family** (e.g., `tiiuae/falcon-7b-instruct`): Custom template for instruction format + - **DeepSeek family** (e.g., `deepseek-ai/deepseek-coder-6.7b-base`): Custom template with native format + - **Other models** (OPT, GPT, MPT, BLOOM, Pythia, Phi, etc.): Default `chatml_format` template + + **Example with Assistant Token Masking:** + + .. code-block:: python + + >>> from torchrl.data.llm.chat import History + >>> from torchrl.modules.llm.policies import ChatHistory + >>> from transformers import AutoTokenizer + >>> + >>> # Create a conversation history + >>> history = History.from_chats([[ + ... {"role": "user", "content": "Hello"}, + ... {"role": "assistant", "content": "Hi there!"}, + ... {"role": "user", "content": "How are you?"}, + ... {"role": "assistant", "content": "I'm doing well, thanks!"} + ... ]]) + >>> + >>> # Create ChatHistory container for LLM wrapper + >>> chat_history = ChatHistory(prompt=history) + >>> + >>> # Load any supported tokenizer + >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B") + >>> + >>> # Apply chat template with assistant token masking + >>> result = history.apply_chat_template( + ... tokenizer=tokenizer, + ... add_generation_prompt=False, + ... return_dict=True, + ... return_assistant_tokens_mask=True, + ... 
) + >>> + >>> # The result contains an assistant_masks tensor + >>> assistant_masks = result["assistant_masks"] + >>> print(f"Assistant tokens: {assistant_masks.sum().item()}") + + **Integration with LLM Wrappers:** + + History objects work seamlessly with the new modular wrapper design: + + .. code-block:: python + + >>> from torchrl.modules.llm import TransformersWrapper + >>> from torchrl.modules.llm.policies import ChatHistory + >>> + >>> # Create wrapper with history input mode + >>> wrapper = TransformersWrapper( + ... model, tokenizer=tokenizer, + ... input_mode="history", + ... generate=True, + ... return_log_probs=True + ... ) + >>> + >>> # Use History with ChatHistory container + >>> history = History.from_chats([[ + ... {"role": "user", "content": "Hello"}, + ... {"role": "assistant", "content": "Hi there!"} + ... ]]) + >>> chat_history = ChatHistory(prompt=history) + >>> result = wrapper(TensorDict(history=chat_history, batch_size=(1,))) + >>> print(result["history"].response) # New response from LLM + Attributes: role (str): The role of the message sender. content (str): The content of the message. @@ -256,6 +511,10 @@ class History(TensorClass["nocast"]): <|im_start|>assistant + .. seealso:: + :class:`~torchrl.modules.llm.policies.ChatHistory`: Container for managing conversation data in LLM environments. + :class:`~torchrl.modules.llm.policies.Text`: Container for text data. + :class:`~torchrl.modules.llm.policies.Tokens`: Container for token data. 
""" role: str @@ -277,7 +536,7 @@ def apply_chat_template( tokenizer: transformers.AutoTokenizer | transformers.AutoProcessor, # noqa add_generation_prompt: bool = True, chat_template: str | None = None, - chat_template_name: Literal["chatml_format", "qwen"] | None = None, + chat_template_name: str | None = None, continue_final_message: bool = False, tokenize: bool | None = None, padding: bool | str = False, @@ -286,15 +545,16 @@ def apply_chat_template( return_dict: bool | None = None, return_assistant_tokens_mask: bool = False, **kwargs, - ): + ) -> str | list[str] | TensorDict: """Applies a chat template to the history. Keyword Args: tokenizer (transformers.PreTrainedTokenizer | transformers.AutoProcessor): The tokenizer to use. add_generation_prompt (bool, optional): Whether to add a generation prompt (e.g. `"<|im_start|>assistant"`). Defaults to `True`. chat_template (str, optional): The chat template to use. Defaults to the tokenizer's default template. - chat_template_name (Literal["chatml_format", "qwen"], optional): The name of the chat template to use. - Prevalent over `tokenizer.chat_template`. Defaults to `None`. + chat_template_name (str, optional): The name of the chat template to use. + Prevalent over `tokenizer.chat_template`. If `None`, the method will automatically detect the model family and use the appropriate template. + Defaults to `None`. continue_final_message (bool, optional): Whether to continue the final message. Defaults to `False`. tokenize (bool, optional): Whether to tokenize the output. Defaults to `False`. padding (bool | str, optional): The padding strategy to use. Defaults to `False`. @@ -308,9 +568,14 @@ def apply_chat_template( This functionality is only available for chat templates that support it via the `{% generation %}` keyword. Defaults to `False`. - .. note:: By default, the `"qwen"` chat template does not support this functionality. 
A modified version of the template - can be used by setting `chat_template_name="qwen"`, which will override the default template from the tokenizer. - For other tokenizers, similar edits can be made to the template and passed to the method via the `chat_template` argument. + .. note:: Assistant token masking is supported across multiple model families: + - **Qwen family**: Uses custom template with full tool calling support + - **DialoGPT family**: Uses custom template for conversation format + - **Falcon family**: Uses custom template for instruction format + - **DeepSeek family**: Uses custom template with native format + - **Other models**: Use the default `chatml_format` template + + The method automatically detects the model family and selects the appropriate template. **kwargs: Additional keyword arguments to pass to the tokenizer `apply_chat_template` method. @@ -325,13 +590,54 @@ def apply_chat_template( raise RuntimeError( "You must specify a tokenizer to use when chat_template is not specified." ) - elif "qwen" in getattr(tokenizer, "name_or_path", "").lower(): - # We prefer our implementation of the Qwen template, - # since it accounts for the assistant's masking. - chat_template = _CHAT_TEMPLATES["qwen"] - chat_template_name = None else: - chat_template = tokenizer.chat_template + # Auto-detect model family and use appropriate template + model_name = getattr(tokenizer, "name_or_path", "").lower() + + # First check for custom model family keywords + custom_template_found = False + for template_name, keywords in _CUSTOM_MODEL_FAMILY_KEYWORDS.items(): + if any(keyword.lower() in model_name for keyword in keywords): + chat_template = _CHAT_TEMPLATES[template_name] + chat_template_name = None + custom_template_found = True + break + + if not custom_template_found: + # Fall back to built-in model family detection + if "qwen" in model_name: + # We prefer our implementation of the Qwen template, + # since it accounts for the assistant's masking. 
+ chat_template = _CHAT_TEMPLATES["qwen"] + chat_template_name = None + elif "dialogpt" in model_name or "microsoft/dialo" in model_name: + # DialoGPT family - use our custom template + chat_template = _CHAT_TEMPLATES["dialogpt"] + chat_template_name = None + elif "falcon" in model_name or "tiiuae/falcon" in model_name: + # Falcon family - use our custom template + chat_template = _CHAT_TEMPLATES["falcon"] + chat_template_name = None + elif "deepseek" in model_name: + # DeepSeek family - use our custom template with generation keyword + chat_template = _CHAT_TEMPLATES["deepseek"] + chat_template_name = None + elif "llama" in model_name: + # Llama family - use our custom template + chat_template = _CHAT_TEMPLATES["llama"] + chat_template_name = None + else: + # For other models, check if their default template supports generation + if ( + hasattr(tokenizer, "chat_template") + and tokenizer.chat_template + and "{% generation %}" in tokenizer.chat_template + ): + # Use the model's own template if it supports generation + chat_template = tokenizer.chat_template + else: + # Use our default chatml_format template + chat_template = _CHAT_TEMPLATES["chatml_format"] if chat_template is None: chat_template = _CHAT_TEMPLATES["chatml_format"] if tokenize is None: @@ -402,26 +708,65 @@ def apply_chat_template( def from_text( cls, text: str | list[str], - chat_template_name: Literal["chatml_format", "qwen"] | None = None, + chat_template_name: str | None = None, + # currently without effect chat_template: str | None = None, tokenizer: transformers.AutoTokenizer # noqa: F821 | transformers.AutoProcessor # noqa: F821 | None = None, ) -> History: - if chat_template_name is None and chat_template is None: - if "qwen" in getattr(tokenizer, "name_or_path", "").lower(): - # We can automatically detect the template name from the tokenizer - # and use the precoded parser. 
- chat_template_name = "qwen" - else: - chat_template_name = "chatml_format" - elif chat_template_name in ("chatml_format",): + if chat_template_name is None: + if chat_template is not None: + # TODO: find best match given template + pass + + model_name = getattr(tokenizer, "name_or_path", "").lower() + # First check for custom model family keywords + custom_template_found = False + for template_name, keywords in _CUSTOM_MODEL_FAMILY_KEYWORDS.items(): + if any(keyword.lower() in model_name for keyword in keywords): + chat_template_name = template_name + custom_template_found = True + break + + if not custom_template_found: + # Fall back to built-in model family detection + if "qwen" in model_name: + # We can automatically detect the template name from the tokenizer + # and use the precoded parser. + chat_template_name = "qwen" + elif "dialogpt" in model_name or "microsoft/dialo" in model_name: + chat_template_name = "dialogpt" + elif "falcon" in model_name or "tiiuae/falcon" in model_name: + chat_template_name = "falcon" + elif "deepseek" in model_name: + chat_template_name = "deepseek" + elif "llama" in model_name: + chat_template_name = "llama" + else: + chat_template_name = "chatml_format" + + # Get the appropriate inverse parser function + if chat_template_name in ("chatml_format",): func = cls._inv_chatml elif chat_template_name in ("qwen",): func = cls._inv_qwen + elif chat_template_name in ("dialogpt",): + func = cls._inv_dialogpt + elif chat_template_name in ("falcon",): + func = cls._inv_falcon + elif chat_template_name in ("deepseek",): + func = cls._inv_deepseek + elif chat_template_name in ("llama",): + func = cls._inv_llama + elif chat_template_name in _CUSTOM_INVERSE_PARSERS: + # Use custom inverse parser + func = _CUSTOM_INVERSE_PARSERS[chat_template_name] else: raise NotImplementedError( - "chat_template_name must be one of ('chatml_format', 'qwen')" + f"chat_template_name '{chat_template_name}' is not supported. 
"
+ "Supported templates: 'chatml_format', 'qwen', 'dialogpt', 'falcon', 'deepseek', 'llama'. "
+ "Use add_chat_template() to add custom templates."
)
if isinstance(text, list):
list_of_histories = [func(t) for t in text]
@@ -598,6 +943,218 @@ def _inv_qwen(cls, template):
return lazy_stack(parsed_messages)
+ @classmethod
+ def _inv_dialogpt(cls, text: str) -> History:
+ """Inverts a DialogPT string into a History object.
+
+ Args:
+ text (str): The DialogPT string to invert.
+
+ Returns:
+ History: The inverted History object.
+ """
+ torchrl_logger.debug(f"Inverting DialogPT:\n{text}")
+
+ # DialogPT format is simple: alternating user/assistant messages
+ # Split by lines and parse
+ lines = text.strip().split("\n")
+ parsed_messages = []
+
+ for line in lines:
+ line = line.strip()
+ if not line:
+ continue
+
+ # Determine role based on content
+ if line.startswith("Assistant:"):
+ role = "assistant"
+ content = line[len("Assistant:") :].strip()
+ elif line.startswith("User:"):
+ role = "user"
+ content = line[len("User:") :].strip()
+ else:
+ # Default to user if no prefix
+ role = "user"
+ content = line
+
+ message_dict = {
+ "role": role,
+ "content": content,
+ "is_complete": True, # DialogPT doesn't have explicit end tokens
+ "tool_calls": None,
+ "tool_responses": None,
+ }
+
+ parsed_messages.append(cls(**message_dict))
+
+ if not parsed_messages:
+ raise RuntimeError(f"Couldn't get a single item out of text {text}.")
+
+ return lazy_stack(parsed_messages)
+
+ @classmethod
+ def _inv_falcon(cls, text: str) -> History:
+ """Inverts a Falcon string into a History object.
+
+ Args:
+ text (str): The Falcon string to invert.
+
+ Returns:
+ History: The inverted History object.
+ """
+ torchrl_logger.debug(f"Inverting Falcon:\n{text}")
+
+ # Falcon format: "User: ... Assistant: ..." 
+ # Split by "User:" and "Assistant:" prefixes
+ import re
+
+ # Pattern to match User: and Assistant: messages
+ pattern = r"(User:|Assistant:)\s*(.*?)(?=(User:|Assistant:)|$)"
+ matches = re.findall(pattern, text, re.DOTALL)
+
+ parsed_messages = []
+ for match in matches:
+ if len(match) < 2:
+ continue
+ prefix, content = match[0], match[1]
+ content = content.strip()
+ if not content:
+ continue
+
+ if prefix == "User:":
+ role = "user"
+ elif prefix == "Assistant:":
+ role = "assistant"
+ else:
+ continue
+
+ message_dict = {
+ "role": role,
+ "content": content,
+ "is_complete": True, # Falcon doesn't have explicit end tokens
+ "tool_calls": None,
+ "tool_responses": None,
+ }
+
+ parsed_messages.append(cls(**message_dict))
+
+ if not parsed_messages:
+ raise RuntimeError(f"Couldn't get a single item out of text {text}.")
+
+ return lazy_stack(parsed_messages)
+
+ @classmethod
+ def _inv_deepseek(cls, text: str) -> History:
+ """Inverts a DeepSeek string into a History object.
+
+ Args:
+ text (str): The DeepSeek string to invert.
+
+ Returns:
+ History: The inverted History object.
+ """
+ torchrl_logger.debug(f"Inverting DeepSeek:\n{text}")
+ import re
+
+ # Remove leading/trailing special tokens (e.g. 
+ text = re.sub(r"^<[^>]+>", "", text) # Remove leading <...> + text = re.sub(r"<[^>]+>$", "", text) # Remove trailing <...> + # Remove any REDACTED_SPECIAL_TOKEN if present + text = re.sub(r"REDACTED_SPECIAL_TOKEN", "", text) + # Pattern to match User: and Assistant: messages + pattern = r"(User:|Assistant:)\s*(.*?)(?=(User:|Assistant:)|$)" + matches = re.findall(pattern, text, re.DOTALL) + parsed_messages = [] + for match in matches: + if len(match) < 2: + continue + prefix, content = match[0], match[1] + content = content.strip() + if not content: + continue + if prefix == "User:": + role = "user" + elif prefix == "Assistant:": + role = "assistant" + else: + continue + message_dict = { + "role": role, + "content": content, + "is_complete": True, # DeepSeek doesn't have explicit end tokens + "tool_calls": None, + "tool_responses": None, + } + parsed_messages.append(cls(**message_dict)) + if not parsed_messages: + raise RuntimeError(f"Couldn't get a single item out of text {text}.") + return lazy_stack(parsed_messages) + + @classmethod + def _inv_llama(cls, text: str) -> History: + import re + + messages = [] + + # Remove BOS token if present + if text.startswith("<|begin_of_text|>"): + text = text[len("<|begin_of_text|>") :] + + # Pattern to match complete message blocks: <|header_start|>role<|header_end|>\n\ncontent<|eot|> + complete_pattern = r"<\|header_start\|>(\w+)<\|header_end\|>\n\n(.*?)<\|eot\|>" + complete_matches = re.findall(complete_pattern, text, re.DOTALL) + + # Pattern to match incomplete message blocks: <|header_start|>role<|header_end|>\n\ncontent (without <|eot|>) + incomplete_pattern = r"<\|header_start\|>(\w+)<\|header_end\|>\n\n(.*?)$" + + # Find any incomplete message at the end + incomplete_matches = [] + if complete_matches: + # Look for incomplete message after the last complete one + last_complete_end = text.rfind("<|eot|>") + if last_complete_end != -1: + remaining_text = text[last_complete_end + len("<|eot|>") :] + if 
remaining_text.strip(): + incomplete_match = re.search( + incomplete_pattern, remaining_text, re.DOTALL + ) + if incomplete_match: + incomplete_matches = [ + ( + incomplete_match.group(1), + incomplete_match.group(2), + False, + ) + ] + else: + # No complete messages, check entire text for incomplete message + incomplete_match = re.search(incomplete_pattern, text, re.DOTALL) + if incomplete_match: + incomplete_matches = [ + (incomplete_match.group(1), incomplete_match.group(2), False) + ] + + # Process complete messages + for role, content in complete_matches: + if content.strip(): + messages.append( + cls(role=role, content=content.strip(), is_complete=True) + ) + + # Process incomplete messages + for role, content, is_complete in incomplete_matches: + if content.strip(): + messages.append( + cls(role=role, content=content.strip(), is_complete=is_complete) + ) + + if not messages: + raise RuntimeError(f"Couldn't parse Llama format from text: {text}") + + from tensordict import lazy_stack + + return lazy_stack(messages) + def append( self, history: History, *, inplace: bool = True, dim: int = -1 ) -> History: diff --git a/torchrl/data/tensor_specs.py b/torchrl/data/tensor_specs.py index b9546a485a9..02e1572aec1 100644 --- a/torchrl/data/tensor_specs.py +++ b/torchrl/data/tensor_specs.py @@ -36,6 +36,7 @@ import torch from tensordict import ( is_tensor_collection, + lazy_stack, LazyStackedTensorDict, NonTensorData, NonTensorStack, @@ -2747,6 +2748,13 @@ class NonTensor(TensorSpec): batched (bool, optional): Indicates whether the data is batched. If `True`, the `rand`, `zero`, and `one` methods will generate data with an additional batch dimension, stacking copies of the `example_data` across this dimension. Defaults to `False`. + Exclusive with `feature_dims`. + feature_dims (int, optional): The number of dimensions that are features. + The feature dimensions are the trailing dimensions that are not batch dimensions. 
+ Every feature dimension is included in a single NonTensorData object, whereas these + are stacked across the batch dimension. + Exclusive with `batched`. + Defaults to `None` (all if batched=False, none if batched=True). **kwargs: Additional keyword arguments passed to the parent class. .. seealso:: :class:`~torchrl.data.Choice` which allows to randomly choose among different specs when calling @@ -2773,7 +2781,8 @@ def __init__( device: DEVICE_TYPING | None = None, dtype: torch.dtype | None = None, example_data: Any = None, - batched: bool = False, + batched: bool | None = None, + feature_dims: int | None = None, **kwargs, ): if isinstance(shape, int): @@ -2784,7 +2793,17 @@ def __init__( shape=shape, space=None, device=device, dtype=dtype, domain=domain, **kwargs ) self.example_data = example_data + if batched is None and feature_dims is None: + batched = False + feature_dims = len(self.shape) + elif batched is None and feature_dims is not None: + batched = False + elif batched is not None and feature_dims is not None: + raise ValueError("Cannot specify both batched and feature_dims.") + else: + feature_dims = 0 if batched else len(self.shape) self.batched = batched + self.feature_dims = feature_dims self.encode = self._encode_eager def __repr__(self): @@ -2835,7 +2854,7 @@ def to(self, dest: torch.dtype | DEVICE_TYPING) -> NonTensor: device=dest_device, dtype=None, example_data=self.example_data, - batched=self.batched, + feature_dims=self.feature_dims, ) def clone(self) -> NonTensor: @@ -2844,29 +2863,32 @@ def clone(self) -> NonTensor: device=self.device, dtype=self.dtype, example_data=self.example_data, - batched=self.batched, + feature_dims=self.feature_dims, ) def rand(self, shape=None): if shape is None: shape = () if self.batched: - with set_capture_non_tensor_stack(False): - val = NonTensorData( - data=self.example_data, - batch_size=(), - device=self.device, - ) - shape = (*shape, *self._safe_shape) - if shape: - for i in shape: - val = 
torch.stack([val.copy() for _ in range(i)], -1)
- return val
- return NonTensorData(
- data=self.example_data,
- batch_size=(*shape, *self._safe_shape),
- device=self.device,
- )
+ # feature dim is None
+ feature_dims = 0
+ else:
+ feature_dims = self.feature_dims
+ if isinstance(shape, int):
+ shape = _size([shape])
+ total_shape = (*shape, *self._safe_shape)
+ batch_shape = total_shape[: len(total_shape) - feature_dims]
+ feature_shape = total_shape[len(total_shape) - feature_dims :]
+ with set_capture_non_tensor_stack(False):
+ val = NonTensorData(
+ data=self.example_data,
+ batch_size=feature_shape,
+ device=self.device,
+ )
+ if batch_shape:
+ for i in reversed(batch_shape):
+ val = lazy_stack([val.copy() for _ in range(i)])
+ return val
def zero(self, shape=None):
return self.rand(shape=shape)
@@ -2877,10 +2899,18 @@ def one(self, shape=None):
def is_in(self, val: Any) -> bool:
if not isinstance(val, torch.Tensor) and not is_tensor_collection(val):
return True
- shape = torch.broadcast_shapes(self._safe_shape, val.shape)
+ # Since we don't really share Nontensor across processes, it's ok to modify the shape
+ # We do this when the shape has been determined by a single sample gathered
+ # from a dataloader, but shapes of the non-tensor may actually vary. 
+ if any(v < 0 for v in val.shape): + self.shape = torch.Size( + (self.shape[i] if s >= 0 else -1 for i, s in enumerate(val.shape)) + ) + _safe_val_shape = torch.Size(s if s >= 0 else 1 for s in val.shape) + shape = torch.broadcast_shapes(self._safe_shape, _safe_val_shape) return ( is_non_tensor(val) - and val.shape == shape + and _safe_val_shape == shape # We relax constrains on device as they're hard to enforce for non-tensor # tensordicts and pointless # and val.device == self.device @@ -2904,18 +2934,20 @@ def expand(self, *shape): device=self.device, dtype=None, example_data=self.example_data, - batched=self.batched, + feature_dims=self.feature_dims, ) def unsqueeze(self, dim: int) -> NonTensor: unsq = super().unsqueeze(dim=dim) unsq.example_data = self.example_data + unsq.feature_dims = self.feature_dims unsq.batched = self.batched return unsq def squeeze(self, dim: int | None = None) -> NonTensor: sq = super().squeeze(dim=dim) sq.example_data = self.example_data + sq.feature_dims = self.feature_dims sq.batched = self.batched return sq @@ -2925,7 +2957,7 @@ def _reshape(self, shape): device=self.device, dtype=self.dtype, example_data=self.example_data, - batched=self.batched, + feature_dims=self.feature_dims, ) def _unflatten(self, dim, sizes): @@ -2935,7 +2967,7 @@ def _unflatten(self, dim, sizes): device=self.device, dtype=self.dtype, example_data=self.example_data, - batched=self.batched, + feature_dims=self.feature_dims, ) def __getitem__(self, idx: SHAPE_INDEX_TYPING): @@ -2946,7 +2978,7 @@ def __getitem__(self, idx: SHAPE_INDEX_TYPING): device=self.device, dtype=self.dtype, example_data=self.example_data, - batched=self.batched, + feature_dims=self.feature_dims, ) def unbind(self, dim: int = 0): @@ -2964,7 +2996,7 @@ def unbind(self, dim: int = 0): device=self.device, dtype=self.dtype, example_data=self.example_data, - batched=self.batched, + feature_dims=self.feature_dims, ) for i in range(self.shape[dim]) ) @@ -2980,7 +3012,12 @@ def _encode_eager( *, 
ignore_device: bool = False, ) -> torch.Tensor | TensorDictBase: - return NonTensorData(val, device=self.device, batch_size=self.shape) + return NonTensorData( + val, + device=self.device, + batch_size=self.shape, + feature_dims=self.feature_dims, + ) class _UnboundedMeta(abc.ABCMeta): @@ -4969,6 +5006,9 @@ class Composite(TensorSpec): to the batch-size of the corresponding tensordicts. data_cls (type, optional): the tensordict subclass (TensorDict, TensorClass, tensorclass...) that should be enforced in the env. Defaults to ``None``. + step_mdp_static (bool, optional): whether the spec is static under step_mdp. Defaults to ``False``. + Defining a `Composite` as a step_mdp_static spec will make it so that the entire related TensorDict/TensorClass + instance is copied during calls to `step_mdp` - and not updated in-place. Examples: >>> pixels_spec = Bounded( @@ -5044,6 +5084,7 @@ def __init__( shape: tuple | torch.Size | None = None, device: torch.device | None = None, data_cls: type | None = None, + step_mdp_static: bool = False, **kwargs, ): # For compatibility with TensorDict @@ -5057,6 +5098,7 @@ def __init__( shape = _size(()) self._shape = _size(shape) self._specs = {} + self.step_mdp_static = step_mdp_static _device = ( _make_ordinal_device(torch.device(device)) if device is not None else device @@ -5548,6 +5590,7 @@ def keys( leaves_only: bool = False, *, is_leaf: Callable[[type], bool] | None = None, + step_mdp_static_only: bool = False, ) -> _CompositeSpecKeysView: # noqa: D417 """Keys of the Composite. @@ -5568,6 +5611,8 @@ def keys( is_leaf (callable, optional): reads a type and returns a boolean indicating if that type should be seen as a leaf. By default, all non-Composite nodes are considered as leaves. + step_mdp_static_only (bool, optional): if ``True``, only keys that are static under step_mdp will be returned. + Default is ``False``. 
""" return _CompositeSpecItemsView( @@ -5575,6 +5620,7 @@ def keys( include_nested=include_nested, leaves_only=leaves_only, is_leaf=is_leaf, + step_mdp_static_only=step_mdp_static_only, )._keys() def items( @@ -5583,6 +5629,7 @@ def items( leaves_only: bool = False, *, is_leaf: Callable[[type], bool] | None = None, + step_mdp_static_only: bool = False, ) -> _CompositeSpecItemsView: # noqa: D417 """Items of the Composite. @@ -5601,12 +5648,15 @@ def items( is_leaf (callable, optional): reads a type and returns a boolean indicating if that type should be seen as a leaf. By default, all non-Composite nodes are considered as leaves. + step_mdp_static_only (bool, optional): if ``True``, only keys that are static under step_mdp will be returned. + Default is ``False``. """ return _CompositeSpecItemsView( self, include_nested=include_nested, leaves_only=leaves_only, is_leaf=is_leaf, + step_mdp_static_only=step_mdp_static_only, ) def values( @@ -5615,6 +5665,7 @@ def values( leaves_only: bool = False, *, is_leaf: Callable[[type], bool] | None = None, + step_mdp_static_only: bool = False, ) -> _CompositeSpecValuesView: # noqa: D417 """Values of the Composite. @@ -5633,24 +5684,31 @@ def values( is_leaf (callable, optional): reads a type and returns a boolean indicating if that type should be seen as a leaf. By default, all non-Composite nodes are considered as leaves. + step_mdp_static_only (bool, optional): if ``True``, only keys that are static under step_mdp will be returned. + Default is ``False``. 
""" return _CompositeSpecItemsView( self, include_nested=include_nested, leaves_only=leaves_only, is_leaf=is_leaf, + step_mdp_static_only=step_mdp_static_only, )._values() - def _reshape(self, shape): + def _reshape(self, shape: torch.Size) -> Composite: _specs = { key: val.reshape((*shape, *val.shape[self.ndimension() :])) for key, val in self._specs.items() } return self.__class__( - _specs, shape=shape, device=self.device, data_cls=self.data_cls + _specs, + shape=shape, + device=self.device, + data_cls=self.data_cls, + step_mdp_static=self.step_mdp_static, ) - def _unflatten(self, dim, sizes): + def _unflatten(self, dim: int, sizes: tuple[int, ...]) -> Composite: shape = torch.zeros(self.shape, device="meta").unflatten(dim, sizes).shape return self._reshape(shape) @@ -5669,7 +5727,11 @@ def to(self, dest: torch.dtype | DEVICE_TYPING) -> Composite: continue kwargs[key] = value.to(dest) return self.__class__( - **kwargs, device=self.device, shape=self.shape, data_cls=self.data_cls + **kwargs, + device=self.device, + shape=self.shape, + data_cls=self.data_cls, + step_mdp_static=self.step_mdp_static, ) if not isinstance(dest, (str, int, torch.device)): raise ValueError( @@ -5687,7 +5749,11 @@ def to(self, dest: torch.dtype | DEVICE_TYPING) -> Composite: continue kwargs[key] = value.to(dest) return self.__class__( - **kwargs, device=_device, shape=self.shape, data_cls=self.data_cls + **kwargs, + device=_device, + shape=self.shape, + data_cls=self.data_cls, + step_mdp_static=self.step_mdp_static, ) def clone(self) -> Composite: @@ -5707,6 +5773,7 @@ def clone(self) -> Composite: device=device, shape=self.shape, data_cls=self.data_cls, + step_mdp_static=self.step_mdp_static, ) def cardinality(self) -> int: @@ -5754,7 +5821,7 @@ def enumerate(self, use_mask: bool = False) -> TensorDictBase: samples = cls.from_dict({}, batch_size=self.shape, device=self.device) return samples - def empty(self): + def empty(self) -> Composite: """Create a spec like self, but with no 
entries.""" try: device = self.device @@ -5765,6 +5832,7 @@ def empty(self): device=device, shape=self.shape, data_cls=self.data_cls, + step_mdp_static=self.step_mdp_static, ) def to_numpy(self, val: TensorDict, safe: bool | None = None) -> dict: @@ -5793,7 +5861,7 @@ def zero(self, shape: torch.Size = None) -> TensorDictBase: device=device, ) - def __eq__(self, other): + def __eq__(self, other: object) -> bool: return ( type(self) == type(other) and self.shape == other.shape @@ -5828,7 +5896,7 @@ def update(self, dict_or_spec: Composite | dict[str, TensorSpec]) -> None: self[key] = item return self - def expand(self, *shape): + def expand(self, *shape: tuple[int, ...] | torch.Size) -> Composite: if len(shape) == 1 and isinstance(shape[0], (tuple, list, torch.Size)): shape = shape[0] if any(s1 != s2 and s2 != 1 for s1, s2 in zip(shape[-self.ndim :], self.shape)): @@ -5851,10 +5919,11 @@ def expand(self, *shape): shape=shape, device=device, data_cls=self.data_cls, + step_mdp_static=self.step_mdp_static, ) return out - def squeeze(self, dim: int | None = None): + def squeeze(self, dim: int | None = None) -> Composite: if dim is not None: if dim < 0: dim += len(self.shape) @@ -5873,6 +5942,7 @@ def squeeze(self, dim: int | None = None): shape=shape, device=device, data_cls=self.data_cls, + step_mdp_static=self.step_mdp_static, ) if self.shape.count(1) == 0: @@ -5884,7 +5954,7 @@ def squeeze(self, dim: int | None = None): out = self.squeeze(self.shape.index(1)) return out.squeeze() - def unsqueeze(self, dim: int): + def unsqueeze(self, dim: int) -> Composite: if dim < 0: dim += len(self.shape) + 1 @@ -5903,9 +5973,10 @@ def unsqueeze(self, dim: int): shape=shape, device=device, data_cls=self.data_cls, + step_mdp_static=self.step_mdp_static, ) - def unbind(self, dim: int = 0): + def unbind(self, dim: int = 0) -> tuple[Composite, ...]: orig_dim = dim if dim < 0: dim = len(self.shape) + dim @@ -5921,6 +5992,7 @@ def unbind(self, dim: int = 0): shape=shape, 
device=self.device, data_cls=self.data_cls, + step_mdp_static=self.step_mdp_static, ) for i in range(self.shape[dim]) ) @@ -5937,14 +6009,14 @@ def is_locked(self, value: bool) -> None: else: self.unlock_() - def __getstate__(self): + def __getstate__(self) -> dict: result = self.__dict__.copy() __lock_parents_weakrefs = result.pop("__lock_parents_weakrefs", None) if __lock_parents_weakrefs is not None: result["_lock_recurse"] = True return result - def __setstate__(self, state): + def __setstate__(self, state: dict) -> None: _lock_recurse = state.pop("_lock_recurse", False) for key, value in state.items(): setattr(self, key, value) @@ -5953,8 +6025,12 @@ def __setstate__(self, state): self.lock_(recurse=_lock_recurse) def _propagate_lock( - self, *, recurse: bool, lock_parents_weakrefs=None, is_compiling - ): + self, + *, + recurse: bool, + lock_parents_weakrefs: list[weakref.ref] | None = None, + is_compiling: bool, + ) -> None: """Registers the parent composite that handles the lock.""" self._is_locked = True if lock_parents_weakrefs is not None: @@ -5984,7 +6060,7 @@ def _propagate_lock( ) @property - def _lock_parents_weakrefs(self): + def _lock_parents_weakrefs(self) -> list[weakref.ref]: _lock_parents_weakrefs = self.__dict__.get("__lock_parents_weakrefs") if _lock_parents_weakrefs is None: self.__dict__["__lock_parents_weakrefs"] = [] @@ -5992,10 +6068,10 @@ def _lock_parents_weakrefs(self): return _lock_parents_weakrefs @_lock_parents_weakrefs.setter - def _lock_parents_weakrefs(self, value: list): + def _lock_parents_weakrefs(self, value: list[weakref.ref]) -> None: self.__dict__["__lock_parents_weakrefs"] = value - def lock_(self, recurse: bool | None = None) -> T: + def lock_(self, recurse: bool | None = None) -> None: """Locks the Composite and prevents modification of its content. The recurse argument control whether the lock will be propagated to sub-specs. 
@@ -6045,7 +6121,7 @@ def lock_(self, recurse: bool | None = None) -> T: self._propagate_lock(recurse=recurse, is_compiling=is_comp) return self - def _propagate_unlock(self, recurse: bool): + def _propagate_unlock(self, recurse: bool) -> list[Composite]: # if we end up here, we can clear the graph associated with this td self._is_locked = False @@ -6061,7 +6137,7 @@ def _propagate_unlock(self, recurse: bool): return sub_specs return [] - def _check_unlock(self, first_attempt=True): + def _check_unlock(self, first_attempt: bool = True) -> None: if not first_attempt: gc.collect() obj = None @@ -6208,12 +6284,14 @@ def keys( leaves_only: bool = False, *, is_leaf: Callable[[type], bool] | None = None, + step_mdp_static_only: bool = False, ) -> _CompositeSpecKeysView: return _CompositeSpecItemsView( self, include_nested=include_nested, leaves_only=leaves_only, is_leaf=is_leaf, + step_mdp_static_only=step_mdp_static_only, )._keys() def items( @@ -6222,6 +6300,7 @@ def items( leaves_only: bool = False, *, is_leaf: Callable[[type], bool] | None = None, + step_mdp_static_only: bool = False, ) -> _CompositeSpecItemsView: return list( _CompositeSpecItemsView( @@ -6229,6 +6308,7 @@ def items( include_nested=include_nested, leaves_only=leaves_only, is_leaf=is_leaf, + step_mdp_static_only=step_mdp_static_only, ) ) @@ -6238,12 +6318,14 @@ def values( leaves_only: bool = False, *, is_leaf: Callable[[type], bool] | None = None, + step_mdp_static_only: bool = False, ) -> _CompositeSpecValuesView: return _CompositeSpecItemsView( self, include_nested=include_nested, leaves_only=leaves_only, is_leaf=is_leaf, + step_mdp_static_only=step_mdp_static_only, )._values() def project(self, val: TensorDictBase) -> TensorDictBase: @@ -6634,15 +6716,17 @@ class _CompositeSpecItemsView: def __init__( self, composite: Composite, - include_nested, - leaves_only, + include_nested: bool, + leaves_only: bool, *, - is_leaf, + is_leaf: Callable[[type], bool] | None, + step_mdp_static_only: bool = False, 
): self.composite = composite self.leaves_only = leaves_only self.include_nested = include_nested self.is_leaf = is_leaf + self.step_mdp_static_only = step_mdp_static_only def __iter__(self): from tensordict.base import _NESTED_TENSORS_AS_LISTS @@ -6662,23 +6746,29 @@ def _iter_from_item(key, item): include_nested=True, leaves_only=self.leaves_only, is_leaf=is_leaf, + step_mdp_static_only=self.step_mdp_static_only, ): if not isinstance(subkey, tuple): subkey = (subkey,) yield (key, *subkey), subitem - if not self.leaves_only and not _is_leaf(type(item)): + if ( + (self.step_mdp_static_only and getattr(item, "step_mdp_static", False)) + or (not self.leaves_only and not _is_leaf(type(item))) + or (not self.leaves_only or _is_leaf(type(item))) + ): yield (key, item) - elif not self.leaves_only or _is_leaf(type(item)): - yield key, item - for key, item in self._get_composite_items(is_leaf): - if is_leaf is _NESTED_TENSORS_AS_LISTS and isinstance( - item, _LazyStackedMixin - ): - for (i, spec) in enumerate(item._specs): - yield from _iter_from_item(unravel_key((key, str(i))), spec) - else: - yield from _iter_from_item(key, item) + if not self.step_mdp_static_only or not getattr( + self.composite, "step_mdp_static", False + ): + for key, item in self._get_composite_items(is_leaf): + if is_leaf is _NESTED_TENSORS_AS_LISTS and isinstance( + item, _LazyStackedMixin + ): + for (i, spec) in enumerate(item._specs): + yield from _iter_from_item(unravel_key((key, str(i))), spec) + else: + yield from _iter_from_item(key, item) def _get_composite_items(self, is_leaf): diff --git a/torchrl/envs/common.py b/torchrl/envs/common.py index b84cee1f75f..9f0d6a6b7b2 100644 --- a/torchrl/envs/common.py +++ b/torchrl/envs/common.py @@ -7,6 +7,7 @@ import abc import warnings +import weakref from copy import deepcopy from functools import partial, wraps from typing import Any, Callable, Iterator @@ -539,6 +540,25 @@ def __init__( self._run_type_checks = run_type_checks 
self._allow_done_after_reset = allow_done_after_reset + _collector: weakref.ReferenceType[ + LLMCollector # noqa: F821 # type: ignore + ] | None = None + + def register_collector( + self, collector: DataCollectorBase # noqa: F821 # type: ignore + ): + """Registers a collector with the environment. + + Args: + collector (DataCollectorBase): The collector to register. + """ + self._collector = weakref.ref(collector) + + @property + def collector(self) -> DataCollectorBase | None: # noqa: F821 # type: ignore + """Returns the collector associated with the container, if it exists.""" + return self._collector() if self._collector is not None else None + def set_spec_lock_(self, mode: bool = True) -> EnvBase: """Locks or unlocks the environment's specs. @@ -1222,6 +1242,56 @@ def observation_keys(self) -> list[NestedKey]: ) return observation_keys + @property + @_cache_value + def _observation_keys_step_mdp(self) -> list[NestedKey]: + """The observation keys of an environment that are static under step_mdp (i.e. to be copied as-is during step_mdp).""" + observation_keys_leaves = sorted( + self.full_observation_spec.keys(True, True, step_mdp_static_only=True), + key=_repr_by_depth, + ) + return observation_keys_leaves + + @property + @_cache_value + def _state_keys_step_mdp(self) -> list[NestedKey]: + """The state keys of an environment that are static under step_mdp (i.e. to be copied as-is during step_mdp).""" + state_keys_leaves = sorted( + self.full_state_spec.keys(True, True, step_mdp_static_only=True), + key=_repr_by_depth, + ) + return state_keys_leaves + + @property + @_cache_value + def _action_keys_step_mdp(self) -> list[NestedKey]: + """The action keys of an environment that are static under step_mdp (i.e. 
to be copied as-is during step_mdp).""" + action_keys_leaves = sorted( + self.full_action_spec.keys(True, True, step_mdp_static_only=True), + key=_repr_by_depth, + ) + return action_keys_leaves + + @property + @_cache_value + def _done_keys_step_mdp(self) -> list[NestedKey]: + """The done keys of an environment that are static under step_mdp (i.e. to be copied as-is during step_mdp).""" + done_keys_leaves = sorted( + self.full_done_spec.keys(True, True, step_mdp_static_only=True), + key=_repr_by_depth, + ) + return done_keys_leaves + + @property + @_cache_value + def _reward_keys_step_mdp(self) -> list[NestedKey]: + """The reward keys of an environment that are static under step_mdp (i.e. to be copied as-is during step_mdp).""" + reward_keys_leaves = sorted( + self.full_reward_spec.keys(True, True, step_mdp_static_only=True), + key=_repr_by_depth, + ) + return reward_keys_leaves + @property def reward_key(self): """The reward key of an environment. @@ -3409,7 +3479,7 @@ def step_mdp(self, next_tensordict: TensorDictBase) -> TensorDictBase: @property @_cache_value - def _step_mdp(self): + def _step_mdp(self) -> Callable[[TensorDictBase], TensorDictBase]: return _StepMDP(self, exclude_action=False) def _rollout_stop_early( @@ -3586,9 +3656,13 @@ def step_and_maybe_reset( # done and truncated are in done_keys # We read if any key is done. 
tensordict_ = self._step_mdp(tensordict) + # if self._post_step_mdp_hooks is not None: + # tensordict_ = self._post_step_mdp_hooks(tensordict_) tensordict_ = self.maybe_reset(tensordict_) return tensordict, tensordict_ + # _post_step_mdp_hooks: Callable[[TensorDictBase], TensorDictBase] | None = None + @property @_cache_value def _simple_done(self): diff --git a/torchrl/envs/llm/__init__.py b/torchrl/envs/llm/__init__.py index 42d1098f9d6..38457d0dd62 100644 --- a/torchrl/envs/llm/__init__.py +++ b/torchrl/envs/llm/__init__.py @@ -20,9 +20,11 @@ as_padded_tensor, BrowserTransform, DataLoadingPrimer, + KLComputation, KLRewardTransform, MCPToolTransform, PythonInterpreter, + RetrieveKL, RetrieveLogProb, TemplateTransform, Tokenizer, @@ -33,12 +35,14 @@ "RetrieveLogProb", "ChatEnv", "DataLoadingPrimer", + "KLComputation", "DatasetChatEnv", "AddThinkingPrompt", "GSM8KEnv", "GSM8KPrepareQuestion", "GSM8KRewardParser", "IFEvalData", + "RetrieveKL", "IFEvalEnv", "IFEvalScoreData", "IfEvalScorer", diff --git a/torchrl/envs/llm/chat.py b/torchrl/envs/llm/chat.py index e89cde26d7c..5c4961e1c1d 100644 --- a/torchrl/envs/llm/chat.py +++ b/torchrl/envs/llm/chat.py @@ -4,134 +4,152 @@ # LICENSE file in the root directory of this source tree. 
from __future__ import annotations -import warnings - from typing import Any, Callable, Literal import torch -from tensordict import lazy_stack, TensorDict, TensorDictBase +from tensordict import lazy_stack, TensorDictBase +from tensordict.utils import _zip_strict from torch.utils.data import DataLoader from torchrl.data import Composite, NonTensor - -from torchrl.data.llm.chat import History +from torchrl.data.llm.history import History from torchrl.envs import EnvBase, TransformedEnv from torchrl.envs.llm.transforms.dataloading import DataLoadingPrimer +from torchrl.modules.llm.policies.common import ChatHistory, Text, Tokens + + +def _default_collate_fn(batch): + # We want to rename the "text" key to "query" + # otherwise it will conflict with the "text" key in the tensordict returned by TorchRL components + if isinstance(batch, dict) and "text" in batch: + batch["query"] = batch.pop("text") + elif isinstance(batch, list): + for item in batch: + if "text" in item: + item["query"] = item.pop("text") + return batch class ChatEnv(EnvBase): - r"""A chat-based environment. + r"""A chat-based environment for LLMs, designed as a blank canvas for conversation and RL. + + This environment is designed to work seamlessly with both :class:`~torchrl.modules.llm.policies.TransformersWrapper` and + :class:`~torchrl.modules.llm.policies.vLLMWrapper`. It provides the fundamental structure for managing conversation state + using the :class:`~torchrl.data.llm.History` format (or, alternatively, tokens or text), but is intentionally minimal to allow + maximum flexibility through transforms. + + Core Functionality + The environment operates in three main modes: + + - **History mode**: Uses :class:`~torchrl.data.llm.History` objects for conversation management + - **Text mode**: Uses simple text strings for input/output + - **Tokens mode**: Uses tokenized data for input/output + + Reset Operation + During reset, the environment: + + 1. 
Takes input text from the `data_key` (default: `"query"`) in the tensordict + 2. Creates a :class:`~torchrl.data.llm.History` object with the user's message + 3. Optionally prepends a system prompt if provided + 4. Formats the conversation according to the selected input mode (history, text, or tokens) + 5. Returns the formatted prompt ready for the LLM + + Step Operation + During step, the environment: + + 1. Takes the LLM's response (containing both prompt and generated text) + 2. Extracts the full conversation history + 3. Prepares the next prompt by setting the full history as the new prompt + 4. Returns the updated conversation state - ChatEnv relies on the :class:`~torchrl.data.llm.History` format to output observations framed as a chat between - various entities (typically with roles such as `"system"`, `"user"`, `"assistant"` etc.) + This design enables natural multi-turn conversations where each step extends the conversation + history, making it ideal for dialogue systems and reinforcement learning applications. - The step function will execute the following operations: + Integration with Transforms + ChatEnv is designed to be extended with transforms that add specific capabilities: - - Given a prompt (key `"text"`) and an answer string (key `"text_response"`, which is our action), the environment - will generate a single string that is the concatenation of the two. - - The text is fed to :meth:`torchrl.data.llm.History.from_text` to produce a full history of the chat so far. This - should hopefully match the state of the history in the previous step, plus an extra step generated by the new - action. - - The last item of that history is then appended to the previous history (we don't replace the history in case - it contains metadata that cannot be inferred directly from the prompt and response). - - Optionally, the history is mapped back to a `"text"` entry that can be used to query the LLM in the next round - of the policy. 
+ - **Reward computation**: :class:`~torchrl.envs.llm.transforms.KLRewardTransform` for KL divergence rewards + - **Tool execution**: :class:`~torchrl.envs.llm.transforms.PythonInterpreter` for Python code execution + - **Data loading**: :class:`~torchrl.envs.llm.transforms.DataLoadingPrimer` for loading prompts from datasets + - **Thinking prompts**: :class:`~torchrl.envs.llm.transforms.AddThinkingPrompt` for chain-of-thought reasoning - Args: + Keyword Args: + input_mode (Literal["history", "text", "tokens"]): The mode of input to the environment. + Defaults to `"history"`. batch_size (torch.Size): Expected batch size of the input. Defaults to `(1,)` (null batch sizes such as `()` are not recommended as they don't play well with generators). - system_prompt (str, optional): an optional `"system"` prompt string to use during reset calls. + system_prompt (str, optional): An optional `"system"` prompt string to use during reset calls. Defaults to `None`. - apply_template (bool, optional): if `True` (and a tokenizer is passed), the history will be parsed to a string - in the `"text"` entry of the output tensordict at reset time. Defaults to `False`. - - .. note:: If transforms are appended to the environment, the template will be applied to the history before the transform is applied. - As transforms can encode tools, this means that the text returned by the environment may be incomplete. - The :class:`~torchrl.modules.llm.vLLMWrapper` and :class:`~torchrl.modules.llm.TransformersWrapper` - will apply the template to the history when queried if no `"text"` input is provided. - - tokenizer (transformers.PreTrainedTokenizer, *optional*): A tokenizer that will be used to tokenize the text. + tokenizer (transformers.PreTrainedTokenizer, optional): A tokenizer that will be used to tokenize the text. Defaults to `None`. - template_kwargs (dict[str, any], optional): keyword arguments passed to :meth:`~torchrl.data.llm.History.apply_chat_template`. 
+ template_kwargs (dict[str, any], optional): Keyword arguments passed to :meth:`~torchrl.data.llm.History.apply_chat_template`. Defaults to `None`. - system_role (str, optional): the role of the system (at reset time). Defaults to `"system"`. - user_role (str, optional): the role of the user (at reset time). Defaults to `"user"`. - make_lazy (bool, optional): if `True`, the environment will return a lazy stack of tensordicts. This is the recommended setting - for training, since it allows for efficient batching of environment outputs that may have different shapes or contents. - Defaults to `True`. + system_role (str, optional): The role of the system (at reset time). Defaults to `"system"`. + user_role (str, optional): The role of the user (at reset time). Defaults to `"user"`. + policy_role (str, optional): The role of the policy/assistant. Defaults to `"assistant"`. + data_key (str, optional): The key of the data input to the env at reset time (from dataloader). Defaults to `"query"`. + device (torch.device, optional): The device to use for computations. Defaults to `None`. Methods: - reset (TensorDict): Resets the state of the environment. A tensordict or equivalent with a `"text"` entry must be passed. - step (TensorDict): Makes a step in the environment (see above for a description of what `step` does). - A tensordict or equivalent with a `"text"` entry must be passed. + reset (TensorDict): Resets the state of the environment. A tensordict or equivalent with a `"query"` entry + (originating from the dataloader) must be passed. This key name is defined as a class attribute `data_key`. + step (TensorDict): Makes a step in the environment. A tensordict or equivalent with the LLM's response must be passed. + The response key is defined as a class attribute `response_key`. .. seealso:: To see examples of a `ChatEnv` in action, see :class:`~torchrl.envs.llm.chat.DatasetChatEnv`, :class:`~torchrl.envs.llm.GSM8KEnv` and :class:`~torchrl.envs.llm.IFEvalEnv`. 
Examples: - >>> import pprint - >>> - >>> import transformers - >>> from tensordict import TensorDict, set_list_to_stack >>> from torchrl.envs.llm import ChatEnv - >>> set_list_to_stack(True).set() + >>> from torchrl.data.llm import History + >>> from tensordict import TensorDict + >>> + >>> # Create a basic chat environment + >>> env = ChatEnv( + ... system_prompt="You are a helpful assistant.", + ... input_mode="history" + ... ) >>> - >>> tokenizer = transformers.AutoTokenizer.from_pretrained("Qwen/Qwen2.5-3B") + >>> # Reset with a user query + >>> reset_data = TensorDict({"query": "Hello, how are you?"}, batch_size=(1,)) + >>> obs = env.reset(reset_data) + >>> print(obs["history"].prompt) # History with system prompt + user message >>> - >>> env = ChatEnv(batch_size=(1,), tokenizer=tokenizer, apply_template=True, system_prompt="I'm system, do what I want.") - >>> td_reset = env.reset(TensorDict(text=["I'm the user. I'm going to tell you a little about something."], batch_size=(1,))) - >>> pprint.pprint(f'{td_reset["history"]=}') - ('td_reset["history"]=History(\n' - ' content=NonTensorStack(\n' - ' [["I\'m system, do what I want.", "I\'m the user. I\'...,\n' - ' batch_size=torch.Size([1, 2]),\n' - ' device=None),\n' - ' role=NonTensorStack(\n' - " [['system', 'user']],\n" - ' batch_size=torch.Size([1, 2]),\n' - ' device=None),\n' - ' batch_size=torch.Size([1, 2]),\n' - ' device=None,\n' - ' is_shared=False)') - >>> pprint.pprint(f'{td_reset["text"]=}') - ('td_reset["text"]=["<|im_start|>system\\nI\'m system, do what I ' - "want.<|im_end|>\\n<|im_start|>user\\nI'm the user. 
I'm going to tell you a " - 'little about something.<|im_end|>\\n<|im_start|>assistant\\n"]') - >>> td_action = td_reset.set("text_response", ["This is the action from the assistant!<|im_end|>"]) - >>> td_next = env.step(td_action) - >>> pprint.pprint(f'{td_next["next", "history"]=}') - ('td_next["next", "history"]=History(\n' - ' content=NonTensorStack(\n' - ' [["I\'m system, do what I want.", "I\'m the user. I\'...,\n' - ' batch_size=torch.Size([1, 3]),\n' - ' device=None),\n' - ' role=NonTensorStack(\n' - " [['system', 'user', 'assistant']],\n" - ' batch_size=torch.Size([1, 3]),\n' - ' device=None),\n' - ' batch_size=torch.Size([1, 3]),\n' - ' device=None,\n' - ' is_shared=False)') - >>> pprint.pprint(f'{td_next["next", "text"]=}') - ('td_next["next", "text"]=["<|im_start|>system\\nI\'m system, do what I ' - "want.<|im_end|>\\n<|im_start|>user\\nI'm the user. I'm going to tell you a " - 'little about something.<|im_end|>\\n<|im_start|>assistant\\nThis is the ' - 'action from the assistant!<|im_end|>\\n<|im_start|>assistant\\n"]') + >>> # Simulate LLM response and step + >>> response_data = TensorDict({ + ... "history": History.from_chats([[ + ... {"role": "system", "content": "You are a helpful assistant."}, + ... {"role": "user", "content": "Hello, how are you?"}, + ... {"role": "assistant", "content": "I'm doing well, thank you!"} + ... ]]) + ... 
}, batch_size=(1,)) + >>> next_obs = env.step(response_data) + >>> print(next_obs["history"].prompt) # Full conversation history """ + # Nested key corresponding to the text input to the LLM + text_key = ("text", "prompt") + # Nested key corresponding to the response from the LLM + response_key = ("text", "response") + # Nested key corresponding to the data input to the env at reset time (from dataloader) + data_key = "query" + def __init__( self, + *, + input_mode: Literal["history", "text"] = "history", batch_size: tuple | torch.Size | None = None, system_prompt: str | None = None, - apply_template: bool | None = None, tokenizer: transformers.AutoTokenizer | None = None, # noqa: F821 template_kwargs: dict[str, Any] | None = None, system_role: str = "system", user_role: str = "user", policy_role: str | None = "assistant", - make_lazy: bool = True, + data_key: str | None = None, + device: torch.device | None = None, ): + self.input_mode = input_mode if batch_size is None: batch_size = (1,) if isinstance(batch_size, int): @@ -140,100 +158,185 @@ def __init__( batch_size = torch.Size(batch_size) if batch_size == (): raise ValueError(f"{type(self).__name__} must have at least one dimension") + if data_key is not None: + self.data_key = data_key + super().__init__(batch_size=batch_size, device=device) + self.batch_size = batch_size - super().__init__(batch_size=batch_size) - self.full_observation_spec = Composite( - history=History.default_spec(shape=batch_size + (-1,)), - shape=batch_size, - ) - self.full_state_spec = self.full_observation_spec.clone() - self.full_state_spec["text"] = NonTensor( - shape=self.batch_size, example_data="a string", device=self.device - ) self.system_prompt = system_prompt - self.apply_template = ( - apply_template or (template_kwargs is not None) or (tokenizer is not None) - ) - self.tokenizer = tokenizer + + self.system_prompt = system_prompt + if template_kwargs is None: template_kwargs = {} - # FIXME: what to do if True? 
- template_kwargs.setdefault("tokenize", False) self.template_kwargs = template_kwargs - if self.apply_template: - self.full_observation_spec["text"] = NonTensor( - shape=self.batch_size, example_data="a string", device=self.device - ) - self.full_action_spec = Composite( - text_response=NonTensor( - shape=self.batch_size, example_data="a string", device=self.device - ), - batch_size=self.batch_size, - ) + self.system_role = system_role self.user_role = user_role self.policy_role = policy_role - self.make_lazy = make_lazy + self.tokenizer = tokenizer - def _step(self, tensordict): - # Expect action to be a "text_response" string - action = tensordict["text_response"] - # Find the total text - text = tensordict["text"] - if isinstance(text, str): - text = [text] - action = [action] - text = [t + a for t, a in zip(text, action)] - # Convert text to a history - chat_template_name = None - if self.tokenizer is not None: - name_or_path = self.tokenizer.name_or_path - if "qwen" in name_or_path.lower(): - chat_template_name = "qwen" - parsed_history = History.from_text(text, chat_template_name=chat_template_name) - # Isolate last element, which should be our action - local_history = parsed_history[..., -1] - # Get previous history - history = tensordict["history"] - # Check that history has one more item than before - if history.shape[-1] <= parsed_history.shape[-1]: - warnings.warn( - "The parsed history has fewer or the same number than the last element in history." 
- ) - if self.policy_role is not None: - # Iterate over batch and check policy role - for lh in local_history.unbind(0): - if lh.role != self.policy_role: - raise ValueError( - "The role received in the last block parsed from the policy " - f"output does not match the expected policy role: received {lh.role} but expected {self.policy_role}.\n" - f"Parsed input: {text=}\n" - f"Parsed history: {parsed_history=}\n" - f"Final element: {local_history=}" - ) - # Append history item - history = history.append(local_history, inplace=False) - # FIXME: consider done to be always False - td_out = lazy_stack( - list( - TensorDict( - history=history, - done=torch.zeros(tensordict.shape + (1,), dtype=torch.bool), - batch_size=self.batch_size, - ).unbind(0) - ) + self._make_specs() + + def _make_specs(self): + if self.input_mode == "history": + self._make_specs_history() + elif self.input_mode == "text": + self._make_specs_text() + elif self.input_mode == "tokens": + self._make_specs_tokens() + else: + raise ValueError(f"Invalid input mode: {self.input_mode}") + + def _make_specs_history(self): + # we output prompt + self.full_observation_spec = Composite( + history=ChatHistory.default_spec(shape=self.batch_size, keys=["prompt"]).to( + self.device + ), + shape=self.batch_size, + device=self.device, ) - if self.apply_template: - td_out["text"] = history.apply_chat_template( - tokenizer=self.tokenizer, **self.template_kwargs - ) - return td_out + # We receive prompt, response and full + self.full_action_spec = Composite( + history=ChatHistory.default_spec(shape=self.batch_size, keys=["full"]).to( + self.device + ), + shape=self.batch_size, + device=self.device, + ) + self.full_state_spec = Composite( + { + self.data_key: NonTensor( + example_data="a string", shape=self.batch_size, device=self.device + ) + }, + shape=self.batch_size, + device=self.device, + ) + + def _make_specs_text(self): + # we output prompt + self.full_observation_spec = Composite( + 
text=Text.default_spec(shape=self.batch_size, keys=["prompt"]).to( + self.device + ), + shape=self.batch_size, + device=self.device, + ) + # We receive prompt, response and full + self.full_action_spec = Composite( + text=Text.default_spec(shape=self.batch_size, keys=["full"]).to( + self.device + ), + shape=self.batch_size, + device=self.device, + ) + self.full_state_spec = Composite( + { + self.data_key: NonTensor( + example_data="a string", shape=self.batch_size, device=self.device + ) + }, + shape=self.batch_size, + device=self.device, + ) + + def _make_specs_tokens(self): + # we output prompt + self.full_observation_spec = Composite( + tokens=Tokens.default_spec(shape=self.batch_size, keys=["prompt"]).to( + self.device + ), + shape=self.batch_size, + device=self.device, + ) + # We receive prompt, response and full + self.full_action_spec = Composite( + tokens=Tokens.default_spec(shape=self.batch_size, keys=["full"]).to( + self.device + ), + shape=self.batch_size, + device=self.device, + ) + self.full_state_spec = Composite( + { + self.data_key: NonTensor( + example_data="a string", shape=self.batch_size, device=self.device + ) + }, + shape=self.batch_size, + device=self.device, + ) + + # def _post_step_mdp_hooks(self, tensordict: TensorDictBase) -> TensorDictBase: + # """Allows modification of the tensordict after the step_mdp.""" + # if self.input_mode == "history": + # tensordict.exclude( + # ("history", "response"), ("history", "full"), inplace=True + # ) + # if self.input_mode in ("text", "history"): + # tensordict.exclude(("text", "response"), ("text", "full"), inplace=True) + # if self.input_mode in ("tokens", "history", "text"): + # tensordict.exclude(("tokens", "response"), ("tokens", "full"), inplace=True) + # if "log_probs" in tensordict.keys(): + # tensordict.exclude( + # ("log_probs", "response"), ("log_probs", "full"), inplace=True + # ) + # return tensordict - def _reset(self, tensordict: TensorDictBase | None): + def _step(self, tensordict): + if 
self.input_mode == "history": + return self._step_history(tensordict) + if self.input_mode in ("text", "history"): + return self._step_text(tensordict) + if self.input_mode in ("tokens", "history", "text"): + return self._step_tokens(tensordict) + else: + raise ValueError(f"Invalid input mode: {self.input_mode}") + + def _step_history(self, tensordict): + """Step the environment in history mode.""" + # get history from tensordict + chat_history: ChatHistory = tensordict["history"] + # prompt = chat_history.prompt + full = chat_history.full + # response = chat_history.response + empty_td = tensordict.empty(device=self.device) + # Old full will be new prompt - can be modified at will + new_history = ChatHistory(prompt=full) + empty_td.set("history", new_history) + return empty_td + + def _step_text(self, tensordict): + """Step the environment in text mode.""" + # get text from tensordict + text: Text = tensordict["text"] + full = text.full + empty_td = tensordict.empty(device=self.device) + new_history = Text(prompt=full) + empty_td.set("text", new_history) + return empty_td + + def _step_tokens(self, tensordict): + """Step the environment in tokens mode.""" + # get tokens from tensordict + tokens: Tokens = tensordict["tokens"] + full = tokens.full + empty_td = tensordict.empty(device=self.device) + new_history = Tokens(prompt=full) + empty_td.set("tokens", new_history) + return empty_td + + def _reset(self, tensordict: TensorDictBase | None, **kwargs): if tensordict is None: raise RuntimeError(f"{type(self).__name__} expects a tensordict as input") # Find the total text - content = tensordict.get("text") + content = tensordict.get(self.data_key) + if content is None: + raise RuntimeError( + f"{type(self).__name__} expects a tensordict with a {self.data_key} key, got {tensordict.keys()}" + ) if content.batch_size != self.batch_size: for s in reversed(self.batch_size): content = [content for _ in range(s)] @@ -254,23 +357,58 @@ def _reset(self, tensordict: 
TensorDictBase | None): history = lazy_stack([history_system, history], -1) else: history = history.unsqueeze(-1) - result = TensorDict( - history=history, - done=torch.zeros(tensordict.shape + (1,), dtype=torch.bool), - batch_size=self.batch_size, - ) - if self.make_lazy: - result = result.unbind(0) - result = lazy_stack(list(result), dim=0) - elif tensordict._lazy: - result = result.unbind(tensordict.stack_dim) - result = lazy_stack(list(result), dim=tensordict.stack_dim) - result.update(tensordict.exclude(*result.keys(True))) - if self.apply_template: - template = history.apply_chat_template( - tokenizer=self.tokenizer, **self.template_kwargs + + # Now that we have the history, call the specific reset method + if self.input_mode == "history": + return ( + self._reset_history(tensordict, history) + .update(tensordict) + .to_lazystack(0) + ) + elif self.input_mode == "text": + return ( + self._reset_text(tensordict, history).update(tensordict).to_lazystack(0) ) - result["text"] = template + elif self.input_mode == "tokens": + return ( + self._reset_tokens(tensordict, history) + .update(tensordict) + .to_lazystack(0) + ) + else: + raise ValueError(f"Invalid input mode: {self.input_mode}") + + def _reset_history(self, tensordict: TensorDictBase, history: History): + # Simplest case: history is the prompt + chat_history = ChatHistory._from_tensordict( + tensordict.empty(device=self.device) + ) + chat_history.prompt = history + return tensordict.empty(device=self.device).set("history", chat_history) + + def _reset_text(self, tensordict: TensorDictBase, history: History): + # We need to parse the history to a text + text = history.apply_chat_template( + tokenizer=self.tokenizer, add_generation_prompt=True, **self.template_kwargs + ) + txt = Text._from_tensordict(tensordict.empty()) + txt.prompt = text + result = tensordict.empty(device=self.device).set("text", txt) + return result + + def _reset_tokens(self, tensordict: TensorDictBase, history: History): + # We need to 
parse the history to a tokens + tokens = history.apply_chat_template( + tokenizer=self.tokenizer, + add_generation_prompt=True, + return_tensors="pt", + return_dict=True, + **self.template_kwargs, + ) + tokens_obj = Tokens._from_tensordict(tensordict.empty().to_lazystack(0)) + for to, tok in _zip_strict(tokens_obj.unbind(0), tokens["input_ids"]): + to.prompt = tok + result = tensordict.empty(device=self.device).set("tokens", tokens_obj) return result def _set_seed(self, seed): @@ -302,7 +440,11 @@ class DatasetChatEnv(TransformedEnv): template_kwargs (dict[str, Any] | None, optional): Additional keyword arguments for the template. Defaults to `None`. apply_template (bool | None, optional): Whether to apply the template to the text. Defaults to `False`. collate_fn (Callable | None, optional): A custom collate function for data loading. If `None`, a default - collate function is used. Defaults to `None`. + collate function is used that renames the `"text"` key to `"query"` to avoid conflicts with the `"text"` key + in the tensordict returned by TorchRL components. Defaults to `None`. + input_mode (Literal["history", "text", "tokens"], optional): The mode of input to the environment. Defaults to `"history"`. + data_key (str, optional): The spec of the data returned by the dataloader (or better, its collate_fn). + Defaults to `None` (automatically determined based on the input_mode). .. seealso:: `DatasetChatEnv` is a thin wrapper around :class:`~torchrl.envs.llm.ChatEnv` bucketed with a :class:`~torchrl.envs.llm.DataLoadingPrimer` transform. 
See these two classes for more insight on data format @@ -331,6 +473,9 @@ def __init__( template_kwargs: dict[str, Any] | None = None, apply_template: bool | None = False, collate_fn: Callable[[Any], Any] | None = None, + input_mode: Literal["history", "text", "tokens"] = "history", + data_key: str | None = None, + primers: Composite | None = None, ): from datasets import load_dataset from tensordict import list_to_stack @@ -343,11 +488,11 @@ def __init__( batch_size = (num_envs,) - dataset = load_dataset(dataset, name) - if split is None and "train" in dataset: + dataset_obj = load_dataset(dataset, name) + if split is None and "train" in dataset_obj: split = "train" if split is not None: - dataset = dataset[split] + dataset_obj = dataset_obj[split] # Env if seed is None: seed = int(torch.empty((), dtype=torch.int64).random_().item()) @@ -355,10 +500,10 @@ def __init__( generator.manual_seed(seed) dataloader = DataLoader( # noqa: TOR401 - dataset, + dataset_obj, batch_size=batch_size_dl, shuffle=shuffle, - collate_fn=collate_fn, + collate_fn=collate_fn if collate_fn is not None else _default_collate_fn, generator=generator, ) @@ -368,13 +513,16 @@ def __init__( device=device, group_repeats=group_repeats, batch_size=batch_size, + primers=primers, ) env_base = ChatEnv( batch_size=batch_size, system_prompt=self.SYSTEM_PROMPT, tokenizer=tokenizer, template_kwargs=template_kwargs, - apply_template=apply_template, + input_mode=input_mode, + data_key=data_key, + device=device, ) return super().__init__(env_base, primer) @@ -386,5 +534,6 @@ def reset_dataloader(self): Returns: self: The environment itself. 
""" - self.transform[0].reset_dataloader() + if hasattr(self.transform, "__getitem__"): + self.transform[0].reset_dataloader() return self diff --git a/torchrl/envs/llm/datasets/gsm8k.py b/torchrl/envs/llm/datasets/gsm8k.py index 903f50d75f7..49897f2d53f 100644 --- a/torchrl/envs/llm/datasets/gsm8k.py +++ b/torchrl/envs/llm/datasets/gsm8k.py @@ -5,7 +5,7 @@ from __future__ import annotations import warnings -from typing import Any, Callable +from typing import Any, Callable, Literal import torch from tensordict import NestedKey, TensorDict, TensorDictBase @@ -71,7 +71,7 @@ def transform_observation_spec(self, observation_spec: TensorSpec) -> TensorSpec def _collate_fn(batch): batch = torch.stack([TensorDict.from_dict(_batch) for _batch in batch]) - batch.rename_key_("question", "text") + batch.rename_key_("question", "query") return batch @@ -123,7 +123,13 @@ def make_gsm8k_env( env.append_transform(StepCounter(max_steps=1)) if tokenizer is not None: - env.append_transform(GSM8KRewardParser(tokenizer=tokenizer)) + env.append_transform( + GSM8KRewardParser( + tokenizer=tokenizer, + input_mode="text", + in_keys=["text_response", "answer"], + ) + ) else: warnings.warn("No tokenizer specified - reward will not be assigned.") @@ -154,6 +160,7 @@ class GSM8KEnv(DatasetChatEnv): collate_fn (Callable | None, optional): A custom collate function for data loading. If `None`, a default collate function is used. Defaults to `None`. max_steps (int, optional): The maximum number of steps allowed in an episode. Defaults to `1`. + input_mode (Literal["history", "text", "tokens"], optional): The mode of input to use. Defaults to `"history"`. 
Examples: >>> import transformers @@ -304,6 +311,7 @@ def __init__( compute_reward: bool = True, collate_fn: Callable | None = None, max_steps: int = 1, + input_mode: Literal["history", "text", "tokens"] = "history", ): if collate_fn is None: collate_fn = _collate_fn @@ -321,6 +329,7 @@ def __init__( template_kwargs=template_kwargs, apply_template=apply_template, collate_fn=collate_fn, + input_mode=input_mode, ) if max_steps: self.append_transform(StepCounter(max_steps=max_steps)) diff --git a/torchrl/envs/llm/datasets/ifeval.py b/torchrl/envs/llm/datasets/ifeval.py index 4c3e7e8866e..856189644b7 100644 --- a/torchrl/envs/llm/datasets/ifeval.py +++ b/torchrl/envs/llm/datasets/ifeval.py @@ -4,14 +4,13 @@ # LICENSE file in the root directory of this source tree. from __future__ import annotations -from typing import Any, Callable +from typing import Any, Callable, Literal import torch -from tensordict import TensorClass, TensorDict +from tensordict import NonTensorData, NonTensorStack, TensorClass, TensorDict +from torchrl.data import Composite, NonTensor, Unbounded from torchrl.envs import StepCounter - from torchrl.envs.llm.chat import DatasetChatEnv - from torchrl.envs.llm.reward.ifeval import IfEvalScorer @@ -19,9 +18,10 @@ class IFEvalData(TensorClass["nocast"]): """A tensorclass for IFEval dta.""" key: torch.Tensor - instruction_id_list: str + instruction_id_list: list[str] kwargs: list[dict] - text: str + query: str + # Reponses and additional fields response: str | None = None tokens: torch.Tensor | None = None @@ -29,11 +29,63 @@ class IFEvalData(TensorClass["nocast"]): logits: torch.Tensor | None = None reward: torch.Tensor | None = None + @classmethod + def default_spec( + cls, shape: torch.Size, device: torch.device | None = None + ) -> Composite: + return Composite( + key=Unbounded(shape=shape, dtype=torch.int64, device=device), + instruction_id_list=NonTensor( + shape=shape, + device=device, + feature_dims=0, + example_data=["punctuation:no_comma"], + 
), + kwargs=NonTensor( + shape=shape, + device=device, + feature_dims=0, + example_data={ + "num_highlights": None, + "relation": None, + "num_placeholders": None, + }, + ), + query=NonTensor( + shape=shape, + device=device, + example_data="Plan a 2 week Europe trip and visit London, Paris, and Rome. Answer in all caps. The response must contain at least 8 placeholders (i.e., [restaurant]).", + ), + shape=shape, + step_mdp_static=True, + data_cls=cls, + ) + def _collate_fn(batch): batch = torch.stack([TensorDict.from_any(_batch) for _batch in batch]) - batch.rename_key_("prompt", "text") - return IFEvalData.from_tensordict(batch) + batch.rename_key_("prompt", "query") + # we want instruction_id_list and kwargs to be lists, but not NonTensorStacks + instruction_id_list = batch["instruction_id_list"] + # instruction_id_list should be a list of lists + instruction_id_list = NonTensorStack( + *[ + NonTensorData([item] if not isinstance(item, list) else item) + for item in instruction_id_list + ] + ) + kwargs = batch["kwargs"] + kwargs = NonTensorStack( + *[ + NonTensorData([item] if not isinstance(item, list) else item) + for item in kwargs + ] + ) + batch.set("instruction_id_list", instruction_id_list) + batch.set("kwargs", kwargs) + # we don't need a tensorclass here + return batch + # return IFEvalData.from_tensordict(batch) class IFEvalEnv(DatasetChatEnv): @@ -60,6 +112,7 @@ class IFEvalEnv(DatasetChatEnv): collate_fn (Callable | None, optional): A custom collate function for data loading. If `None`, a default collate function is used. Defaults to `None`. max_steps (int, optional): The maximum number of steps allowed in an episode. Defaults to `1`. + input_mode (Literal["history", "text", "tokens"], optional): The mode of input to use. Defaults to `"history"`. 
Examples: >>> import transformers @@ -160,6 +213,7 @@ def __init__( compute_reward: bool = True, collate_fn: Callable | None = None, max_steps: int = 1, + input_mode: Literal["history", "text", "tokens"] = "history", ): if collate_fn is None: collate_fn = _collate_fn @@ -176,6 +230,9 @@ def __init__( template_kwargs=template_kwargs, apply_template=apply_template, collate_fn=collate_fn, + input_mode=input_mode, + data_key="query", + primers=IFEvalData.default_spec((num_envs,), device), ) if max_steps: self.append_transform(StepCounter(max_steps=max_steps)) diff --git a/torchrl/envs/llm/reward/gsm8k.py b/torchrl/envs/llm/reward/gsm8k.py index 2edbc001d8d..041bc1424a1 100644 --- a/torchrl/envs/llm/reward/gsm8k.py +++ b/torchrl/envs/llm/reward/gsm8k.py @@ -4,24 +4,33 @@ # LICENSE file in the root directory of this source tree. from __future__ import annotations -import torch -from tensordict import NestedKey, TensorDict, TensorDictBase -from tensordict.utils import _zip_strict +from typing import Literal +import torch +from tensordict import lazy_stack, NestedKey, TensorDict, TensorDictBase +from tensordict.utils import _zip_strict, is_non_tensor from torchrl.data import Composite, Unbounded from torchrl.envs import Transform +from torchrl.envs.common import EnvBase class GSM8KRewardParser(Transform): """Reward parser for GSM8KEnv or make_gsm8k_env. + This parser automatically detects the input_mode from the parent environment and handles + responses accordingly: + - "history" mode: response is in ("history", "response") and is a History object + - "text" mode: response is in ("text", "response") and is text + - "tokens" mode: response is in ("tokens", "response") and is tokens + Args: - tokenizer (AutoTokenizer from transformers): the tokenizer asssociated with the model. - in_keys (list of NestedKey): the input keys. Defaults to `["text_response", "answer"]`. + tokenizer (AutoTokenizer from transformers): the tokenizer associated with the model. 
+ in_keys (list of NestedKey): the input keys. If None, will be automatically determined based on parent's input_mode. out_keys (list of NestedKey): the output keys. Defaults to `[ "reward_answer", "reward_think", "reward_right", "reward_contained", "reward", "success"]`. eos_token (str): the end of sentence token. Defaults to `tokenizer.eos_token` if not provided. set_done_if_answer (bool): whether to set the done flag to `True` when an answer is present. Defaults to `True`. - + input_mode (Literal["history", "text", "tokens"]): the input mode of the parent environment. + Defaults to `None` (will be automatically determined based on parent's input_mode). """ def __init__( @@ -31,6 +40,7 @@ def __init__( out_keys: list[NestedKey] | None = None, eos_token: str | None = None, set_done_if_answer: bool = True, + input_mode: Literal["history", "text", "tokens"] | None = None, ): super().__init__() self.tokenizer = tokenizer @@ -42,12 +52,8 @@ def __init__( else None ) self.set_done_if_answer = set_done_if_answer - if in_keys is None: - in_keys = ["text_response", "answer"] - if not isinstance(in_keys, list) or len(in_keys) != 2: - raise ValueError( - f"{type(self).__name__} requires in_keys to be of type list and have 2 elements." 
- ) + self._input_mode = input_mode + if out_keys is None: out_keys = [ "reward_answer", @@ -57,7 +63,42 @@ def __init__( "reward", "success", ] - super().__init__(in_keys, out_keys) + super().__init__() + if in_keys is not None: + self.in_keys = in_keys + self.out_keys = out_keys + + def _maybe_get_in_keys(self): + if not self.in_keys: + parent = getattr(self, "parent", None) + if parent is not None: + if getattr(parent, "base_env", None) is not None: + if getattr(parent.base_env, "input_mode", None) == "history": + self.in_keys = [("history", "full"), "answer"] + elif getattr(parent.base_env, "input_mode", None) == "text": + self.in_keys = [("text", "full"), "answer"] + elif getattr(parent.base_env, "input_mode", None) == "tokens": + self.in_keys = [("tokens", "full"), "answer"] + else: + raise ValueError(f"No base env found for {self}") + + def set_container(self, container: Transform | EnvBase) -> None: + result = super().set_container(container) + self._maybe_get_in_keys() + return result + + _input_mode = None + + @property + def input_mode(self): + if self._input_mode is None: + input_mode = ( + getattr(self.parent, "input_mode", "history") + if hasattr(self, "parent") and self.parent is not None + else "history" + ) + self._input_mode = input_mode + return self._input_mode def _step( self, tensordict: TensorDictBase, next_tensordict: TensorDictBase @@ -72,26 +113,72 @@ def _step( # did update in place return next_tensordict - # Get the completion + # Get the completion based on input_mode + self._maybe_get_in_keys() responses = tensordict[self.in_keys[0]] # batch_size, grpo_size, L - if isinstance(responses, str): - responses = [responses for _ in range(next_tensordict.batch_size[0])] + # Handle different response types based on input_mode + input_mode = self.input_mode + if input_mode == "history": + # responses is a History object, extract the text content + responses = lazy_stack([r[..., -1] for r in responses.unbind(0)]) + if hasattr(responses, 
"content"): + # If it's a History object with content attribute + text_completion = responses.content + if is_non_tensor(text_completion): + text_completion = text_completion.tolist() + if not isinstance(text_completion, list): + text_completion = [text_completion] + elif hasattr(responses, "apply_chat_template"): + # If it's a History object, apply chat template to get text + text_completion = responses.apply_chat_template( + tokenizer=self.tokenizer, add_generation_prompt=False + ) + if not isinstance(text_completion, list): + text_completion = [text_completion] + else: + # Fallback: try to convert to string + text_completion = [str(responses)] + elif input_mode == "text": + # responses is already text + if isinstance(responses, str): + text_completion = [ + responses for _ in range(next_tensordict.batch_size[0]) + ] + elif not isinstance(responses, list): + text_completion = [responses] + else: + text_completion = responses + elif input_mode == "tokens": + # responses is tokens, need to decode + if isinstance(responses, torch.Tensor): + if responses.ndim == 3: + batch_size, grpo_size, _ = responses.shape + # decode + text_completion = self.tokenizer.decode( + responses.flatten(0, 1).tolist() + ) + if not isinstance(text_completion, list): + text_completion = [ + text_completion for _ in range(next_tensordict.batch_size[0]) + ] + else: + # Assume it's already a list of token sequences + text_completion = [] + for token_seq in responses: + if isinstance(token_seq, torch.Tensor): + text_completion.append( + self.tokenizer.decode(token_seq.tolist()) + ) + else: + text_completion.append(str(token_seq)) + else: + raise ValueError(f"Unknown input_mode: {input_mode}") if self.eos_token is not None: - responses = [r.removesuffix(self.eos_token) for r in responses] + text_completion = [r.removesuffix(self.eos_token) for r in text_completion] answers = next_tensordict[self.in_keys[1]] # batch_size, grpo_size - if isinstance(responses, torch.Tensor): - if responses.ndim == 
3: - batch_size, grpo_size, _ = responses.shape - # decode - text_completion = self.tokenizer.decode(responses.flatten(0, 1).tolist()) - else: - text_completion = responses - if not isinstance(text_completion, list): - text_completion = [ - text_completion for _ in range(next_tensordict.batch_size[0]) - ] + # Decomposed reward tds = [] # torchrl_logger.info(f"{answers=}") @@ -114,10 +201,13 @@ def _step( # With tensorclass comparison should be easy cot_orig, answer = answer.split("#### ") tds.append( - self._single_shaped_correctness_reward(answer, potential_answer, cot) + self._single_shaped_correctness_reward( + answer, [potential_answer], [cot] + ) ) tds = torch.stack(tds) if isinstance(responses, torch.Tensor) and responses.ndim == 3: + batch_size, grpo_size, _ = responses.shape tds = tds.reshape(batch_size, grpo_size) # Rewards need to have shape broadcastable to [batch x tokens x 1] tds = tds.apply(lambda t: t.unsqueeze(-1).unsqueeze(-1)) @@ -220,7 +310,13 @@ def extract_tags(text: str) -> tuple[str, str]: except ET.ParseError: return ("", "") + think_elem = root.find("think") + answer_elem = root.find("answer") return ( - root.find("think").text if root.find("think") is not None else "", - root.find("answer").text if root.find("answer") is not None else "", + think_elem.text + if think_elem is not None and think_elem.text is not None + else "", + answer_elem.text + if answer_elem is not None and answer_elem.text is not None + else "", ) diff --git a/torchrl/envs/llm/reward/ifeval/_instructions_main.py b/torchrl/envs/llm/reward/ifeval/_instructions_main.py index d891ae823f8..6ce14ea3175 100644 --- a/torchrl/envs/llm/reward/ifeval/_instructions_main.py +++ b/torchrl/envs/llm/reward/ifeval/_instructions_main.py @@ -40,6 +40,10 @@ def _test_instruction_following_strict( ): """Tests response to see if instructions are followed.""" instruction_list = inp.instruction_id_list + if not isinstance(instruction_list, list): + raise ValueError( + f"instruction_list must 
be a list, got {type(instruction_list)}, {instruction_list=}" + ) is_following_list = [] for index, instruction_id in enumerate(instruction_list): diff --git a/torchrl/envs/llm/reward/ifeval/_scorer.py b/torchrl/envs/llm/reward/ifeval/_scorer.py index 0ebef1a76c7..f0a9a3a2467 100644 --- a/torchrl/envs/llm/reward/ifeval/_scorer.py +++ b/torchrl/envs/llm/reward/ifeval/_scorer.py @@ -20,7 +20,14 @@ from typing import Callable import torch -from tensordict import NestedKey, NonTensorData, TensorClass, TensorDict, TensorDictBase +from tensordict import ( + lazy_stack, + NestedKey, + NonTensorData, + TensorClass, + TensorDict, + TensorDictBase, +) from tensordict.tensorclass import is_non_tensor from torchrl._utils import logger as torchrl_logger @@ -40,6 +47,27 @@ class IFEvalScoreData(TensorClass): prompt_level_loose_acc: torch.Tensor | None inst_level_loose_acc: torch.Tensor | None + @classmethod + def default_spec( + cls, shape: torch.Size, device: torch.device | None = None + ) -> Composite: + return Composite( + prompt_level_strict_acc=Unbounded( + shape=shape, dtype=torch.bool, device=device + ), + inst_level_strict_acc=Unbounded( + shape=shape, dtype=torch.bool, device=device + ), + prompt_level_loose_acc=Unbounded( + shape=shape, dtype=torch.bool, device=device + ), + inst_level_loose_acc=Unbounded( + shape=shape, dtype=torch.bool, device=device + ), + data_cls=cls, + step_mdp_static=True, + ) + def __post_init__(self): prompt_level_loose_acc = self.get( "prompt_level_loose_acc", as_padded_tensor=True @@ -72,7 +100,10 @@ def __post_init__(self): def _process_results( - data: TensorDict, response: str | NonTensorData, verbose: bool = False + data: TensorDict, + response: str | NonTensorData, + verbose: bool = False, + prompt: str | None = None, ) -> IFEvalScoreData: if not _has_langdetect: raise ImportError("langdetect must be installed to user IFEvalScorer.") @@ -85,10 +116,13 @@ def _process_results( _test_instruction_following_strict, ) + if prompt is None: + 
prompt = data["text"] + inp = _InputExample( key=data["key"], instruction_id_list=data["instruction_id_list"], - prompt=data["text"], + prompt=prompt, kwargs=data["kwargs"], ) @@ -136,6 +170,7 @@ class IfEvalScorer(Transform): `prompt_level_loose_acc`, `inst_level_loose_acc`, in that order). Defaults to `[0.4, 0.3, 0.2, 0.1]`. This is only used if `aggregate_reward` is `True` and the default aggregator is used. verbose (bool, optional): Whether to print verbose information. Defaults to `False`. + set_done_if_answer (bool): whether to set the done flag to `True` when an answer is present. Defaults to `True`. .. note:: `IFEvalScorer` requires the following libraries to be installed: `langdetect`, `nltk` and `immutabledict`. @@ -156,9 +191,11 @@ def __init__( ] = True, format_weights: list[float] | None = None, verbose: bool = False, + set_done_if_answer: bool = True, ): self.aggregate_reward = aggregate_reward self.score_key = score_key + self.set_done_if_answer = set_done_if_answer out_keys = [self.score_key] if aggregate_reward: out_keys.append("reward") @@ -281,8 +318,11 @@ def default_reward_aggregator( def _step( self, tensordict: TensorDictBase, next_tensordict: TensorDictBase ) -> TensorDictBase: + if not getattr(self.parent.base_env, "input_mode", "history") == "history": + raise ValueError("IFEvalScorer only supports history input mode") + if tensordict.ndim: - return torch.stack( + return lazy_stack( [ self._step(td, next_td) for td, next_td in zip( @@ -290,7 +330,8 @@ def _step( ) ] ) - h = next_tensordict["history"][..., -1] + h = tensordict["history", "full"][..., -1] + prompt = tensordict["history", "prompt"][..., -1].content response = h.content complete = h.is_complete # response = tensordict.get(self.response_key) @@ -311,6 +352,7 @@ def _step( tensordict.copy().auto_device_(), answer_blocks[0] if answer_blocks else "", verbose=self.verbose, + prompt=prompt, ) next_tensordict.set( self.score_key, @@ -327,8 +369,31 @@ def _step( 
answer_blocks=answer_blocks, complete=complete, ) + reward = reward.view( + next_tensordict.batch_size + + ( + 1, + 1, + ) + ) next_tensordict.set("reward", reward) - + if self.set_done_if_answer and bool(answer_blocks): + next_tensordict.set( + "done", + torch.ones( + next_tensordict.batch_size + (1,), + device=next_tensordict.device, + dtype=torch.bool, + ), + ) + next_tensordict.set( + "terminated", + torch.ones( + next_tensordict.batch_size + (1,), + device=next_tensordict.device, + dtype=torch.bool, + ), + ) return next_tensordict @property @@ -343,23 +408,14 @@ def expected_keys(self) -> list[str]: def transform_reward_spec(self, reward_spec: Composite) -> Composite: reward_spec["reward"] = Unbounded( - reward_spec.shape + (1,), dtype=torch.get_default_dtype() + reward_spec.shape + (1, 1), + dtype=torch.get_default_dtype(), + device=reward_spec.device, ) return reward_spec def transform_observation_spec(self, observation_spec: Composite) -> Composite: - observation_spec[self.score_key] = Composite( - prompt_level_strict_acc=Unbounded( - shape=observation_spec.shape, dtype=torch.bool - ), - inst_level_strict_acc=Unbounded( - shape=observation_spec.shape, dtype=torch.bool - ), - prompt_level_loose_acc=Unbounded( - shape=observation_spec.shape, dtype=torch.bool - ), - inst_level_loose_acc=Unbounded( - shape=observation_spec.shape, dtype=torch.bool - ), + observation_spec[self.score_key] = IFEvalScoreData.default_spec( + observation_spec.shape, device=observation_spec.device ) return observation_spec diff --git a/torchrl/envs/llm/transforms/__init__.py b/torchrl/envs/llm/transforms/__init__.py index 7502ba5f131..6e28b1ac18e 100644 --- a/torchrl/envs/llm/transforms/__init__.py +++ b/torchrl/envs/llm/transforms/__init__.py @@ -6,7 +6,7 @@ from .browser import BrowserTransform from .dataloading import as_nested_tensor, as_padded_tensor, DataLoadingPrimer from .format import TemplateTransform -from .kl import KLRewardTransform, RetrieveLogProb +from .kl import 
KLComputation, KLRewardTransform, RetrieveKL, RetrieveLogProb from .policy_version import PolicyVersion from .reason import AddThinkingPrompt from .tokenizer import Tokenizer @@ -17,10 +17,12 @@ "DataLoadingPrimer", "KLRewardTransform", "RetrieveLogProb", + "RetrieveKL", "MCPToolTransform", "PolicyVersion", "PythonInterpreter", "AddThinkingPrompt", + "KLComputation", "TemplateTransform", "Tokenizer", "as_nested_tensor", diff --git a/torchrl/envs/llm/transforms/dataloading.py b/torchrl/envs/llm/transforms/dataloading.py index 0d09dc4c992..770948f1e14 100644 --- a/torchrl/envs/llm/transforms/dataloading.py +++ b/torchrl/envs/llm/transforms/dataloading.py @@ -464,24 +464,43 @@ def _endless_iter(self, obj): while True: yield from obj + _device: torch.device | None = None + + @property + def device(self) -> torch.device | None: + if self._device is None: + primers = getattr(self, "primers", None) + if primers is not None: + device = self.primers.device + else: + parent = getattr(self, "parent", None) + if parent is not None: + device = getattr(parent, "device", None) + else: + device = None + self._device = device + return self._device + + @device.setter + def device(self, device: torch.device | None): + self._device = device + def _load_from_dataloader(self, reset: torch.Tensor | None = None): """Loads a single element from the dataloader, or alternatively from the buffer. If `reset` is passed, then one element per reset will be loaded. 
""" + device = self.device + if reset is not None: if not reset.any(): raise RuntimeError("reset must have at least one True value.") if reset.ndim > 0: - loaded = [self._load_from_dataloader() for _ in range(reset.sum())] + loaded = [ + self._load_from_dataloader().to(device) for _ in range(reset.sum()) + ] return self.stack_method(loaded) - primers = getattr(self, "primers", None) - if primers is not None: - device = self.primers.device - else: - device = None - if len(self._queue) > 0: result = self._queue.popleft() if result.device != device: diff --git a/torchrl/envs/llm/transforms/kl.py b/torchrl/envs/llm/transforms/kl.py index a8a13798d0f..3123c76f362 100644 --- a/torchrl/envs/llm/transforms/kl.py +++ b/torchrl/envs/llm/transforms/kl.py @@ -4,20 +4,21 @@ # LICENSE file in the root directory of this source tree. from __future__ import annotations -import contextlib -import gc +import warnings +from contextlib import nullcontext from copy import copy +from typing import Any, Literal import torch from tensordict import NestedKey, set_list_to_stack, TensorDictBase, unravel_key -from tensordict.nn import ProbabilisticTensorDictModule -from tensordict.utils import _zip_strict, is_seq_of_nested_key +from tensordict.utils import _zip_strict, is_seq_of_nested_key, logger as torchrl_logger +from torch.nn.utils.rnn import pad_sequence from torchrl.data import Composite, Unbounded -from torchrl.data.llm.chat import History from torchrl.envs import EnvBase, Transform +from torchrl.envs.transforms.transforms import Compose from torchrl.envs.transforms.utils import _set_missing_tolerance -from torchrl.modules.llm.policies.common import CategoricalSequential +from torchrl.modules.llm.policies.common import LLMWrapperBase try: import transformers @@ -26,50 +27,65 @@ class KLRewardTransform(Transform): - """A transform to add a KL[pi_current||pi_0] correction term to the reward. + """A legacy transform for computing KL divergence-based rewards. 
- This transform is used to constrain the policy to remain close to its original - configuration which limits overfitting when fine-tuning using RLHF. + **Deprecated**: This transform is maintained for backward compatibility but is no longer + the recommended approach. Use :class:`~torchrl.envs.llm.transforms.kl.RetrieveKL` instead, + which provides better modularity and integration with the new wrapper design. + + **Recent Changes:** + - **Legacy Status**: This transform is now considered legacy and may not work optimally + with the new modular wrapper design. + - **ChatHistory Integration**: Limited support for the new :class:`~torchrl.modules.llm.policies.ChatHistory` objects. + - **Input Mode Support**: May not handle all input modes (`"history"`, `"text"`, `"tokens"`) consistently. + + **Recommendation**: + Use :class:`~torchrl.envs.llm.transforms.kl.RetrieveKL` for new code, which provides: + - Better integration with the new wrapper design + - Consistent support for all input modes + - Proper handling of ChatHistory objects + - More modular and composable architecture Args: - actor (ProbabilisticTensorDictModule): a frozen probabilistic actor. It must - have the following features: it must have a set of input (``in_keys``) - and output keys (``out_keys``). It must have a ``get_dist`` method - that outputs the distribution of the action. - coef (:obj:`float`): the coefficient of the KL term. Defaults to ``1.0``. - in_keys (str or list of str/tuples of str): the input key where the - reward should be fetched. Defaults to ``"reward"``. - out_keys (str or list of str/tuples of str): the output key where the - reward should be written. Defaults to ``["reward", "kl_penalty", "ref_log_prob"]``. - add_to_reward (bool): whether to add the reward term to the reward. - Defaults to ``True``. - - .. 
note:: If the parameters are not differentiable (default), they will *not* - follow the module when dtype or device casting operations will be called - (such as :meth:`cuda`, :meth:`to` etc.). When ``requires_grad=True``, - casting operations will work as expected. + gen_model (LLMWrapperBase): the generation model. + ref_model (LLMWrapperBase): the reference model. - Examples: - TODO + Keyword Args: + assistant_only (bool): whether to only compute KL on assistant tokens. Defaults to `True`. + tokenizer (transformers.AutoTokenizer): the tokenizer to use. Defaults to `None`. + detach (bool): whether to detach the KL from the computation graph. Defaults to `True`. + device (torch.device): the device to use. Defaults to `None`. + padding_side (str): the side of the padding when using pad_sequence. Defaults to `"left"`. - .. note:: Because the KL formula is not always available and the parameters of the - original distribution may not have been recorded, we use a stochastic estimate - of the KL divergence. + Examples: + >>> # Legacy usage (not recommended for new code) + >>> transform = KLRewardTransform(gen_model, ref_model) + >>> + >>> # Recommended approach using RetrieveKL + >>> from torchrl.envs.llm.transforms.kl import RetrieveKL + >>> transform = RetrieveKL(gen_model, ref_model, assistant_only=True) + .. seealso:: + :class:`~torchrl.envs.llm.transforms.kl.RetrieveKL`: The recommended transform for KL divergence computation. + :class:`~torchrl.envs.llm.transforms.kl.RetrieveLogProb`: Base transform for retrieving log-probabilities. + :class:`~torchrl.envs.llm.transforms.kl.KLComputation`: Transform for computing KL divergence between log-prob tensors. 
""" DEFAULT_IN_KEYS = ["reward"] def __init__( self, - actor: ProbabilisticTensorDictModule, + ref_model: LLMWrapperBase, + *, coef=1.0, in_keys=None, out_keys=None, - log_prob_key: NestedKey = "log_probs", - action_key: NestedKey | None = None, + log_prob_key: NestedKey = ("log_probs", "full"), device: torch.device | None = None, add_to_reward: bool = True, + tokenizer: transformers.AutoTokenizer | None = None, + assistant_only: bool = True, + padding_side: str = "left", ): if in_keys is None: in_keys = self.DEFAULT_IN_KEYS @@ -94,31 +110,55 @@ def __init__( ) self._out_keys = [unravel_key(out_key) for out_key in self._out_keys] + if getattr(ref_model, "generate", False): + raise ValueError( + "The actor is configured to generate text, not compute the log-probs." + ) + # update the in_keys for dispatch etc - self.in_keys = self.in_keys + actor.in_keys + self.in_keys = self.in_keys + ref_model.in_keys self.in_keys = [unravel_key(in_key) for in_key in self.in_keys] self.add_to_reward = add_to_reward # check that the model has parameters - self.__dict__["actor"] = actor + self.__dict__["ref_model"] = ref_model # self._buffers["actor_params"] = params.clone().detach() self.device = device - self.action_key = action_key # find the sample log-prob key - self.sample_log_prob_key = log_prob_key - - def find_sample_log_prob(module): - if hasattr(module, "log_prob_key"): - self.sample_log_prob_key = module.log_prob_key + self.log_prob_full_key = log_prob_key - self.actor.apply(find_sample_log_prob) + self._tokenizer = tokenizer + self.assistant_only = assistant_only + self.padding_side = padding_side if not isinstance(coef, torch.Tensor): coef = torch.as_tensor(coef) self.register_buffer("coef", coef) + # sanity check for the ref_model + if not getattr(ref_model, "input_mode", "tokens") == "tokens": + raise ValueError( + "The ref_model must be configured to use tokens as input. Please set the `input_mode` argument to `tokens`." 
+ ) + + @property + def pad_output(self): + # We need pad_output to match the pad_output of the inference model + return self.ref_model.pad_output + + @property + def tokenizer(self): + tokenizer = self._tokenizer + if tokenizer is not None: + return tokenizer + try: + return self.ref_model.tokenizer + except AttributeError: + raise AttributeError( + "The ref_model does not have a tokenizer. Please pass the tokenizer to the constructor." + ) def set_container(self, container: Transform | EnvBase) -> None: result = super().set_container(container) @@ -141,54 +181,127 @@ def _reset( tensordict_reset = self._step(tensordict_reset, tensordict_reset) return tensordict_reset + @property + def action_key(self) -> NestedKey: + # Get the action from the base env (a ChatEnv). + if self.parent.base_env.input_mode == "history": + return ("history", "full") + if self.parent.base_env.input_mode == "text": + return ("text", "full") + if self.parent.base_env.input_mode == "tokens": + return ("tokens", "full") + raise ValueError(f"Invalid input mode: {self.parent.base_env.input_mode}") + def _step( self, tensordict: TensorDictBase, next_tensordict: TensorDictBase ) -> TensorDictBase: - # run the actor on the tensordict - action_key = self.action_key - if action_key is None: - raise ValueError( - f"action_key is required. Please set a parent for the {type(self).__name__} to recover the action keys automatically, " - f"or pass the action_key argument directly to {type(self).__name__} constructor." 
- ) - response_txt = tensordict.get(action_key, None) - if response_txt is None: + if self.device is not None: + tensordict = tensordict.to(self.device) + next_tensordict = next_tensordict.to(self.device) + # tensordict = self._get_text_response(tensordict, next_tensordict) + response = tensordict.get(self.action_key, None) + if response is None: if not self.missing_tolerance: raise RuntimeError( - f"Action with key {action_key} not found data {tensordict}" + f"Action with key {self.action_key} not found data {tensordict}" ) # being called after reset or without action, skipping if self.out_keys[0] != "reward" and self.parent is not None: next_tensordict.set(self.out_keys[0], self.parent.reward_spec.zero()) return next_tensordict - if hasattr(self.actor, "log_prob"): - if self.device is not None and tensordict.device != self.device: - td_device = tensordict.to(self.device) - else: - td_device = tensordict.copy() - ref_log_prob = self.actor.log_prob( - td_device, as_nested_tensor=True, layout=torch.strided + + # We use the ("tokens", "full") key to get the log-probs of the reference model + with torch.device(self.device) if self.device is not None else nullcontext(): + td_input = tensordict.copy() + ref_log_prob_td = self.ref_model(td_input) + if self.pad_output: + ref_log_prob_padded = ref_log_prob_td.get(self.log_prob_full_key) + else: + ref_log_prob_unpadded = ref_log_prob_td.get( + self.log_prob_full_key, as_list=True ) + if self.assistant_only: + # Get the assistant mask + mask = tensordict.get(("masks", "all_assistant_mask")) + # mask will often be None - fall back on prompt / response separation + if mask is None: + if self.pad_output: + # simple case: just take the prompt length + prompt_length = tensordict.get(("tokens", "prompt")).shape[-1] + mask = tensordict.get(("masks", "all_attention_mask")).clone() + mask[..., :prompt_length] = False + else: + # simple case: just take the prompt length + prompt_length = [ + t.size(-1) + for t in 
tensordict.get(("tokens", "prompt"), as_list=True) + ] + mask = tensordict.get(("masks", "all_attention_mask"), as_list=True) + for i in range(len(prompt_length)): + mask[i] = mask[i].clone() + mask[i][..., : prompt_length[i]] = False + + # we want to keep the batch dimension + ref_log_prob_list = [] + if self.pad_output: + for i in range(ref_log_prob_padded.size(0)): + ref_log_prob_list.append( + ref_log_prob_padded[i].masked_fill(~mask[i], 0) + ) + else: + for i in range(len(ref_log_prob_unpadded)): + ref_log_prob_list.append( + ref_log_prob_unpadded[i].masked_fill(~mask[i], 0) + ) + if self.pad_output: + ref_log_prob = pad_sequence( + ref_log_prob_list, + batch_first=True, + padding_value=0, + padding_side=self.padding_side, + ) + else: + ref_log_prob = torch.nested.nested_tensor( + ref_log_prob_list, layout=torch.strided + ) + + # we obtain the current log-probs (already computed) from the current tensordict + if self.pad_output: + curr_log_prob_padded = tensordict.get(self.log_prob_full_key) else: - ref_log_prob_td = self.actor(tensordict) - ref_log_prob = ref_log_prob_td.get(self.sample_log_prob_key) + curr_log_prob_unpadded = tensordict.get( + self.log_prob_full_key, as_list=True + ) + if self.assistant_only: + # we want to keep the batch dimension + curr_log_prob_list = [] + if self.pad_output: + for i in range(curr_log_prob_padded.size(0)): + curr_log_prob_list.append( + curr_log_prob_padded[i].masked_fill(~mask[i], 0) + ) + else: + for i in range(len(curr_log_prob_unpadded)): + curr_log_prob_list.append( + curr_log_prob_unpadded[i].masked_fill(~mask[i], 0) + ) + if self.pad_output: + curr_log_prob = pad_sequence( + curr_log_prob_list, + batch_first=True, + padding_value=0, + padding_side=self.padding_side, + ) + else: + curr_log_prob = torch.nested.nested_tensor( + curr_log_prob_list, layout=torch.strided + ) - reward_key = self.in_keys[0] - reward = next_tensordict.get(reward_key) - curr_log_prob = tensordict.get( - self.sample_log_prob_key, 
as_nested_tensor=True, layout=torch.strided - ) ref_log_prob = ref_log_prob.to(curr_log_prob.device) # We want the log-probs to have a similar dim to the reward curr_log_prob = curr_log_prob.unsqueeze(-1) ref_log_prob = ref_log_prob.unsqueeze(-1) - # we use the unbiased consistent estimator of the KL: log_p(x) - log_q(x) when x ~ p(x) - if not reward.is_nested and ref_log_prob.is_nested: - reward = torch.nested.nested_tensor( - [rew.expand(lp.shape) for rew, lp in zip(reward, ref_log_prob)], - layout=torch.strided, - ) for i in range(ref_log_prob.size(0)): if ref_log_prob[i].shape != curr_log_prob[i].shape: # Don't check shapes if nested @@ -197,16 +310,25 @@ def _step( f"One possible reason is that the padding token is identical to the eos token, which means that the eos_token log_prob is truncated from the " f"reference model output." ) - if reward is not None and reward.ndim != curr_log_prob.ndim: - raise ValueError( - "The number of dimensions of reward must be the same as the number of dimensions of the KL " - f"term. Got ndim={reward.ndim} and {curr_log_prob.ndim} respectively." - ) kl = curr_log_prob - ref_log_prob if self.add_to_reward: + reward_key = self.in_keys[0] + reward = next_tensordict.get(reward_key) + # we use the unbiased consistent estimator of the KL: log_p(x) - log_q(x) when x ~ p(x) + if not reward.is_nested and ref_log_prob.is_nested: + reward = torch.nested.nested_tensor( + [rew.expand(lp.shape) for rew, lp in zip(reward, ref_log_prob)], + layout=torch.strided, + ) + if reward is not None and reward.ndim != curr_log_prob.ndim: + raise ValueError( + "The number of dimensions of reward must be the same as the number of dimensions of the KL " + f"term. Got ndim={reward.ndim} and {curr_log_prob.ndim} respectively." 
+ ) if reward is None: reward = 0 - next_tensordict.set(self.out_keys[0], reward - self.coef * kl) + reward = reward - self.coef * kl + next_tensordict.set(self.out_keys[0], reward) next_tensordict.set(self.out_keys[1], kl) next_tensordict.set(self.out_keys[2], ref_log_prob) return next_tensordict @@ -282,36 +404,44 @@ def transform_output_spec(self, output_spec: Composite) -> Composite: class RetrieveLogProb(Transform): - """A transform to retrieve the log-probs of a text given a reference model. + """A transform to retrieve log-probabilities from a model for KL divergence computation. + + This transform computes log-probabilities from a reference model, which can then be used + to compute KL divergence with another model's log-probabilities. It's designed to work + with the :class:`~torchrl.envs.llm.transforms.kl.RetrieveKL` and :class:`~torchrl.envs.llm.transforms.kl.KLComputation` transforms. Args: - actor (CategoricalSequential): the reference model. + model (LLMWrapperBase): the model to use to compute the log-probs. Keyword Args: - history_key (NestedKey): the key where the history is stored. Defaults to `"history"`. - log_prob_key (NestedKey): the key where the log-probs are stored. Defaults to `"ref_log_prob"`. + log_probs_full_key (NestedKey): the key where the log-probs are stored. + If not provided, the key will be retrieved from the model's `log_probs_key` attribute + (i.e., `(model.log_probs_key, "full")`). assistant_only (bool): whether to only retrieve the log-probs of the assistant tokens (i.e., steps of history - where the role is `"assistant"`). Defaults to `False`. + where the role is `"assistant"`). Defaults to `True`. - .. note:: The template must accommodate the `return_assistant_tokens_mask` keyword argument. - This may not be the case for all templates. 
In this case, you can pass a custom template to the `apply_chat_template` method - via the `tokenizer_kwargs` argument: `tokenizer_kwargs = {"chat_template_name": "qwen"}` or `tokenizer_kwargs = {"chat_template": my_template}. + .. note:: When `assistant_only=True`, the model must have `input_mode='history'` to properly identify + assistant tokens. For other input modes (`"text"` or `"tokens"`), set `assistant_only=False`. + This ensures users are conscious of the limitation that assistant token identification requires + structured conversation history. tokenizer_kwargs (dict): the keyword arguments to pass to the tokenizer to be used to apply the chat template to the history when `assistant_only` is `True`. - To control the tokenization in the actor, pass the tokenizer kwargs to the actor constructor. - Defaults to `{"return_assistant_tokens_mask": True, "tokenize": True, "return_tensors": "pt", "padding": True, "add_generation_prompt": False}`. - tokenizer (transformers.AutoTokenizer): the tokenizer to be used to tokenize the input and compute the assitant mask. If not provided, the tokenizer will be inferred from the `actor`. + To control the tokenization in the ref_model, pass the tokenizer kwargs to the ref_model constructor. + Defaults to `{"return_assistant_tokens_mask": True, "tokenize": True, "return_dict": True, "padding": False, "add_generation_prompt": False}`. + tokenizer (transformers.AutoTokenizer): the tokenizer to be used to tokenize the input and compute the assitant mask. If not provided, the tokenizer will be inferred from the `ref_model`. detach (bool): whether to exclude the log-probs from the gradient computation. Defaults to `True`. device (torch.device): the device to use for tensor creation. Defaults to `None`. + padding_side (str): the side of the padding when using pad_sequence. Defaults to `"left"`. 
Examples: - >>> from torchrl.data.llm.chat import History, _CHAT_TEMPLATES + >>> from torchrl.data.llm import History >>> from torchrl.modules.llm import TransformersWrapper - >>> from torchrl.objectives.llm.sft import SFTLoss + >>> from torchrl.modules.llm.policies import ChatHistory >>> from transformers import AutoTokenizer, OPTConfig, OPTForCausalLM - >>> from tensordict import TensorDict, lazy_stack, set_list_to_stack + >>> from tensordict import TensorDict, set_list_to_stack >>> import torch >>> + >>> # Set up list to stack for History >>> set_list_to_stack(True).set() >>> >>> # Create chat data @@ -334,174 +464,748 @@ class RetrieveLogProb(Transform): >>> # Setup tokenizer and model >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") >>> tokenizer.pad_token = tokenizer.eos_token - >>> tokenizer.chat_template = _CHAT_TEMPLATES["chatml_format"] >>> model = OPTForCausalLM(OPTConfig()).eval() >>> - >>> # Create training and reference policies - >>> policy_train = TransformersWrapper( - ... model, - ... tokenizer=tokenizer, - ... generate=False, - ... from_text=True, - ... chat_template_name="qwen", - ... ) - >>> policy_ref = TransformersWrapper( + >>> # Create reference model + >>> ref_model = TransformersWrapper( ... model, ... tokenizer=tokenizer, + ... input_mode="history", ... generate=False, - ... from_text=True, ... return_log_probs=True, - ... chat_template_name="qwen", + ... pad_output=True, ... ) >>> >>> # Create the RetrieveLogProb transform >>> transform = RetrieveLogProb( - ... policy_ref, + ... ref_model, ... assistant_only=True, - ... tokenizer_kwargs={"chat_template_name": "qwen"}, ... tokenizer=tokenizer, ... ) >>> - >>> # Prepare data - >>> text = history[:, :-1].apply_chat_template( - ... tokenizer=tokenizer, chat_template_name="qwen", add_generation_prompt=True - ... ) - >>> text_response = history.apply_chat_template( - ... tokenizer=tokenizer, chat_template_name="qwen", add_generation_prompt=False - ... 
) - >>> text_response = [ - ... txt[len(txt_start):] for txt, txt_start in zip(text_response, text) - ... ] - >>> td = TensorDict( - ... text=text, - ... text_response=text_response, - ... history=history, - ... next=TensorDict( - ... reward=torch.randn(2, 1), - ... done=torch.zeros(2, dtype=torch.bool), - ... history=history, - ... ), - ... batch_size=(2,), - ... ) - >>> data = lazy_stack(list(td.unbind(0))) + >>> # Prepare data using ChatHistory + >>> chat_history = ChatHistory(full=history) + >>> data = TensorDict(history=chat_history, batch_size=(2,)) >>> >>> # Apply the transform to get reference log probabilities - >>> data = transform(data) - >>> # You can get a padded tensor for batching: - >>> ref_log_probs = data.get(("next", "ref_log_prob"), as_padded_tensor=True) - >>> print(f"Type: {type(ref_log_probs)}, Length: {len(ref_log_probs)}") - Type: , Length: 2 - >>> print(f"Example shapes: {[x.shape for x in ref_log_probs]}") - Example shapes: [torch.Size([35]), torch.Size([35])] - >>> print(ref_log_probs.shape) # (batch, max_seq_len) - torch.Size([2, 35]) - >>> - >>> # Use with SFTLoss for KL regularization - >>> loss = SFTLoss( - ... actor_network=policy_train, - ... tokenizer=tokenizer, - ... reduction="mean", - ... normalize_by_seq_length=True, - ... kl_to_ref_coeff=0.1, - ... tokenizer_kwargs={"chat_template_name": "qwen"}, - ... ) - >>> loss_vals = loss(data) - >>> print(f"SFT Loss: {loss_vals.loss_sft.item():.4f}") - SFT Loss: 10.7856 - >>> print(f"KL to Reference Loss: {loss_vals.loss_kl_to_ref.item():.4f}") - KL to Reference Loss: 0.0000 - >>> print(f"Total Loss: {loss_vals.sum(reduce=True).item():.4f}") - Total Loss: 10.7856 + >>> result = transform(data) + >>> log_probs_key = (ref_model.log_probs_key, "full") + >>> ref_log_probs = result.get(log_probs_key) + >>> print(f"Log-probs shape: {ref_log_probs.shape}") + Log-probs shape: torch.Size([2, 26]) - Note: + .. 
note:: By default, the log-probabilities are stored as a list of tensors (one per sample, with variable length). Use `as_padded_tensor=True` in `.get()` to obtain a batchable tensor (with padding). The reference log probabilities are computed only for assistant tokens when `assistant_only=True`. + **Input Mode Compatibility:** + - When `assistant_only=True` (default), the model must have `input_mode='history'` to properly identify assistant tokens. + - When `assistant_only=False`, the transform works with any input mode (`"history"`, `"text"`, or `"tokens"`). + - This design ensures users are conscious of the limitation that assistant token identification requires structured conversation history. + + .. seealso:: + :class:`~torchrl.envs.llm.transforms.kl.RetrieveKL`: A higher-level transform that combines two `RetrieveLogProb` instances with `KLComputation`. + :class:`~torchrl.envs.llm.transforms.kl.KLComputation`: A transform that computes KL divergence between two log-prob tensors. + :class:`~torchrl.envs.llm.transforms.kl.KLRewardTransform`: A legacy transform for KL reward computation (use `RetrieveKL` instead). """ def __init__( self, - actor: CategoricalSequential, + model: LLMWrapperBase, *, - history_key: NestedKey | None = None, - log_prob_key: NestedKey = "ref_log_prob", - assistant_only: bool = False, + log_probs_full_key: NestedKey | None = None, + assistant_only: bool = True, tokenizer_kwargs: dict | None = None, detach: bool = True, device: torch.device | None = None, tokenizer: transformers.AutoTokenizer | None = None, + padding_side: str = "left", ): - if history_key is None: - history_key = "history" - self.history_key = history_key - self.log_prob_key = log_prob_key - super().__init__(in_keys=[history_key], out_keys=[log_prob_key]) - self.actor = actor - if not getattr(actor, "return_log_probs", True): - raise ValueError( - "The actor must have `return_log_probs=True` to use the `AssistantLogProb` transform." 
- ) - if getattr(actor, "generate", True): - raise ValueError( - "The actor must have `generate=False` to use the `AssistantLogProb` transform." - ) - if not getattr(actor, "from_text", False): - raise ValueError( - "The actor must have `from_text=True` to use the `AssistantLogProb` transform. If `from_text=False` is required, please file an issue on GitHub." + # Set up keys + if log_probs_full_key is None: + log_probs_full_key = (model.log_probs_key, "full") + elif ( + not isinstance(log_probs_full_key, tuple) + or log_probs_full_key[-1] != "full" + ): + warnings.warn( + f"The log_probs_full_key {log_probs_full_key} is not a tuple or does not end with 'full'. " + "This may cause issues with the KL computation. " + "Please use a tuple with the log_probs_key and 'full' as the last element." ) - # if getattr(self.actor, "tokenizer_kwargs", {}).get("add_generation_prompt", True): - # raise ValueError("The actor must have `tokenizer_kwargs['add_generation_prompt']=False` to use the `AssistantLogProb` transform.") + self.log_probs_full_key = log_probs_full_key + + # Set up input/output keys + in_keys = list(model.in_keys) + out_keys = [self.log_probs_full_key] + super().__init__(in_keys=in_keys, out_keys=out_keys) + + # Store model and configuration + self.model = model self.assistant_only = assistant_only + self.detach = detach + self.device = device + self.tokenizer = tokenizer + self.padding_side = padding_side + + # Set up tokenizer kwargs if tokenizer_kwargs is None: tokenizer_kwargs = {} tokenizer_kwargs.setdefault("return_assistant_tokens_mask", True) tokenizer_kwargs.setdefault("tokenize", True) - tokenizer_kwargs.setdefault("return_tensors", "pt") + tokenizer_kwargs.setdefault("return_dict", True) tokenizer_kwargs.setdefault("padding", False) tokenizer_kwargs.setdefault("add_generation_prompt", False) self.tokenizer_kwargs = tokenizer_kwargs - self.tokenizer = tokenizer - self.detach = detach - self.device = device + + # Validate model configuration (after 
setting assistant_only) + self._validate_model_config(model) + + def _validate_model_config(self, model: LLMWrapperBase): + """Validate model configuration.""" + if not getattr(model, "return_log_probs", True): + raise ValueError( + "The model must have `return_log_probs=True` to use the `RetrieveLogProb` transform." + ) + if getattr(model, "generate", True): + raise ValueError( + "The model must have `generate=False` to use the `RetrieveLogProb` transform." + ) + + # Check input mode compatibility with assistant_only + input_mode = getattr(model, "input_mode", "history") + if self.assistant_only and input_mode != "history": + raise ValueError( + f"The model must have `input_mode='history'` when `assistant_only=True`. " + f"Current input_mode is '{input_mode}'. " + f"To use input_mode '{input_mode}', set `assistant_only=False`." + ) def forward(self, tensordict: TensorDictBase) -> TensorDictBase: - next_td = self._step(tensordict, tensordict.get("next")) - return tensordict.set("next", next_td) + next_td = tensordict.get("next") + next_is_none = False + if next_td is None: + next_is_none = True + next_td = tensordict + output = self._step(tensordict, next_td) + if next_is_none: + return output + return tensordict.set("next", output) + + def _mask_assistant_tokens( + self, td: TensorDictBase, lp_key: NestedKey + ) -> torch.Tensor: + """Mask log-probs to only include assistant tokens. 
+ + Args: + td: TensorDict containing the data + lp_key: Key for log-probs in the TensorDict + + Returns: + Masked log-probs tensor + """ + with torch.device(self.device) if self.device is not None else nullcontext(): + # Get assistant mask + assistant_masks = td.get(("masks", "all_assistant_mask"), as_list=True) + log_probs = td.get(lp_key, as_list=True) + log_probs = [ + lp[mask.bool()] for lp, mask in _zip_strict(log_probs, assistant_masks) + ] + if self.model.pad_output: + log_probs = pad_sequence( + log_probs, + batch_first=True, + padding_value=0.0, + padding_side=self.padding_side, + ) + else: + log_probs = torch.nested.as_nested_tensor( + log_probs, layout=self.model.layout + ) + return log_probs @set_list_to_stack(True) def _step( self, tensordict: TensorDictBase, next_tensordict: TensorDictBase ) -> TensorDictBase: - td = next_tensordict.select(self.history_key) - with torch.device( - self.device - ) if self.device is not None else contextlib.nullcontext(), torch.no_grad() if self.detach else contextlib.nullcontext(): - result = self.actor(td.select(self.history_key)) - td.update(result.select(getattr(self.actor, "log_prob_key", "log_probs"))) - td.rename_key_( - getattr(self.actor, "log_prob_key", "log_probs"), self.log_prob_key - ) - if torch.cuda.is_available(): - gc.collect() - torch.cuda.empty_cache() + # Compute log-probs using the model + # Use tensordict since we want to process the "full" entry + ref_td = self.model(tensordict.copy()) + tmp_log_probs_key = (self.model.log_probs_key, "full") + + # Apply assistant masking if requested if self.assistant_only: - with torch.device( - self.device - ) if self.device is not None else contextlib.nullcontext(): - # Get assistant mask - history: History = td.get(self.history_key) - proc = history.apply_chat_template( - tokenizer=self.actor.tokenizer - if self.tokenizer is None - else self.tokenizer, - **self.tokenizer_kwargs, + log_probs = self._mask_assistant_tokens(ref_td, tmp_log_probs_key) + 
ref_td.set(tmp_log_probs_key, log_probs) + + # Rename and store the log-probs + if tmp_log_probs_key != self.log_probs_full_key: + ref_td.rename_key_(tmp_log_probs_key, self.log_probs_full_key) + next_tensordict.update(ref_td, keys_to_update=(self.log_probs_full_key,)) + + return next_tensordict + + def transform_observation_spec(self, observation_spec: Composite) -> Composite: + # Add kl to observation spec + observation_spec["kl_penalty"] = Unbounded( + device=observation_spec.device, + shape=observation_spec.shape, + ) + return observation_spec + + +class RetrieveKL(Compose): + """A transform to retrieve the KL divergence between two models' log-probabilities. + + This transform combines two :class:`~torchrl.envs.llm.transforms.kl.RetrieveLogProb` instances + with a :class:`~torchrl.envs.llm.transforms.kl.KLComputation` to compute KL divergence + between a generation model and a reference model. + + .. note:: + Both gen_model and ref_model must use the same pad_output value (True or False), otherwise KL computation will fail. + + Args: + gen_model (LLMWrapperBase): the generation model, wrapped in such a way that it does not generate but computes the log-probs. + In cases where the transform is used within a :class:`~torchrl.collectors.llm.LLMCollector` run on a remote worker, the + policy may not be available ahead of time. In this case, the `gen_model` can be set to `"from_collector"` (default) to retrieve the + policy from the collector. See :meth:`~torchrl.modules.llm.policies.LLMWrapperBase.get_new_version` for more details + about generating a new version of the policy to gather the log-probs. + ref_model (LLMWrapperBase): the reference model, wrapped in such a way that it does not generate but computes the log-probs. + + Keyword Args: + assistant_only (bool): whether to only retrieve the log-probs of the assistant tokens (i.e., steps of history + where the role is `"assistant"`). 
Defaults to `None` (takes the opposite value from the `gen_model` and `ref_model` if they match, as + selection needs to happen only once, or `False` if not specified within the models). + + .. note:: When `assistant_only=True`, both models must have `input_mode='history'` to properly identify assistant tokens. + For other input modes (`"text"` or `"tokens"`), set `assistant_only=False`. + This ensures users are conscious of the limitation that assistant token identification requires structured conversation history. + + gen_log_prob_full_key (str): the key where the log-probs of the generation model are stored. Defaults to `("log_probs", "full")`. + ref_log_prob_full_key (str): the key where the log-probs of the reference model are stored. Defaults to `("ref_log_probs", "full")`. + history_key (str): the key where the history is stored. Defaults to `"history"`. + tokenizer_kwargs (dict): the keyword arguments to pass to the tokenizer to be used to apply the chat template to the history when `assistant_only` is `True`. + To control the tokenization in the actor, pass the tokenizer kwargs to the actor constructor. + Defaults to `{"return_assistant_tokens_mask": True, "tokenize": True, "return_tensors": "pt", "padding": True, "add_generation_prompt": False}`. + detach (bool): whether to exclude the log-probs from the gradient computation. Defaults to `True`. + device (torch.device): the device to use for tensor creation. Defaults to `None`. + tokenizer (transformers.AutoTokenizer): the tokenizer to be used to tokenize the input and compute the assitant mask. If not provided, the tokenizer will be inferred from the `actor`. + padding_side (str): the side of the padding when using pad_sequence. Defaults to `"left"`. + kl_key (NestedKey): the key where the KL divergence is stored. Defaults to `"kl_penalty"`. + add_to_reward (bool): whether to add the KL divergence to the reward. Defaults to `True`. + coeff (float): the coefficient for the KL term when adding to reward. 
Defaults to `1.0`. + padding_side (str): the side of the padding when using pad_sequence. Defaults to `"left"`. + **kwargs: additional arguments to pass to the `RetrieveLogProb` transform. + + Examples: + >>> from torchrl.data.llm import History + >>> from torchrl.modules.llm import TransformersWrapper + >>> from torchrl.modules.llm.policies import ChatHistory + >>> from transformers import AutoTokenizer, OPTConfig, OPTForCausalLM + >>> from tensordict import TensorDict, set_list_to_stack + >>> import torch + >>> + >>> # Set up list to stack for History + >>> set_list_to_stack(True).set() + >>> + >>> # Create chat data + >>> chats = [ + ... [ + ... {"role": "system", "content": "You are a helpful assistant."}, + ... {"role": "user", "content": "Hello, how are you?"}, + ... {"role": "assistant", "content": "I'm doing well, thank you!"}, + ... ], + ... [ + ... {"role": "system", "content": "You are a helpful assistant."}, + ... {"role": "user", "content": "What's the weather like?"}, + ... {"role": "assistant", "content": "I can't check the weather for you."}, + ... ], + ... ] + >>> history = History.from_chats(chats) + >>> print(f"Created history with shape: {history.shape}") + Created history with shape: torch.Size([2, 3]) + >>> + >>> # Setup tokenizer and model + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") + >>> tokenizer.pad_token = tokenizer.eos_token + >>> model = OPTForCausalLM(OPTConfig()).eval() + >>> + >>> # Create generation and reference models + >>> gen_model = TransformersWrapper( + ... model, + ... tokenizer=tokenizer, + ... input_mode="history", + ... generate=False, + ... return_log_probs=True, + ... pad_output=True, + ... log_probs_key="gen_log_probs", + ... ) + >>> ref_model = TransformersWrapper( + ... model, + ... tokenizer=tokenizer, + ... input_mode="history", + ... generate=False, + ... return_log_probs=True, + ... pad_output=True, + ... log_probs_key="ref_log_probs", + ... 
) + >>> + >>> # Create RetrieveKL transform + >>> transform = RetrieveKL( + ... gen_model=gen_model, + ... ref_model=ref_model, + ... assistant_only=True, + ... tokenizer=tokenizer, + ... ) + >>> + >>> # Prepare data with next tensordict using ChatHistory + >>> chat_history = ChatHistory(full=history) + >>> next_td = TensorDict(history=chat_history, batch_size=(2,)) + >>> data = TensorDict(history=chat_history, next=next_td, batch_size=(2,)) + >>> + >>> # Apply transform + >>> result = transform(data) + >>> kl = result["next"].get("kl_penalty") + >>> print(f"KL shape: {kl.shape}") + KL shape: torch.Size([2, 26]) + + Note: + **Input Mode Compatibility:** + - When `assistant_only=True`, both models must have `input_mode='history'` to properly identify assistant tokens. + - When `assistant_only=False`, the transform works with any input mode (`"history"`, `"text"`, or `"tokens"`). + - This design ensures users are conscious of the limitation that assistant token identification requires structured conversation history. + + .. seealso:: + :class:`~torchrl.envs.llm.transforms.kl.RetrieveLogProb`: The base transform for retrieving log-probabilities from a single model. + :class:`~torchrl.envs.llm.transforms.kl.KLComputation`: The transform that computes KL divergence between two log-prob tensors. + :class:`~torchrl.envs.llm.transforms.kl.KLRewardTransform`: A legacy transform for KL reward computation (use `RetrieveKL` instead). 
+ """ + + def __init__( + self, + gen_model: LLMWrapperBase | Literal["from_collector"] = "from_collector", + ref_model: LLMWrapperBase | None = None, + *, + assistant_only: bool | None = None, + history_key: str = "history", + tokenizer_kwargs: dict[str, Any] | None = None, + detach: bool = True, + device: torch.device | None = None, + tokenizer: transformers.AutoTokenizer | None = None, + padding_side: str = "left", + gen_log_probs_full_key: NestedKey = ("log_probs", "full"), + ref_log_probs_full_key: NestedKey = ("ref_log_probs", "full"), + kl_key: NestedKey = "kl_penalty", + add_to_reward: bool = True, + coeff: float = 1.0, + **kwargs, + ): + if isinstance(gen_model, str) and gen_model == "from_collector": + # Lazy init + self._initialized = False + self._init_params = { + "ref_model": ref_model, + "assistant_only": assistant_only, + "history_key": history_key, + "tokenizer_kwargs": tokenizer_kwargs, + "detach": detach, + "device": device, + "tokenizer": tokenizer, + "gen_log_probs_full_key": gen_log_probs_full_key, + "ref_log_probs_full_key": ref_log_probs_full_key, + "kl_key": kl_key, + "add_to_reward": add_to_reward, + "coeff": coeff, + "padding_side": padding_side, + **kwargs, + } + super().__init__() + return + + self._initialized = True + + # Check pad_output consistency if both models are provided + if hasattr(gen_model, "pad_output") and hasattr(ref_model, "pad_output"): + if gen_model.pad_output != ref_model.pad_output: + raise ValueError( + f"pad_output mismatch: gen_model.pad_output={gen_model.pad_output}, " + f"ref_model.pad_output={ref_model.pad_output}. " + "Both models must use the same padding strategy for KL computation." 
) - assistant_masks = proc.get("assistant_masks", as_list=True) - log_probs = td.get(self.log_prob_key, as_list=True) - log_probs = [ - lp[mask.bool()] - for lp, mask in _zip_strict(log_probs, assistant_masks) - ] - td = td.set(self.log_prob_key, log_probs) - return next_tensordict.update(td) + + if not getattr(gen_model, "return_log_probs", True): + raise ValueError( + "The generation model must have `return_log_probs=True` to use the `RetrieveKL` transform." + ) + elif getattr(gen_model, "generate", False): + raise ValueError( + "The generation model must have `generate=False` to use the `RetrieveKL` transform." + ) + + if not getattr(ref_model, "return_log_probs", True): + raise ValueError( + "The reference model must have `return_log_probs=True` to use the `RetrieveKL` transform." + ) + elif getattr(ref_model, "generate", False): + raise ValueError( + "The reference model must have `generate=False` to use the `RetrieveKL` transform." + ) + if getattr(gen_model, "log_probs_key", "gen_log_probs") == getattr( + ref_model, "log_probs_key", "log_probs" + ): + raise ValueError( + "The generation and reference models must have different `log_prob_key` values to use the `RetrieveKL` transform." 
+ ) + t1 = RetrieveLogProb( + gen_model, + log_probs_full_key=gen_log_probs_full_key, + assistant_only=assistant_only, + tokenizer_kwargs=tokenizer_kwargs, + detach=detach, + device=device, + tokenizer=tokenizer, + padding_side=padding_side, + **kwargs, + ) + t2 = RetrieveLogProb( + ref_model, + log_probs_full_key=ref_log_probs_full_key, + assistant_only=assistant_only, + tokenizer_kwargs=tokenizer_kwargs, + detach=detach, + device=device, + tokenizer=tokenizer, + padding_side=padding_side, + **kwargs, + ) + t3 = KLComputation( + gen_log_probs_full_key=gen_log_probs_full_key, + ref_log_probs_full_key=ref_log_probs_full_key, + kl_key=kl_key, + add_to_reward=add_to_reward, + coeff=coeff, + ) + super().__init__(t1, t2, t3) + + def _init_deferred(self): + torchrl_logger.info("Initializing RetrieveKL transform") + container = self.container + if container is None: + # also logging, since this will be sometimes hidden within the AttributeError + torchrl_logger.warning( + "The container is not set. Please set the container before calling this method." + ) + raise ValueError( + "The container is not set. Please set the container before calling this method." + ) + container.empty_cache() + self.empty_cache() + collector = self.collector + if collector is None: + # also logging, since this will be sometimes hidden within the AttributeError + torchrl_logger.warning( + "The collector is not set. Please set the collector before calling this method." + ) + raise ValueError( + "The collector is not set. Please set the collector before calling this method." + ) + ref_model = self._init_params["ref_model"] + pad_output = getattr(ref_model, "pad_output", None) + gen_log_probs_full_key = self._init_params["gen_log_probs_full_key"] + if ( + not isinstance(gen_log_probs_full_key, tuple) + or gen_log_probs_full_key[-1] != "full" + ): + raise ValueError( + f"The gen_log_probs_full_key {gen_log_probs_full_key} is not a tuple or does not end with 'full'. 
" + "This may cause issues with the KL computation. " + "Please use a tuple with the log_probs_key and 'full' as the last element." + ) + log_probs_key = gen_log_probs_full_key[:-1] + gen_model = collector.policy.get_new_version( + generate=False, + return_log_probs=True, + log_probs_key=log_probs_key, + input_mode=ref_model.input_mode, + input_key=(ref_model.input_mode, "full"), + pad_output=pad_output, # Pass pad_output from ref_model + ) + # Create the transforms manually instead of calling __init__ + t1 = RetrieveLogProb( + gen_model, + log_probs_full_key=gen_log_probs_full_key, + assistant_only=self._init_params["assistant_only"], + tokenizer_kwargs=self._init_params["tokenizer_kwargs"], + detach=self._init_params["detach"], + device=self._init_params["device"], + tokenizer=self._init_params["tokenizer"], + padding_side=self._init_params["padding_side"], + ) + ref_log_probs_full_key = self._init_params["ref_log_probs_full_key"] + if ( + not isinstance(ref_log_probs_full_key, tuple) + or ref_log_probs_full_key[-1] != "full" + ): + raise ValueError( + f"The ref_log_probs_full_key {ref_log_probs_full_key} is not a tuple or does not end with 'full'. " + "This may cause issues with the KL computation. " + "Please use a tuple with the log_probs_key and 'full' as the last element." 
+ ) + t2 = RetrieveLogProb( + ref_model, + log_probs_full_key=ref_log_probs_full_key, + assistant_only=self._init_params["assistant_only"], + tokenizer_kwargs=self._init_params["tokenizer_kwargs"], + detach=self._init_params["detach"], + device=self._init_params["device"], + tokenizer=self._init_params["tokenizer"], + padding_side=self._init_params["padding_side"], + ) + t3 = KLComputation( + gen_log_probs_full_key=gen_log_probs_full_key, + ref_log_probs_full_key=ref_log_probs_full_key, + kl_key=self._init_params["kl_key"], + add_to_reward=self._init_params["add_to_reward"], + coeff=self._init_params["coeff"], + ) + # Replace the transforms in the Compose + self.transforms.extend([t1, t2, t3]) + del self._init_params + self._initialized = True + torchrl_logger.info("Successfully initialized") + + def _step( + self, tensordict: TensorDictBase, next_tensordict: TensorDictBase + ) -> TensorDictBase: + if not self._initialized: + self._init_deferred() + return super()._step(tensordict, next_tensordict) + + def _reset( + self, tensordict: TensorDictBase, tensordict_reset: TensorDictBase + ) -> TensorDictBase: + if not self._initialized: + self._init_deferred() + return super()._reset(tensordict, tensordict_reset) + + def forward(self, tensordict: TensorDictBase) -> TensorDictBase: + if not self._initialized: + self._init_deferred() + return super().forward(tensordict) + + def transform_observation_spec(self, observation_spec: Composite) -> Composite: + if not self._initialized: + self._init_deferred() + return super().transform_observation_spec(observation_spec) + + def transform_reward_spec(self, reward_spec: Composite) -> Composite: + if not self._initialized: + self._init_deferred() + return super().transform_reward_spec(reward_spec) + + def _inv_call(self, tensordict: TensorDictBase) -> TensorDictBase: + if not self._initialized: + self._init_deferred() + return super()._inv_call(tensordict) + + def transform_action_spec(self, action_spec: Composite) -> Composite: + 
if not self._initialized: + self._init_deferred() + return super().transform_action_spec(action_spec) + + def transform_input_spec(self, input_spec: Composite) -> Composite: + if not self._initialized: + self._init_deferred() + return super().transform_input_spec(input_spec) + + def transform_output_spec(self, output_spec: Composite) -> Composite: + if not self._initialized: + self._init_deferred() + return super().transform_output_spec(output_spec) + + def transform_state_spec(self, state_spec: Composite) -> Composite: + if not self._initialized: + self._init_deferred() + return super().transform_state_spec(state_spec) + + +class KLComputation(Transform): + """A transform to compute KL divergence between two log-prob tensors and optionally add it to the reward. + + This transform computes KL divergence between generation and reference log-probabilities + and can optionally subtract it from the reward (for KL penalty). It's designed to work + with the :class:`~torchrl.envs.llm.transforms.kl.RetrieveLogProb` and :class:`~torchrl.envs.llm.transforms.kl.RetrieveKL` transforms. + + .. note:: + Both input log-prob tensors must use the same padding strategy (pad_output) for correct KL computation. + + Args: + gen_log_probs_full_key (NestedKey): the key where the generation model log-probs are stored. + Defaults to `("gen_log_probs", "full")`. + ref_log_probs_full_key (NestedKey): the key where the reference model log-probs are stored. + Defaults to `("ref_log_probs", "full")`. + kl_key (NestedKey): the key where the KL divergence is stored. Defaults to `"kl_penalty"`. + add_to_reward (bool): whether to add the KL divergence to the reward. Defaults to `True`. + coeff (float): the coefficient for the KL term when adding to reward. Defaults to `1.0`. + padding_side (str): the side of the padding when using pad_sequence. Defaults to `"left"`. 
+ + Examples: + >>> from tensordict import TensorDict + >>> import torch + >>> + >>> # Create sample log-probs + >>> gen_log_probs = torch.randn(2, 10) # 2 samples, 10 tokens each + >>> ref_log_probs = torch.randn(2, 10) + >>> + >>> # Create data with next tensordict + >>> next_td = TensorDict( + ... { + ... ("gen_log_probs", "full"): gen_log_probs, + ... ("ref_log_probs", "full"): ref_log_probs, + ... "reward": torch.randn(2, 10, 1), + ... }, + ... batch_size=(2,) + ... ) + >>> data = TensorDict(next=next_td, batch_size=(2,)) + >>> + >>> # Create KLComputation transform + >>> kl_transform = KLComputation( + ... gen_log_probs_full_key=("gen_log_probs", "full"), + ... ref_log_probs_full_key=("ref_log_probs", "full"), + ... kl_key="kl_penalty", + ... add_to_reward=True, + ... coeff=1.0, + ... ) + >>> + >>> # Apply transform + >>> result = kl_transform(data) + >>> kl = result["next"].get("kl_penalty") + >>> print(f"KL shape: {kl.shape}") + KL shape: torch.Size([2, 10]) + + .. seealso:: + :class:`~torchrl.envs.llm.transforms.kl.RetrieveLogProb`: The base transform for retrieving log-probabilities from a single model. + :class:`~torchrl.envs.llm.transforms.kl.RetrieveKL`: A higher-level transform that combines two `RetrieveLogProb` instances with `KLComputation`. + :class:`~torchrl.envs.llm.transforms.kl.KLRewardTransform`: A legacy transform for KL reward computation (use `RetrieveKL` instead). 
+ + """ + + def __init__( + self, + gen_log_probs_full_key: NestedKey = ("log_probs", "full"), + ref_log_probs_full_key: NestedKey = ("ref_log_probs", "full"), + *, + kl_key: NestedKey = "kl_penalty", + add_to_reward: bool = True, + coeff: float = 1.0, + padding_side: str = "left", + ): + in_keys = [gen_log_probs_full_key, ref_log_probs_full_key] + if add_to_reward: + in_keys.append("reward") + out_keys = [kl_key] + if add_to_reward: + out_keys.append("reward") + super().__init__(in_keys=in_keys, out_keys=out_keys) + + self.gen_log_probs_full_key = gen_log_probs_full_key + self.ref_log_probs_full_key = ref_log_probs_full_key + self.kl_key = kl_key + self.add_to_reward = add_to_reward + self.coeff = coeff + self.padding_side = padding_side + + def forward(self, tensordict: TensorDictBase) -> TensorDictBase: + next_td = tensordict.get("next") + has_next_td = True + if next_td is None: + next_td = tensordict + has_next_td = False + next_td = self._step(tensordict, next_td) + if has_next_td: + return tensordict.set("next", next_td) + return next_td + + def _step( + self, tensordict: TensorDictBase, next_tensordict: TensorDictBase + ) -> TensorDictBase: + # Get log-probs + gen_log_probs = next_tensordict.get(self.gen_log_probs_full_key, as_list=True) + ref_log_probs = next_tensordict.get(self.ref_log_probs_full_key, as_list=True) + + if gen_log_probs is None or ref_log_probs is None: + raise ValueError( + f"Log-probs not found. 
Expected keys: {self.gen_log_probs_key}, {self.ref_log_probs_key}" + ) + + # Debug: Check lengths and shapes + if len(gen_log_probs) != len(ref_log_probs): + raise ValueError( + f"Batch size mismatch: gen_log_probs has {len(gen_log_probs)} samples, ref_log_probs has {len(ref_log_probs)} samples" + ) + + # Check individual sequence lengths + for i, (gen_lp, ref_lp) in enumerate(_zip_strict(gen_log_probs, ref_log_probs)): + if gen_lp.shape != ref_lp.shape: + raise ValueError( + f"Sample {i} has different shapes: gen_log_probs[{i}].shape={gen_lp.shape}, ref_log_probs[{i}].shape={ref_lp.shape}" + ) + + # Compute KL divergence: KL(p||q) = E_p[log p - log q] + # Here gen_log_probs = log p, ref_log_probs = log q + kl = [ + gen_lp - ref_lp + for gen_lp, ref_lp in _zip_strict(gen_log_probs, ref_log_probs) + ] + + kl = torch.nested.as_nested_tensor(kl, layout=torch.strided) + + next_tensordict.set(self.kl_key, kl) + + # Add to reward if requested + if self.add_to_reward: + reward = next_tensordict.get("reward", as_list=True) + if reward is not None: + if isinstance(reward, list): + if reward[0].ndim != kl[0].ndim + 1: + raise ValueError( + f"The rewards have shape {reward[0].shape} but the kl has shape {kl[0].shape}. " + f"The rewards should have one more dimension than the KL." + ) + reward = [ + r - self.coeff * k.unsqueeze(-1) + for r, k in _zip_strict(reward, kl) + ] + next_tensordict.set( + "reward", + torch.nested.as_nested_tensor(reward, layout=torch.strided), + ) + else: + if reward.ndim != kl.ndim + 1: + raise ValueError( + f"The rewards have shape {reward.shape} but the kl has shape {kl.shape}. " + f"The rewards should have one more dimension than the KL." 
+ ) + reward = reward - self.coeff * kl.unsqueeze(-1) + next_tensordict.set("reward", reward) + + return next_tensordict + + def transform_observation_spec(self, observation_spec: Composite) -> Composite: + # Add kl to observation spec + observation_spec[self.kl_key] = Unbounded( + device=observation_spec.device, + shape=observation_spec.shape, + ) + return observation_spec + + def transform_reward_spec(self, reward_spec: Composite) -> Composite: + # Optionally adjust reward spec if KL is added to reward + if self.add_to_reward: + shape = reward_spec["reward"].shape + # For LLMs, the shape of the reward is (batch, -1, 1) + shape = (*shape, -1, 1) + reward_spec["reward"] = reward_spec["reward"].clone() + reward_spec["reward"].shape = torch.Size(shape) + return reward_spec diff --git a/torchrl/envs/llm/transforms/policy_version.py b/torchrl/envs/llm/transforms/policy_version.py index 711326be410..493b630780c 100644 --- a/torchrl/envs/llm/transforms/policy_version.py +++ b/torchrl/envs/llm/transforms/policy_version.py @@ -178,10 +178,12 @@ def transform_observation_spec(self, spec: Composite) -> Composite: """ if self.version_type in (str, "uuid"): spec["policy_version"] = NonTensor( - example_data=uuid.uuid4(), shape=spec.shape + example_data=uuid.uuid4(), shape=spec.shape, device=spec.device ) elif self.version_type in (int, "int"): - spec["policy_version"] = Unbounded(shape=spec.shape, dtype=torch.int64) + spec["policy_version"] = Unbounded( + shape=spec.shape, dtype=torch.int64, device=spec.device + ) else: raise ValueError(f"Invalid version type: {self.version_type}") return spec diff --git a/torchrl/envs/llm/transforms/reason.py b/torchrl/envs/llm/transforms/reason.py index 6890d45b80e..0db8aa1cdbd 100644 --- a/torchrl/envs/llm/transforms/reason.py +++ b/torchrl/envs/llm/transforms/reason.py @@ -9,8 +9,9 @@ from typing import Callable, Literal from tensordict import lazy_stack, TensorDictBase +from torchrl._utils import logger as torchrl_logger -from 
torchrl.data.llm.chat import History +from torchrl.data.llm.history import History from torchrl.envs import Transform from torchrl.envs.common import EnvBase @@ -161,12 +162,24 @@ def _step( next_tensordict.update(lazy_stack(ntds)) return next_tensordict + # Check that base_env is on history mode + parent = self.parent + if parent is None: + raise RuntimeError("AddThinkingPrompt must be used with a ChatEnv") + base_env = parent.base_env + if base_env.input_mode != "history": + raise RuntimeError( + "AddThinkingPrompt must be used with a ChatEnv in history mode" + ) + # Check if we should add the thinking prompt if self.cond(next_tensordict): - history: History = next_tensordict["history"] + torchrl_logger.info("Adding thinking prompt.") + history: History = next_tensordict["history"].prompt last_turn = history[..., -1] if self.edit_last_turn: + # Edit the last assistant response content = last_turn.content modified_content = self._replace_answer_with_prompt(content) @@ -181,14 +194,14 @@ def _step( # Replace the last turn in history history = history[..., :-1].append(new_turn) - next_tensordict["history"] = history + next_tensordict["history"].prompt = history else: # Add a new message prompt = self.prompt history = history.append(History(role=self.role, content=prompt)) - next_tensordict["history"] = history + next_tensordict["history"].prompt = history if self.undo_done: parent: EnvBase = self.parent @@ -208,6 +221,8 @@ def _step( reward = next_tensordict.get(key) if reward is not None: next_tensordict.set(key, reward.zero_()) + else: + torchrl_logger.info("Not adding thinking prompt.") return next_tensordict def _replace_answer_with_prompt(self, content: str) -> str: @@ -223,6 +238,7 @@ def _replace_answer_with_prompt(self, content: str) -> str: The modified content with the answer replaced by the thinking prompt """ # Pattern to match ... 
with optional EOS token + # Use non-greedy matching and be more specific about the end answer_pattern = r".*?(?:\s*<\|im_end\|>)?" # Check if there's an answer tag @@ -230,12 +246,24 @@ def _replace_answer_with_prompt(self, content: str) -> str: # Replace the answer section with the thinking prompt prompt = self.prompt - # Replace the answer section + # Replace the answer section, but preserve the EOS token if it exists modified_content = re.sub(answer_pattern, prompt, content, flags=re.DOTALL) # Clean up any trailing whitespace modified_content = modified_content.rstrip() + # Ensure we end with the EOS token if the original content had it + if content.endswith("<|im_end|>"): + modified_content = modified_content.rstrip() + "<|im_end|>" + + # Ensure proper spacing around the prompt + if not modified_content.endswith(prompt): + # If the prompt wasn't properly inserted, append it + modified_content = content.rstrip() + if modified_content.endswith("<|im_end|>"): + modified_content = modified_content[: -len("<|im_end|>")].rstrip() + modified_content = modified_content + "\n\n" + prompt + "<|im_end|>" + else: # No answer tag found, just append the prompt prompt = self.prompt diff --git a/torchrl/envs/llm/transforms/tools.py b/torchrl/envs/llm/transforms/tools.py index 3492cbb69b5..a9fd7e28434 100644 --- a/torchrl/envs/llm/transforms/tools.py +++ b/torchrl/envs/llm/transforms/tools.py @@ -508,11 +508,15 @@ def _process_llm_response(self, response: str, i: int) -> list[str]: return results - def _call(self, next_tensordict: TensorDictBase) -> TensorDictBase: + def _step( + self, tensordict: TensorDictBase, next_tensordict: TensorDictBase + ) -> TensorDictBase: if next_tensordict.batch_dims > 1: - with next_tensordict.view(-1) as next_tensordict_flat: + with next_tensordict.view(-1) as next_tensordict_flat, tensordict.view( + -1 + ) as tensordict_flat: # Call the transform on the flattened tensordict - next_tensordict_flat = self._call(next_tensordict_flat) + 
next_tensordict_flat = self._step(tensordict_flat, next_tensordict_flat) return next_tensordict # Ensure we have enough processes for the batch @@ -520,7 +524,7 @@ def _call(self, next_tensordict: TensorDictBase) -> TensorDictBase: self._ensure_processes(len(next_tensordict)) # Convert text to a history - history = next_tensordict["history"] + history = next_tensordict["history"].prompt # Isolate last element, which should be our action local_history = history[..., -1] @@ -555,7 +559,7 @@ def fill_procs(proc: list[History], max_len: int) -> list[History]: # Procs has the shape of the batch-size. We can cat along dim=-1 procs = lazy_stack([lazy_stack(p) for p in procs]) history.extend(procs, dim=-1) - next_tensordict["history"] = history + next_tensordict["history"].prompt = history return next_tensordict def __del__(self): @@ -765,8 +769,18 @@ def _call(self, next_tensordict: TensorDictBase) -> TensorDictBase: next_tensordict_flat = self._call(next_tensordict_flat) return next_tensordict + # Check that base_env is on history mode + parent = self.parent + if parent is None: + raise RuntimeError("MCPToolTransform must be used with a ChatEnv") + base_env = parent.base_env + if base_env.input_mode != "history": + raise RuntimeError( + "MCPToolTransform must be used with a ChatEnv in history mode" + ) + # Convert text to a history - history = next_tensordict["history"] + history = next_tensordict["history"].prompt # Isolate last element, which should be our action local_history = history[..., -1] @@ -801,7 +815,7 @@ def fill_procs(proc: list[History], max_len: int) -> list[History]: # Procs has the shape of the batch-size. 
We can cat along dim=-1 procs = lazy_stack([lazy_stack(p) for p in procs]) history.extend(procs, dim=-1) - next_tensordict["history"] = history + next_tensordict["history"].prompt = history return next_tensordict def _reset( diff --git a/torchrl/envs/transforms/transforms.py b/torchrl/envs/transforms/transforms.py index a00747d7e02..6857d74545a 100644 --- a/torchrl/envs/transforms/transforms.py +++ b/torchrl/envs/transforms/transforms.py @@ -308,6 +308,19 @@ def out_keys_inv(self, value): value = [unravel_key(val) for val in value] self._out_keys_inv = value + @property + def collector(self) -> DataCollectorBase | None: # noqa: F821 # type: ignore + """Returns the collector associated with the container, if it exists. + + This can be used whenever the transform needs to be made aware of the collector or the policy associated with it. + + Make sure to call this property only on transforms that are not nested in sub-processes. + The collector reference will not be passed to the workers of a :class:`~torchrl.envs.ParallelEnv` or + similar batched environments. + + """ + return self.container.collector + def _reset( self, tensordict: TensorDictBase, tensordict_reset: TensorDictBase ) -> TensorDictBase: @@ -687,7 +700,7 @@ def clone(self) -> T: return self_copy @property - def container(self): + def container(self) -> EnvBase | None: """Returns the env containing the transform. 
Examples: @@ -952,6 +965,13 @@ def add_truncated_keys(self) -> TransformedEnv: self.empty_cache() return self + # def _post_step_mdp_hooks(self, tensordict: TensorDictBase) -> TensorDictBase: + # """Allows modification of the tensordict after the step_mdp.""" + # if type(self.base_env)._post_step_mdp_hooks is not None: + # If the base env has a _post_step_mdp_hooks, we call it + # tensordict = self.base_env._post_step_mdp_hooks(tensordict) + # return tensordict + def _set_env(self, env: EnvBase, device) -> None: if device != env.device: env = env.to(device) @@ -1178,6 +1198,7 @@ def _reset(self, tensordict: TensorDictBase | None = None, **kwargs): if tensordict is not None: # We must avoid modifying the original tensordict so a shallow copy is necessary. # We just select the input data and reset signal, which is all we need. + self.transform.transform_input_spec(self.base_env.input_spec.unlock_()) tensordict = tensordict.select( *self.reset_keys, *self.state_spec.keys(True, True), strict=False ) @@ -6502,13 +6523,16 @@ def _reset_func( if self.single_default_value and callable(self.default_value): if not _reset.all(): # FIXME: use masked op - tensordict_reset = tensordict_reset.clone() + # tensordict_reset = tensordict_reset.clone() reset_val = self.default_value(reset=_reset) - # This is safe because env.reset calls _update_during_reset which will discard the new data - tensordict_reset = ( - self.container.full_observation_spec.zero().select( - *reset_val.keys(True) - ) + # This is safE because env.reset calls _update_during_reset which will discard the new data + # tensordict_reset = ( + # self.container.full_observation_spec.zero().select( + # *reset_val.keys(True) + # ) + # ) + tensordict_reset = reset_val.new_zeros( + _reset.shape, empty_lazy=True ) tensordict_reset[_reset] = reset_val else: diff --git a/torchrl/envs/transforms/utils.py b/torchrl/envs/transforms/utils.py index 8ef96c04ce0..b5302329bb2 100644 --- a/torchrl/envs/transforms/utils.py +++ 
b/torchrl/envs/transforms/utils.py @@ -24,7 +24,31 @@ def new_fun(self, *args, **kwargs): class _set_missing_tolerance: -    """Context manager to change the transform tolerance to missing values.""" +    """Context manager to change the transform tolerance to missing values. + + If a transform has a missing_tolerance of True, it will not raise an error if a key is missing during reset. + + This is implemented via :meth:`~torchrl.envs.transforms.Transform.set_missing_tolerance`. + + The way this is handled is that, if `_reset` calls the default `_call` method, it will not raise an error if an input key is missing. + + For custom `_reset` methods, you should implement this yourself: + + Examples: + >>> def _reset(self, tensordict: TensorDictBase, tensordict_reset: TensorDictBase) -> TensorDictBase: + ... with _set_missing_tolerance(self, True): + ... tensordict_reset = self.foo(tensordict, tensordict_reset) + ... return tensordict_reset + >>> def foo(self, tensordict: TensorDictBase, tensordict_reset: TensorDictBase) -> TensorDictBase: + ... if self.input_keys[0] not in tensordict_reset and self.missing_tolerance: + ... return tensordict_reset + ... else: + ... # your code here + + Because `missing_tolerance` will be turned off during calls to `_step`, you can be sure that an appropriate KeyError will be raised + if the input key is missing at that time. 
+ + """ def __init__(self, transform, mode): self.transform = transform diff --git a/torchrl/envs/utils.py b/torchrl/envs/utils.py index 0346a25935e..81485ff8e4e 100644 --- a/torchrl/envs/utils.py +++ b/torchrl/envs/utils.py @@ -93,11 +93,11 @@ def __init__( exclude_done: bool = False, exclude_action: bool = True, ): - action_keys = env.action_keys - done_keys = env.done_keys - reward_keys = env.reward_keys - observation_keys = env.full_observation_spec.keys(True, True) - state_keys = env.full_state_spec.keys(True, True) + action_keys = env._action_keys_step_mdp + done_keys = env._done_keys_step_mdp + reward_keys = env._reward_keys_step_mdp + observation_keys = env._observation_keys_step_mdp + state_keys = env._state_keys_step_mdp self.action_keys = [unravel_key(key) for key in action_keys] self.done_keys = [unravel_key(key) for key in done_keys] self.observation_keys = list(observation_keys) @@ -245,6 +245,8 @@ def _grab_and_place( else: if is_non_tensor(val): val = val.clone() + if is_tensor_collection(val): + val = val.copy() data_out._set_str( key, val, validated=True, inplace=False, non_blocking=False ) @@ -957,6 +959,7 @@ def make_shape(shape): # Assume all the non-tensors have the same datatype example_data=tensor.view(-1)[0].data, device=tensor.device, + feature_dims=len(tensor.shape) - len(data.shape), ) if is_non_tensor(tensor) else Unbounded( @@ -1463,7 +1466,9 @@ def _update_during_reset( reset = reset.any(-1) reset = reset.reshape(node.shape) # node.update(node.where(~reset, other=node_reset, pad=0)) - node.where(~reset, other=node_reset, out=node, pad=0) + node.where( + ~reset, other=node_reset, out=node, pad=0, update_batch_size=True + ) # node = node.clone() # idx = reset.nonzero(as_tuple=True)[0] # node[idx].update(node_reset[idx]) diff --git a/torchrl/modules/__init__.py b/torchrl/modules/__init__.py index e80d5b427dc..a349aba6635 100644 --- a/torchrl/modules/__init__.py +++ b/torchrl/modules/__init__.py @@ -140,6 +140,7 @@ 
"MaskedOneHotCategorical", "MultiAgentConvNet", "MultiAgentMLP", + "LLMMaskedCategorical", "MultiAgentNetBase", "MultiStepActorWrapper", "NoisyLazyLinear", diff --git a/torchrl/modules/distributions/__init__.py b/torchrl/modules/distributions/__init__.py index 17d7bef7085..1102637e26c 100644 --- a/torchrl/modules/distributions/__init__.py +++ b/torchrl/modules/distributions/__init__.py @@ -58,6 +58,7 @@ "distributions", "Delta", "IndependentNormal", + "LLMMaskedCategorical", "NormalParamWrapper", "TanhDelta", "TanhNormal", diff --git a/torchrl/modules/llm/__init__.py b/torchrl/modules/llm/__init__.py index 735715866ff..3ec911506ca 100644 --- a/torchrl/modules/llm/__init__.py +++ b/torchrl/modules/llm/__init__.py @@ -11,14 +11,28 @@ vLLMWorker, ) -from .policies import CategoricalSequential, TransformersWrapper, vLLMWrapper +from .policies import ( + ChatHistory, + LLMWrapperBase, + LogProbs, + Masks, + Text, + Tokens, + TransformersWrapper, + vLLMWrapper, +) __all__ = [ - "CategoricalSequential", + "LLMWrapperBase", "LLMOnDevice", "TransformersWrapper", "make_vllm_worker", + "ChatHistory", "stateless_init_process_group", "vLLMWorker", "vLLMWrapper", + "Text", + "LogProbs", + "Masks", + "Tokens", ] diff --git a/torchrl/modules/llm/policies/__init__.py b/torchrl/modules/llm/policies/__init__.py index e91ec9901cf..1bdf27e0db1 100644 --- a/torchrl/modules/llm/policies/__init__.py +++ b/torchrl/modules/llm/policies/__init__.py @@ -5,9 +5,18 @@ from __future__ import annotations -from .common import CategoricalSequential +from .common import ChatHistory, LLMWrapperBase, LogProbs, Masks, Text, Tokens from .transformers_wrapper import TransformersWrapper from .vllm_wrapper import vLLMWrapper -__all__ = ["TransformersWrapper", "vLLMWrapper", "CategoricalSequential"] +__all__ = [ + "TransformersWrapper", + "vLLMWrapper", + "LLMWrapperBase", + "Text", + "LogProbs", + "Masks", + "Tokens", + "ChatHistory", +] diff --git a/torchrl/modules/llm/policies/common.py 
b/torchrl/modules/llm/policies/common.py index 2021638406c..aaee8cd44ed 100644 --- a/torchrl/modules/llm/policies/common.py +++ b/torchrl/modules/llm/policies/common.py @@ -4,63 +4,805 @@ # LICENSE file in the root directory of this source tree. from __future__ import annotations +import weakref +from typing import Any, Literal, overload + import torch from tensordict import NestedKey, TensorDictBase from tensordict.nn import TensorDictModuleBase, TensorDictSequential +from tensordict.tensorclass import TensorClass +from tensordict.utils import _zip_strict from torch import distributions as D from torch.distributions import Categorical -from torchrl.modules import MaskedCategorical +from torch.nn.utils.rnn import pad_sequence +from torchrl.data.llm import History +from torchrl.data.tensor_specs import Unbounded +from torchrl.modules.distributions.discrete import LLMMaskedCategorical + +# TODOs: +# - [ ] Remove the useless view(-1) calls when num_samples is not > 1 +# - [ ] Remove as_list=True and use a context manager to handle that +# - [ ] Make sure tensordict can handle nested lazy tds that have a get(key, as_list=True) - I think it breaks atm +# - [ ] Handle packing + + +class Tokens(TensorClass["nocast"]): + """A Tokens container. + + Args: + prompt (torch.Tensor | None): The prompt tokens. + response (torch.Tensor | None): The response tokens. + assistant (torch.Tensor | None): The assistant tokens. + full (torch.Tensor | None): The tokens across prompt and response. + padded (bool | None): Whether the tokens are padded. + + Shapes: + - prompt: (batch_size, prompt_length). If padded, padded on the left. + - response: (batch_size, response_length). If padded, padded on the right. + - full: (batch_size, prompt_length + response_length). If padded, padded on the left and/or right. + - padded: bool. 
+ + """ + + prompt: torch.Tensor | None = None + response: torch.Tensor | None = None + full: torch.Tensor | None = None + padded: bool | None = None + + @classmethod + def default_spec( + cls, + shape=(-1,), + keys: list[Literal["prompt", "response", "full"]] | None = None, + ): + """A default spec to use in transforms / envs that return Tokens objects.""" + from torchrl.data import Composite, NonTensor + + if keys is None: + keys = ["prompt", "response", "full"] + + defaults = {k: Unbounded(shape=shape + (-1,)) for k in keys} + defaults["padded"] = NonTensor(shape=shape, example_data=False) + + return Composite(defaults, shape=shape[:-1], data_cls=cls, step_mdp_static=True) + + +class Masks(TensorClass["nocast"]): + """A Masks container. + + Args: + all_attention_mask (torch.Tensor | None): The attention mask across all tokens. The attention mask represents + the tokens that are not masked. and that the model can attend to. + all_assistant_mask (torch.Tensor | None): The assistant mask across all tokens, i.e. the tokens that + are produced by the assistant. + This is recovered from the the `assistant_masks` output of :meth:`~torchrl.data.llm.History.apply_chat_template`, + if the chat template supports it. + padded (bool | None): Whether the masks are padded. + + The masks always have the same shape as the `full` tensor in :class:`~torchrl.modules.llm.policies.common.Tokens`, + and :class:`~torchrl.modules.llm.policies.common.LogProbs`. 
+ + """ + + all_attention_mask: torch.Tensor | None = None + all_assistant_mask: torch.Tensor | None = None + padded: bool | None = None + + @classmethod + def default_spec( + cls, + shape=(-1,), + keys: list[Literal["all_attention_mask", "all_assistant_mask"]] | None = None, + ): + """A default spec to use in transforms / envs that return Masks objects.""" + from torchrl.data import Composite, NonTensor + + if keys is None: + keys = ["all_attention_mask", "all_assistant_mask"] + + defaults = {k: Unbounded(shape=shape + (-1,)) for k in keys} + defaults["padded"] = NonTensor(shape=shape, example_data=False) + + return Composite(defaults, shape=shape[:-1], data_cls=cls, step_mdp_static=True) + + +class ChatHistory(TensorClass["nocast"]): + """A chat history container for managing conversation data in LLM environments. + + This class serves as a structured container for chat history data, similar to how + :class:`~torchrl.modules.llm.policies.Text` and :class:`~torchrl.modules.llm.policies.Tokens` + are used for text and token data respectively. + + **Recent Changes:** + - **Modular Design**: ChatHistory is now used consistently across LLM wrappers and environments + to represent conversation state in a structured way. + - **Integration with Wrappers**: Both vLLMWrapper and TransformersWrapper now use ChatHistory + objects when `input_mode="history"` is specified. + - **Environment Support**: ChatEnv and related environments use ChatHistory for state management. + + Args: + prompt (History | None): The prompt history stack containing the conversation up to the current point. + response (History | None): The response history items (typically generated by the LLM). + full (History | None): The complete history across prompt and response. + + Example: + >>> from torchrl.data.llm import History + >>> from torchrl.modules.llm.policies import ChatHistory + >>> + >>> # Create a conversation history + >>> history = History.from_chats([[ + ... 
{"role": "user", "content": "Hello"}, + ... {"role": "assistant", "content": "Hi there!"} + ... ]]) + >>> + >>> # Create ChatHistory object for LLM wrapper input + >>> chat_history = ChatHistory(prompt=history) + >>> + >>> # Use with LLM wrapper + >>> result = wrapper(TensorDict(history=chat_history, batch_size=(1,))) + >>> print(result["history"].response) # New response from LLM + >>> print(result["history"].full) # Complete conversation + + .. seealso:: + :class:`~torchrl.modules.llm.policies.Text`: Container for text data. + :class:`~torchrl.modules.llm.policies.Tokens`: Container for token data. + :class:`~torchrl.data.llm.History`: The underlying History class for conversation data. + """ + + prompt: History | None = None + response: History | None = None + full: History | None = None + + @classmethod + def default_spec( + cls, + shape=(-1,), + keys: list[Literal["prompt", "response", "full"]] | None = None, + ): + """A default spec to use in transforms / envs that return ChatHistory objects.""" + from torchrl.data import Composite + + if keys is None: + keys = ["prompt", "response", "full"] + return Composite( + {k: History.default_spec(shape=shape + (-1,)) for k in keys}, + shape=shape[:-1], + data_cls=cls, + step_mdp_static=True, + ) + + +class LogProbs(TensorClass["nocast"]): + """A log-probability container. + + Args: + prompt (torch.Tensor | None): The prompt log-probabilities. + response (torch.Tensor | None): The response log-probabilities. + assistant (torch.Tensor | None): The assistant log-probabilities. + full (torch.Tensor | None): The log-probabilities across prompt and response. + padded (bool | None): Whether the log-probabilities are padded. + + Shapes: + - prompt: (batch_size, prompt_length). If padded, padded on the left. + - response: (batch_size, response_length). If padded, padded on the right. + - full: (batch_size, prompt_length + response_length). If padded, padded on the left and/or right. + - padded: bool. 
+ + """ + + prompt: torch.Tensor | None = None + response: torch.Tensor | None = None + full: torch.Tensor | None = None + padded: bool | None = None + + @classmethod + def default_spec( + cls, + shape=(-1,), + keys: list[Literal["prompt", "response", "full"]] | None = None, + ): + """A default spec to use in transforms / envs that return LogProbs objects.""" + from torchrl.data import Composite, NonTensor + + if keys is None: + keys = ["prompt", "response", "full"] + + defaults = {k: Unbounded(shape=shape + (-1,)) for k in keys} + defaults["padded"] = NonTensor(shape=shape, example_data=False) + return Composite(defaults, shape=shape[:-1], data_cls=cls, step_mdp_static=True) -class CategoricalSequential(TensorDictModuleBase): - """A ProbabilisticTensorDictSequential subclass meant to work with LLMs. - .. seealso:: :class:`~tensordict.nn.ProbabilisticTensorDictSequential` class. +class Text(TensorClass["nocast"]): + """A text container. + Args: + prompt (str | None): The prompt text. + response (str | None): The response text. + full (str | None): The text across prompt and response. + """ + + prompt: str | None = None + response: str | None = None + full: str | None = None + + @classmethod + def default_spec( + cls, + shape=(-1,), + keys: list[Literal["prompt", "response", "full"]] | None = None, + ): + """A default spec to use in transforms / envs that return Text objects.""" + from torchrl.data import Composite, NonTensor + + if keys is None: + keys = ["prompt", "response", "full"] + + defaults = {k: NonTensor(shape=shape, example_data="a string") for k in keys} + + return Composite(defaults, shape=shape[:-1], data_cls=cls, step_mdp_static=True) + + +class LogProbDistribution(D.Distribution): + """A distribution that works directly with log-probabilities. + + This is useful when we have pre-computed log-probabilities (e.g., from vLLM) + and want to compute log_prob() without having access to the original logits. 
+ """ + + def __init__(self, log_probs: torch.Tensor, mask: torch.Tensor | None = None): + """Initialize with log-probabilities. + + Args: + log_probs: Tensor of shape [batch, seq_len] containing log-probabilities + mask: Optional mask of shape [batch, seq_len] indicating valid positions + """ + self.log_probs = log_probs + self.mask = mask + batch_shape = log_probs.shape[:-1] if log_probs.dim() > 1 else log_probs.shape + event_shape = log_probs.shape[-1:] if log_probs.dim() > 1 else torch.Size([]) + super().__init__(batch_shape=batch_shape, event_shape=event_shape) + + def log_prob(self, value: torch.Tensor) -> torch.Tensor: + """Compute log-probability for the given tokens. + + Args: + value: Tensor of shape [batch, seq_len] containing token indices + + Returns: + Tensor of shape [batch, seq_len] containing log-probabilities + """ + # For log-prob distributions, we just return the pre-computed log-probs + # at the positions specified by the value tensor + if value.shape != self.log_probs.shape: + raise ValueError( + f"Value shape {value.shape} must match log_probs shape {self.log_probs.shape}" + ) + + result = self.log_probs.clone() + + # Apply mask if provided + if self.mask is not None: + result = torch.where( + self.mask, + result, + torch.tensor(0.0, device=result.device, dtype=result.dtype), + ) + + return result + + def sample(self, sample_shape: tuple | torch.Size | None = None) -> torch.Tensor: + """Sample from the distribution. + + Note: This is not implemented for log-prob distributions since we don't have + the full probability distribution, only the log-probs for specific tokens. + """ + raise NotImplementedError("Sampling not supported for LogProbDistribution") + + def entropy(self) -> torch.Tensor: + """Compute entropy. + + Note: This is not implemented for log-prob distributions since we don't have + the full probability distribution. 
+ """ + raise NotImplementedError("Entropy not supported for LogProbDistribution") + + +class LLMWrapperBase(TensorDictModuleBase): + r"""A LLM wrapper base class. + + This class provides a consistent interface for LLM wrappers with the following features: + - Support for different input modalities (history, text, tokens) + - Consistent output structure using TensorClass objects (Text, Tokens, Masks, LogProbs) + - Configurable generation and log-probability computation + + Args: + model: The underlying model to wrap. + + Keyword Args: + tokenizer: The tokenizer to use for encoding and decoding text. + input_mode: The input modality to use. Must be one of "history", "text", or "tokens". + input_key: The key for the input data. If None, defaults to the input_mode name. + attention_mask_key: The key for attention masks (used in "tokens" mode). + generate: Whether to enable text generation. + generate_kwargs: Additional arguments to pass to the model's generate method. + tokenizer_kwargs: Additional arguments to pass to the tokenizer. + pad_output: Whether to pad the output sequences to a uniform length. + inplace: Determines how the module should handle in-place operations. + device: The device to use for computation. + layout: The layout to use for the output tensors when pad_output=False. + num_samples: The number of samples to generate. + log_probs_key (NestedKey | None, optional): The key for the log probabilities :class:`~torchrl.modules.llm.policies.LogProbs` object. Defaults to `"log_probs"`. + text_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.Text` object. Defaults to `"text"`. + tokens_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.Tokens` object. Defaults to `"tokens"`. + masks_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.Masks` object. Defaults to `"masks"`. 
+ + Attributes: + collector: The collector associated with the module, if it exists. + + .. seealso:: + - :class:`~torchrl.modules.llm.policies.TransformersWrapper` (see :ref:`ref_transformers_wrapper`) + - :class:`~torchrl.modules.llm.policies.vLLMWrapper` (see :ref:`ref_vllm_wrapper`) """ generate: bool + pad_output: bool + text_key: NestedKey + tokens_key: NestedKey + masks_key: NestedKey + log_probs_key: NestedKey + in_keys: list[NestedKey] + out_keys: list[NestedKey] + inplace: bool + device: torch.device | None + layout: torch.layout | None + num_samples: int | None + + @overload + def __init__( + self, + model: Any | str, + *, + tokenizer: callable | str | None = None, # type: ignore + input_mode: str = "history", + input_key: NestedKey | None = None, + attention_mask_key: str = "attention_mask", + generate: bool = True, + generate_kwargs: dict | None = None, + tokenizer_kwargs: dict | None = None, + pad_output: bool = False, + inplace: Literal[True, False, "empty"] | None = None, + device: torch.device | None = None, + layout: torch.layout | None = None, + num_samples: int | None = None, + chat_template_name: Literal["chatml_format", "qwen"] | None = None, + chat_template: str | None = None, + return_log_probs: bool | None = None, + history_key: NestedKey | None = "history", + text_key: NestedKey | None = "text", + tokens_key: NestedKey | None = "tokens", + masks_key: NestedKey | None = "masks", + log_probs_key: NestedKey | None = "log_probs", + ): + ... + + def __init__(self, *args, **kwargs): + super().__init__() + + def get_new_version(self, **kwargs): + """Returns a new version of the module with altered parameters. + + For instance, the generate parameter can be altered to enable text generation or log-probabilities computation. + This is especially useful when one wants to avoid re-initializing the module with a new set of parameters, when the + same parameters could be used to gather log-probs. + + Positional arguments are not supported. 
+ + See the class constructor for more details about the parameters. + """ + raise NotImplementedError + + _collector: weakref.ReferenceType[ + LLMCollector # noqa: F821 # type: ignore + ] | None = None + + def register_collector(self, collector: LLMCollector): # noqa: F821 # type: ignore + """Registers a weak reference to the container collector. + + This is automatically called by the :class:`~torchrl.collectors.llm.LLMCollector` class. + """ + self._collector = weakref.ref(collector) + + @property + def collector(self) -> LLMCollector | None: # noqa: F821 # type: ignore + """Returns the collector associated with the module, if it exists.""" + return self._collector() if self._collector is not None else None def get_dist( self, tensordict: TensorDictBase, tensordict_out: TensorDictBase | None = None, + logits_key: NestedKey = "logits", + mask_key: NestedKey | None = None, as_padded_tensor: bool | None = None, as_nested_tensor: bool | None = None, padding_value: float | None = None, - padding_side: str = "right", + padding_side: str = "left", layout: torch.layout | None = None, **kwargs, ) -> D.Distribution: + """Get distribution from logits/log-probs with optional masking. + + Args: + tensordict: Input tensordict + tensordict_out: Output tensordict (optional) + logits_key: Key for logits/log-probs + mask_key: Key for mask (optional). + as_padded_tensor: Whether to return padded tensor. Default is False. + as_nested_tensor: Whether to return nested tensor. Default is False. + padding_value: Value for padding. Default is 0.0 for logits and False for masks. + padding_side: Side for padding. Default is left by convention. + layout: Tensor layout + **kwargs: Additional arguments + + Returns: + Distribution (Categorical or LLMMaskedCategorical) + """ + if self.generate: + raise NotImplementedError( + "get_dist is not implemented for generate=True. " + "You can create a new version of this wrapper using the `get_new_version` method." 
+ ) + td_out = self(tensordict.copy()) - # By default, pad and use masked categorical + + # Get logits/log-probs if as_padded_tensor is None: as_padded_tensor = as_nested_tensor is not True if padding_value is None: padding_value = 0.0 if as_nested_tensor is None: as_nested_tensor = False + logits = td_out.get( - "logits", + logits_key, as_padded_tensor=as_padded_tensor, as_nested_tensor=as_nested_tensor, padding_value=padding_value, padding_side=padding_side, layout=layout, ) - if as_padded_tensor: - # We can use MaskedCategorical - dist = MaskedCategorical( + + # Get mask if provided + mask = None + if mask_key is not None: + mask = td_out.get( + mask_key, + as_padded_tensor=as_padded_tensor, + as_nested_tensor=as_nested_tensor, + padding_value=False, + padding_side=padding_side, + layout=layout, + ) + elif as_padded_tensor: + # Default mask for padded tensors + mask = logits != padding_value + + if mask is not None: + return LLMMaskedCategorical( logits=logits, - mask=logits != padding_value, - use_cross_entropy=True, + mask=mask, ) - return dist return Categorical(logits) + def _get_dist_with_prompt_mask( + self, + tensordict: TensorDictBase, + tokens_key: NestedKey = ("tokens", "prompt"), + logits_key: NestedKey = "logits", + # TODO: add a prompt_mask and response_mask in Masks + assistant_mask_key: NestedKey = ("masks", "all_assistant_mask"), + attention_mask_key: NestedKey = ("masks", "all_attention_mask"), + padding_side: str = "left", + **kwargs, + ) -> D.Distribution: + """Get distribution masked to only include response tokens (exclude prompt). + + This is suitable for single-turn scenarios where we want to compute loss + only on the generated response, not the input prompt. + + Note: If prompt tokens are not available (e.g., when using history input), + this method falls back to using the assistant mask. + + Padding side is left by convention. 
+ + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + if self.generate: + raise NotImplementedError( + "get_dist_with_prompt_mask is not implemented for generate=True. " + "You can create a new version of this wrapper using the `get_new_version` method." + ) + td_out = self(tensordict.copy()) + + # Try to get prompt tokens first + if self.pad_output: + prompt_tokens = tensordict.get( + tokens_key, + as_padded_tensor=True, + padding_value=-100, + padding_side=padding_side, + ) + logits = td_out.get( + logits_key, + as_padded_tensor=True, + padding_value=0.0, + padding_side=padding_side, + ) + attention_mask = tensordict.get( + attention_mask_key, + as_padded_tensor=True, + padding_value=False, + padding_side=padding_side, + ) + assistant_mask = tensordict.get( + assistant_mask_key, + as_padded_tensor=True, + padding_value=False, + padding_side=padding_side, + ) + else: + prompt_tokens = tensordict.get(tokens_key, as_list=True) + logits = td_out.get(logits_key, as_list=True) + attention_mask = td_out.get(attention_mask_key, as_list=True) + assistant_mask = td_out.get(assistant_mask_key, as_list=True) + + if prompt_tokens is None: + if assistant_mask is None: + raise ValueError( + f"Assistant mask not found in tensordict at key {assistant_mask_key} (keys: {td_out.keys()})" + ) + if self.pad_output: + response_mask = assistant_mask.clone() + else: + response_mask = [am.clone() for am in assistant_mask] + else: + if self.pad_output: + response_mask = attention_mask.clone() + response_mask[..., : prompt_tokens.shape[-1]] = False + else: + response_mask = [] + for am, p in _zip_strict(attention_mask, prompt_tokens): + am = am.clone() + am[..., : p.size(-1)] = False + response_mask.append(am) + + if logits is None: + raise ValueError( + f"Logits not found in tensordict at key {logits_key} (keys: {td_out.keys()})" + ) + + # Make the response mask using prompt tokens + if not self.pad_output: + # Check 
that the lengths of the mask is the same as the logits + for m, lg in _zip_strict(response_mask, logits): + if m.shape[-1] != lg.shape[-2]: + raise ValueError( + f"Mask and logits have different lengths: {m.shape[-1]} != {lg.shape[-2]}.\n" + f"All the logits shapes: {[lg.shape for lg in logits]}, all the mask shapes: {[m.shape for m in response_mask]}" + ) + logits = pad_sequence( + logits, batch_first=True, padding_value=0.0, padding_side=padding_side + ) + response_mask = pad_sequence( + response_mask, + batch_first=True, + padding_value=False, + padding_side=padding_side, + ) + + return LLMMaskedCategorical( + logits=logits, + mask=response_mask.bool(), + ) + + def _get_dist_with_assistant_mask( + self, + tensordict: TensorDictBase, + assistant_mask_key: NestedKey = ("masks", "all_assistant_mask"), + logits_key: NestedKey = "logits", + padding_side: str = "left", + **kwargs, + ) -> D.Distribution: + """Get distribution masked to only include assistant tokens. + + This is suitable for multi-turn scenarios where we want to compute loss + only on assistant-generated tokens across the entire conversation. + + Padding side is left by convention. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + if self.generate: + raise NotImplementedError( + "get_dist_with_assistant_mask is not implemented for generate=True. " + "You can create a new version of this wrapper using the `get_new_version` method." 
+ ) + td_out = self(tensordict.copy()) + # Update the tokens key to reflect the tokenized history when querying the log-probs + tensordict.update( + td_out, + keys_to_update=[ + ("tokens", "full"), + ], + ) + + if self.pad_output: + logits = td_out.get(logits_key) + assistant_mask = td_out.get(assistant_mask_key) + else: + logits = td_out.get( + logits_key, + as_padded_tensor=True, + padding_value=0.0, + padding_side=padding_side, + ) + assistant_mask = td_out.get( + assistant_mask_key, + as_padded_tensor=True, + padding_value=False, + padding_side=padding_side, + ) + if logits is None: + raise ValueError(f"Logits not found in tensordict at key {logits_key}") + if assistant_mask is None: + if self.input_mode != "history": + post_msg = "This is likely because the input_mode is not 'history'." + else: + post_msg = "" + raise ValueError( + f"Assistant mask not found in tensordict at key {assistant_mask_key}. {post_msg}" + ) + + return LLMMaskedCategorical( + logits=logits, + mask=assistant_mask, + ) + + def _get_dist_with_attention_mask( + self, + tensordict: TensorDictBase, + attention_mask_key: NestedKey = ("masks", "all_attention_mask"), + logits_key: NestedKey = "logits", + padding_side: str = "left", + **kwargs, + ) -> D.Distribution: + """Get distribution masked using attention mask. + + This is suitable for generic scenarios where we want to compute loss + on all valid tokens (non-padding tokens). + + Padding side is left by convention. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + if self.generate: + raise NotImplementedError( + "get_dist_with_attention_mask is not implemented for generate=True. " + "You can create a new version of this wrapper using the `get_new_version` method." 
+ ) + td_out = self(tensordict.copy()) + if self.pad_output: + logits = td_out.get(logits_key) + attention_mask = td_out.get(attention_mask_key) + else: + logits = td_out.get( + logits_key, + as_padded_tensor=True, + padding_value=0.0, + padding_side=padding_side, + ) + attention_mask = td_out.get( + attention_mask_key, + as_padded_tensor=True, + padding_value=False, + padding_side=padding_side, + ) + + if logits is None: + raise ValueError(f"Logits not found in tensordict at key {logits_key}") + if attention_mask is None: + raise ValueError( + f"Attention mask not found in tensordict at key {attention_mask_key}" + ) + + return LLMMaskedCategorical( + logits=logits, + mask=attention_mask, + ) + + def _get_dist_with_custom_mask( + self, + tensordict: TensorDictBase, + mask: torch.Tensor, + logits_key: NestedKey = "logits", + padding_side: str = "left", + **kwargs, + ) -> D.Distribution: + """Get distribution with custom mask. + + This allows for completely custom masking logic. + + Padding side is left by convention. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + if self.generate: + raise NotImplementedError( + "get_dist_with_custom_mask is not implemented for generate=True. " + "You can create a new version of this wrapper using the `get_new_version` method." + ) + td_out = self(tensordict.copy()) + if self.pad_output: + logits = td_out.get(logits_key) + else: + logits = td_out.get( + logits_key, + as_padded_tensor=True, + padding_value=0.0, + padding_side=padding_side, + ) + + if logits is None: + raise ValueError(f"Logits not found in tensordict at key {logits_key}") + + return LLMMaskedCategorical( + logits=logits, + mask=mask, + ) + + # Convenience methods for common LLM training scenarios + def _get_sft_dist(self, tensordict: TensorDictBase, **kwargs) -> D.Distribution: + """Get distribution suitable for SFT loss (response tokens only). 
+ + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + return self._get_dist_with_prompt_mask(tensordict, **kwargs) + + def _get_rlhf_dist(self, tensordict: TensorDictBase, **kwargs) -> D.Distribution: + """Get distribution suitable for RLHF loss (assistant tokens only). + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + return self._get_dist_with_assistant_mask(tensordict, **kwargs) + + def _get_generic_dist(self, tensordict: TensorDictBase, **kwargs) -> D.Distribution: + """Get distribution suitable for generic losses (all tokens). + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + return self._get_dist_with_attention_mask(tensordict, **kwargs) + # Sampling is taken care of by the sub-modules forward = TensorDictSequential.forward + def _check_padded(self, val: torch.Tensor) -> torch.Tensor: + """Check that a value is a padded tensor.""" + assert isinstance( + val, torch.Tensor + ), f"val must be torch.Tensor, got {type(val)}" + if not isinstance(val, torch.Tensor): + raise ValueError("Not a padded tensor") + return val + + def _check_not_padded( + self, val: list[torch.Tensor] | torch.Tensor + ) -> list[torch.Tensor] | torch.Tensor: + """Check that a value is not a padded tensor (i.e., a list of tensors).""" + if isinstance(val, torch.Tensor): + raise ValueError("Expected a list of tensors - not padded, got a tensor") + return val + @property def log_prob_keys(self) -> list[NestedKey]: return getattr(self, "_log_prob_keys", ["log_probs"]) @@ -69,14 +811,6 @@ def log_prob_keys(self) -> list[NestedKey]: def log_prob_keys(self, value: list[NestedKey]): self._log_prob_keys = value - @property - def log_prob_key(self) -> NestedKey: - return self.log_prob_keys[0] - - @log_prob_key.setter - def log_prob_key(self, value: NestedKey) -> None: - 
self.log_prob_keys[0] = value - @property def dist_params_keys(self) -> list[NestedKey]: raise NotImplementedError @@ -88,5 +822,5 @@ def dist_sample_keys(self) -> list[NestedKey]: def log_prob(self, data: TensorDictBase, **get_kwargs) -> TensorDictBase: if not self.generate: data = self(data) - return data.get(self.log_prob_key, **get_kwargs) + return data.get((self.log_prob_key, "response"), **get_kwargs) raise RuntimeError("log_prob not callable when generate=True.") diff --git a/torchrl/modules/llm/policies/transformers_wrapper.py b/torchrl/modules/llm/policies/transformers_wrapper.py index 98b9f8aae64..c71cf0616dc 100644 --- a/torchrl/modules/llm/policies/transformers_wrapper.py +++ b/torchrl/modules/llm/policies/transformers_wrapper.py @@ -4,180 +4,290 @@ # LICENSE file in the root directory of this source tree. from __future__ import annotations -from copy import copy +import contextlib +from contextlib import nullcontext +from copy import copy from typing import Literal import torch from tensordict import ( lazy_stack, - LazyStackedTensorDict, - NestedKey, + MetaData, + NonTensorStack, set_list_to_stack, TensorDict, TensorDictBase, ) -from tensordict.utils import _zip_strict +from tensordict.utils import _zip_strict, NestedKey +from torch import distributions as D from torch.nn.utils.rnn import pad_sequence -from torchrl.modules.llm.policies.common import CategoricalSequential +from torchrl.modules.llm.policies.common import ( + ChatHistory, + LLMWrapperBase, + LogProbs, + Masks, + Text, + Tokens, +) from torchrl.modules.utils.utils import _unpad_tensors -class TransformersWrapper(CategoricalSequential): +class TransformersWrapper(LLMWrapperBase): """A wrapper class for Hugging Face Transformers models, providing a consistent interface for text generation and log probability computation. - This class handles both text and token inputs, enabling text generation and log probability computation based on - the specified configuration. 
Unlike vLLM, Transformers require padded tensors for input and output sequences. + This class is a subclass of :class:`~torchrl.modules.llm.policies.LLMWrapperBase` and provides a unified API for handling different input modalities + (history, text, tokens) with consistent output structure using :class:`~tensordict.TensorClass` objects. Args: - model (transformers.LLM): The Hugging Face Transformers model to wrap. + model (transformers.AutoModelForCausalLM | str): The Hugging Face Transformers model to wrap. + If a string, it will be passed to `transformers.AutoModelForCausalLM.from_pretrained`. Keyword Args: - return_log_probs (bool | None, optional): Whether to return log probabilities of the generated tokens. - Defaults to `None`. - tokenizer (transformers.tokenization_utils.PreTrainedTokenizer | None, optional): The tokenizer to use for - encoding and decoding text. If `None`, the tokenizer associated with the model will be used. Defaults to - `None`. - from_text (bool, optional): Indicates whether the input is in text format. If `True`, the input is expected to - be text that will be tokenized. If `False`, the input is expected to be token sequences. Defaults to `True`. - - .. note:: If `from_text` is `True`, the input text can be provided in the `"text"` key or in the `"history"` key. - If using the `"history"` key, the history will be parsed from a :class:`~torchrl.data.llm.History` object to a - text string using the tokenizer. - - device (torch.device | None, optional): The device to use for computation. If `None`, the default device will - be used. Defaults to `None`. - generate (bool, optional): Whether to enable text generation. If `True`, the model will generate text based on - the input. If `False`, only log probabilities will be computed for the response tokens/text. Defaults to `True`. - generate_kwargs (dict | None, optional): Additional arguments to pass to the model's generate method. 
These - arguments can control aspects of the generation process, such as temperature and top-k sampling. Defaults - to `None`. - - .. note:: Sampling params can be overwritten at runtime using the kwargs of the forward method. - See `the full list of accepted keyword arguments here `__. - - tokenizer_kwargs (dict | None, optional): Additional arguments to pass to the tokenizer. These arguments can - control aspects of the tokenization process, such as padding and truncation. Defaults to `None`. - pad_output (bool, optional): Whether to pad the output sequences to a uniform length. Transformers require - `pad_output=True`, and the output sequences will be padded and represented as tensors. Defaults to `True`. - inplace (Literal[True, False, "empty"] | None, optional): Determines how the module should handle in-place - operations. If `True`, operations will be performed in-place. If `False`, a new TensorDict instance will be - created. If `"empty"`, the output data structure will be initialized with `input.empty()` (i.e., it will - conserve type, batch-size, and device). Defaults to `True`. - chat_template_name (Literal["chatml_format", "qwen"] | None, optional): The name of the chat template to use when - applying the chat template to the history. Defaults to `None`. + tokenizer (transformers.tokenization_utils.PreTrainedTokenizer | str | None, optional): The tokenizer to use for + encoding and decoding text. If `None`, the tokenizer associated with the model will be used. + If a string, it will be passed to `transformers.AutoTokenizer.from_pretrained`. Defaults to `None`. + input_mode (str, optional): The input modality to use. Must be one of `"history"`, `"text"`, or `"tokens"`. + Defaults to `"history"`. + input_key (str | None, optional): The key for the input data. 
If `None`, defaults to + - `("history", "prompt")` for `"history"` when `generate=True`, `("history", "full")` for `"history"` when `generate=False` + - `("text", "prompt")` for `"text"` when `generate=True`, `("text", "full")` for `"text"` when `generate=False` + - `("tokens", "prompt")` for `"tokens"` when `generate=True`, `("tokens", "full")` for `"tokens"` when `generate=False` + attention_mask_key (str, optional): The key for attention masks (used in `"tokens"` mode). Defaults to `"attention_mask"`. + + .. warning:: This argument is under development and may change in the future. + + generate (bool, optional): Whether to enable text generation. If `True`, the model will generate text based on the input. + If `False`, only log probabilities will be computed. Defaults to `True`. + return_log_probs (bool, optional): Whether to return log probabilities. Defaults to `False`. + generate_kwargs (dict | None, optional): Additional arguments to pass to the model's generate method. Defaults to `None`. + tokenizer_kwargs (dict | None, optional): Additional arguments to pass to the tokenizer. Defaults to `None`. + pad_output (bool, optional): Whether to pad the output sequences to a uniform length. If `True`, the output + sequences will be padded and represented as tensors. Defaults to `False`. + inplace (Literal[True, False, "empty"] | None, optional): Determines how the module should handle in-place operations. Defaults to `True`. + device (torch.device | None, optional): The device to use for computation. Defaults to `None`. + layout (torch.layout | None, optional): The layout to use for the output tensors when `pad_output=False`. Defaults to `torch.strided`. + num_samples (int | None, optional): The number of samples to generate. Defaults to `None` (one sample, and no batch-dimension for it). + Can also be set via the `generate_kwargs["num_return_sequences"] = value` argument.
Requires the "do_sample" argument to be set to `True` in `generate_kwargs`. + chat_template_name (Literal["chatml_format", "qwen"] | None, optional): The name of the chat template to use when applying the chat + template to the history. Defaults to `None`. For `input_mode="history"` only. chat_template (str | None, optional): The chat template to use when applying the chat template to the history. - Defaults to `None`. - - .. note:: The tokenizer is used when `from_text` is `True` to convert input text into token sequences. It is also - required (or retrieved) when `pad_output` is `True` or when using text inputs with `generate=False` to ensure proper - tokenization and padding. + Defaults to `None`. For `input_mode="history"` only. + log_probs_key (NestedKey | None, optional): The key for the log probabilities :class:`~torchrl.modules.llm.policies.LogProbs` object. Defaults to `"log_probs"`. + text_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.Text` object. Defaults to `"text"`. + tokens_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.Tokens` object. Defaults to `"tokens"`. + masks_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.Masks` object. Defaults to `"masks"`. + history_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.ChatHistory` object. Defaults to `"history"`. Input Keys: - - - If `from_text` is `True`: - - - `"text"`: The input text to be tokenized. - - `"text_response"`: the response text (if `generate=False` as the log probabilities are computed for the response.) - - - If `from_text` is `False`: - - - "tokens": The input token sequences. - - "attention_mask": The attention mask for the tokens. - - "tokens_response": The response token sequences (if `generate=False` as the log probabilities are - computed for the response.) 
+ The input key depends on both `input_mode` and `generate`: + - If `input_mode="history"` and `generate=True`: `input_key` (defaults to `("history", "prompt")`) + - If `input_mode="history"` and `generate=False`: `input_key` (defaults to `("history", "full")`) + - If `input_mode="text"` and `generate=True`: `input_key` (defaults to `("text", "prompt")`) + - If `input_mode="text"` and `generate=False`: `input_key` (defaults to `("text", "full")`) + - If `input_mode="tokens"` and `generate=True`: `input_key` (defaults to `("tokens", "prompt")`) + - If `input_mode="tokens"` and `generate=False`: `input_key` (defaults to `("tokens", "full")`) Output Keys: - - - `"tokens_response"`: The generated token sequences. - - `"log_probs"`: The log probabilities of the generated tokens (if `return_log_probs` is `True`). - - `"text_response"`: The generated text (if `from_text` is `True` and `generate` is `True`). + The output keys are automatically determined based on the input_mode: + - **Tokens**: Always returned (`tokens_key`, defaults to `"tokens"`) + - **Text**: Returned for `"text"` and `"history"` modes (`text_key`, defaults to `"text"`) + - **History**: Returned only for `"history"` mode (`history_key`, defaults to `"history"`) + - **Masks**: Always returned (`masks_key`, defaults to `"masks"`) + - **Log Probs**: Returned when `return_log_probs=True` (`log_probs_key`, defaults to `"log_probs"`) + + Example output structure for `input_mode="history"`: + ``` + TensorDict( + text=Text(prompt=..., response=..., full=...), + masks=Masks(all_attention_mask=..., all_assistant_mask=...), + tokens=Tokens(prompt=..., response=..., full=...), + log_probs=LogProbs(prompt=..., response=..., full=...), + history=ChatHistory(prompt=..., response=..., full=...) 
+ ) + ``` Example: >>> from transformers import AutoModelForCausalLM, AutoTokenizer + >>> from torchrl.data.llm import History + >>> from torchrl.modules.llm.policies import ChatHistory + >>> >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") + >>> + >>> # History input (recommended for RL environments) >>> wrapper = TransformersWrapper( ... model, ... tokenizer=tokenizer, - ... from_text=True, - ... generate=True + ... input_mode="history", + ... generate=True, + ... return_log_probs=True ... ) - >>> input_data = TensorDict({"text": ["Hello, world!", "This is another text"]}, batch_size=1) - >>> output_data = wrapper(input_data) - >>> print(output_data["text_response"]) - - .. seealso:: :func:`~torchrl.modules.vLLMWrapper` for a similar interface using vLLM. - + >>> + >>> history = History.from_chats([[ + ... {"role": "user", "content": "Hello"}, + ... {"role": "assistant", "content": "Hi there!"} + ... ]]) + >>> chat_history = ChatHistory(prompt=history) + >>> result = wrapper(TensorDict(history=chat_history, batch_size=(1,))) + >>> print(result["text"].response) # Generated text + >>> print(result["log_probs"].response) # Log probabilities + >>> print(result["history"].response) # History with response + + Attributes: + collector: The collector associated with the module, if it exists. + + .. 
seealso:: + - :class:`~torchrl.modules.llm.policies.LLMWrapperBase` (see :ref:`ref_categorical_sequential`) + - :class:`~torchrl.modules.llm.policies.vLLMWrapper` (see :ref:`ref_vllm_wrapper`) """ - text_key: NestedKey = ("text",) - history_key: NestedKey = ("history",) - token_key: NestedKey = ("tokens",) - token_response_key: NestedKey = ("tokens_response",) - text_response_key: NestedKey = ("text_response",) - attention_mask_key: NestedKey = ("attention_mask",) - def __init__( self, - model: transformers.LLM, # noqa - # noqa + model, *, - return_log_probs: bool | None = None, - tokenizer: transformers.tokenization_utils.PreTrainedTokenizer # noqa - | None = None, - # noqa - from_text: bool = True, - device: torch.device | None = None, + tokenizer=None, + input_mode: str = "history", + input_key: str | None = None, + attention_mask_key: str = "attention_mask", generate: bool = True, generate_kwargs: dict | None = None, tokenizer_kwargs: dict | None = None, - pad_output: bool = True, - inplace: Literal[True, False, "empty"] | None = True, + pad_output: bool = False, + inplace: Literal[True, False, "empty"] | None = None, + device: torch.device | None = None, + layout: torch.layout | None = None, + num_samples: int | None = None, chat_template_name: Literal["chatml_format", "qwen"] | None = None, chat_template: str | None = None, + return_log_probs: bool | None = None, + history_key: NestedKey | None = "history", + text_key: NestedKey | None = "text", + tokens_key: NestedKey | None = "tokens", + masks_key: NestedKey | None = "masks", + log_probs_key: NestedKey | None = "log_probs", ): super().__init__() + if isinstance(model, str): + from transformers import AutoModelForCausalLM + + model = AutoModelForCausalLM.from_pretrained(model) + + if isinstance(tokenizer, str): + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained(tokenizer) + + # Validate input_mode + if input_mode not in ["history", "text", "tokens"]: + raise ValueError( + 
f"input_mode must be one of 'history', 'text', 'tokens'. Got '{input_mode}'" + ) + self.model = model - self.from_text = from_text - if device is not None: - device = torch.device(device) - self._device = device + self.input_mode = input_mode + self.attention_mask_key = attention_mask_key self.generate = generate - self.inplace = inplace + + # Auto-determine what to return based on input mode + self.return_history = input_mode in ("history",) + self.return_text = input_mode in ("text", "history") + self.return_tokens = input_mode in ("tokens", "history", "text") + self.return_masks = True + if return_log_probs is False and not generate: + raise ValueError("return_log_probs must be True when generate=False.") + return_log_probs = ( + True + if (return_log_probs is None and generate) or (not generate) + else bool(return_log_probs) + ) + self.return_log_probs = return_log_probs + + self.history_key = history_key + self.text_key = text_key + self.tokens_key = tokens_key + self.masks_key = masks_key + self.log_probs_key = log_probs_key + if not isinstance(pad_output, bool): + raise ValueError("pad_output must be a boolean") self.pad_output = pad_output + self._device = device + if not pad_output and layout is None: + layout = torch.strided + self.layout = layout padding_value = None - self.chat_template_name = chat_template_name - self.chat_template = chat_template + # Auto-determine input_key if not provided + + # Set input keys based on mode and generate parameter + if input_mode == "history": + if generate: + self.in_keys = [ + ("history", "prompt") if input_key is None else input_key + ] + else: + self.in_keys = [("history", "full") if input_key is None else input_key] + elif input_mode == "text": + if generate: + self.in_keys = [("text", "prompt") if input_key is None else input_key] + else: + self.in_keys = [("text", "full") if input_key is None else input_key] + elif input_mode == "tokens": + if generate: + self.in_keys = [ + ("tokens", "prompt") if input_key is 
None else input_key + ] + else: + self.in_keys = [("tokens", "full") if input_key is None else input_key] + self.input_key = self.in_keys[0] + + # Set output keys based on auto-determined return flags + self.out_keys = [] + if self.return_text: + self.out_keys.append(self.text_key) + if self.return_masks: + self.out_keys.append(self.masks_key) + if self.return_tokens: + self.out_keys.append(self.tokens_key) + if self.return_log_probs: + self.out_keys.append(self.log_probs_key) + if self.return_history: + self.out_keys.append(self.history_key) + + # Tokenizer setup if not tokenizer_kwargs: tokenizer_kwargs = {} + else: + tokenizer_kwargs = dict(tokenizer_kwargs) if not tokenizer_kwargs.setdefault("return_attention_mask", True): - raise RuntimeError - - # If we don't pad, we use lists - if not self.pad_output: - raise NotImplementedError("transformers requires `pad_output=True`.") - if tokenizer_kwargs.setdefault("return_tensors", "pt") != "pt": - raise RuntimeError - if tokenizer_kwargs.setdefault("padding", self.pad_output) not in ( - self.pad_output, - ): - raise RuntimeError + raise RuntimeError("return_attention_mask must be True") + + # We always pad, so we always return tensors + return_tensors = "pt" + tokenizer_kwargs.setdefault("padding", True) + if return_tensors: + if ( + tokenizer_kwargs.setdefault("return_tensors", return_tensors) + != return_tensors + ): + raise RuntimeError + + # We always pad atm if tokenizer_kwargs.setdefault("padding_side", "left") != "left": raise RuntimeError self.tokenizer_kwargs = tokenizer_kwargs - if (pad_output or (from_text and not generate)) and tokenizer is None: - # We need a tokenizer if we pad or when using text inputs with generate=False - # The latter case is due to the fact that we want the log-probs for the response only, - # but if the response is presented as a text we have to tokenize the whole prompt + response and - # identify where the prompt ends and where the response starts. 
+ + # Get tokenizer if needed + if ( + pad_output or (input_mode in ["text", "history"] and not generate) + ) and tokenizer is None: tokenizer = model.get_tokenizer() self.tokenizer = tokenizer - if tokenizer is not None and ( + + if self.tokenizer is not None and ( not hasattr(self.tokenizer, "pad_token") or self.tokenizer.pad_token is None ): self.tokenizer.pad_token = self.tokenizer.eos_token @@ -185,40 +295,173 @@ def __init__( padding_value = self.tokenizer(self.tokenizer.pad_token)["input_ids"][0] self.padding_value = padding_value + # Generate kwargs setup if generate_kwargs is None: generate_kwargs = {} else: generate_kwargs = dict(generate_kwargs) - if not generate: - # TODO - if return_log_probs in (None, True): - return_log_probs = True - else: + self.num_samples = num_samples + if ( + generate_kwargs.get("num_return_sequences", 1) > 1 + or num_samples is not None + ): + if inplace in (True, "empty"): raise ValueError( - "return_log_probs must be True or None when generate=False." + "inplace must be False (or None) when generating more than one sample." 
) - elif return_log_probs in (None, False): - return_log_probs = False - self.return_log_probs = return_log_probs + if inplace is None: + inplace = False + if ( + generate_kwargs.get("num_return_sequences", 1) > 1 + and num_samples is not None + and generate_kwargs.get("num_return_sequences", 1) != num_samples + ): + raise ValueError("num_samples differs from generate_kwargs['n'].") + elif num_samples is None: + self.num_samples = generate_kwargs.get("num_return_sequences", 1) + generate_kwargs["num_return_sequences"] = self.num_samples + elif inplace is None: + inplace = True + + self.inplace = inplace + + if not generate: + # We want only the log-probs, we generate a single token (that we then discard) + # and retrieve the prompt log-probs + generate_kwargs["max_tokens"] = 1 generate_kwargs.setdefault("tokenizer", self.tokenizer) generate_kwargs.setdefault("output_logits", self.return_log_probs) generate_kwargs.setdefault("return_dict_in_generate", True) - if not generate: - generate_kwargs.setdefault("return_dict_in_generate", True) self.generate_kwargs = generate_kwargs - if from_text: - self.in_keys = [self.text_key] - else: - self.in_keys = [self.token_key, self.attention_mask_key] - self.out_keys = [self.token_response_key] - if from_text: - self.out_keys += [self.text_response_key, self.token_key] - if self.return_log_probs: - self.out_keys += [self.log_prob_key, "logits"] + # Additional transformers-specific settings + self.chat_template_name = chat_template_name + self.chat_template = chat_template + + # Flag to track when we're in a get_dist call + self._in_get_dist_call = False + + def get_new_version(self, **kwargs): + """Returns a new version of the module with altered parameters. + + For instance, the generate parameter can be altered to enable text generation or log-probabilities computation. 
+ This is especially useful when one wants to avoid re-initializing the module with a new set of parameters, when the + same parameters could be used to gather log-probs. + + Positional arguments are not supported. + + See the class constructor for more details about the parameters. + """ + # Build the constructor arguments by using current values for missing parameters + constructor_kwargs = {} + + # Model is always required + constructor_kwargs["model"] = kwargs.get("model", self.model) + + # Check for each parameter and use current value if not provided + if "tokenizer" in kwargs: + constructor_kwargs["tokenizer"] = kwargs["tokenizer"] + elif hasattr(self, "tokenizer"): + constructor_kwargs["tokenizer"] = self.tokenizer + + if "input_mode" in kwargs: + constructor_kwargs["input_mode"] = kwargs["input_mode"] + elif hasattr(self, "input_mode"): + constructor_kwargs["input_mode"] = self.input_mode + + if "input_key" in kwargs: + constructor_kwargs["input_key"] = kwargs["input_key"] + elif hasattr(self, "input_key"): + constructor_kwargs["input_key"] = self.input_key + + if "attention_mask_key" in kwargs: + constructor_kwargs["attention_mask_key"] = kwargs["attention_mask_key"] + elif hasattr(self, "attention_mask_key"): + constructor_kwargs["attention_mask_key"] = self.attention_mask_key + + if "generate" in kwargs: + constructor_kwargs["generate"] = kwargs["generate"] + elif hasattr(self, "generate"): + constructor_kwargs["generate"] = self.generate + + if "generate_kwargs" in kwargs: + constructor_kwargs["generate_kwargs"] = kwargs["generate_kwargs"] + elif hasattr(self, "generate_kwargs"): + constructor_kwargs["generate_kwargs"] = self.generate_kwargs + + if "pad_output" in kwargs: + constructor_kwargs["pad_output"] = kwargs["pad_output"] + elif hasattr(self, "pad_output"): + constructor_kwargs["pad_output"] = self.pad_output + + if "tokenizer_kwargs" in kwargs: + constructor_kwargs["tokenizer_kwargs"] = kwargs["tokenizer_kwargs"] + elif hasattr(self, 
"tokenizer_kwargs"): + constructor_kwargs["tokenizer_kwargs"] = self.tokenizer_kwargs + if ( + "pad_output" in kwargs + and kwargs.get("pad_output") + != constructor_kwargs["tokenizer_kwargs"]["padding"] + ): + constructor_kwargs["tokenizer_kwargs"]["padding"] = kwargs.get( + "pad_output" + ) + + if "inplace" in kwargs: + constructor_kwargs["inplace"] = kwargs["inplace"] + elif hasattr(self, "inplace"): + constructor_kwargs["inplace"] = self.inplace + + if "device" in kwargs: + constructor_kwargs["device"] = kwargs["device"] + elif hasattr(self, "_device"): + constructor_kwargs["device"] = self._device + + if "layout" in kwargs: + constructor_kwargs["layout"] = kwargs["layout"] + elif hasattr(self, "layout"): + constructor_kwargs["layout"] = self.layout + + if "num_samples" in kwargs: + constructor_kwargs["num_samples"] = kwargs["num_samples"] + elif hasattr(self, "num_samples"): + constructor_kwargs["num_samples"] = self.num_samples + + if "chat_template_name" in kwargs: + constructor_kwargs["chat_template_name"] = kwargs["chat_template_name"] + elif hasattr(self, "chat_template_name"): + constructor_kwargs["chat_template_name"] = self.chat_template_name + + if "chat_template" in kwargs: + constructor_kwargs["chat_template"] = kwargs["chat_template"] + elif hasattr(self, "chat_template"): + constructor_kwargs["chat_template"] = self.chat_template + + if "text_key" in kwargs: + constructor_kwargs["text_key"] = kwargs["text_key"] + elif hasattr(self, "text_key"): + constructor_kwargs["text_key"] = self.text_key + + if "tokens_key" in kwargs: + constructor_kwargs["tokens_key"] = kwargs["tokens_key"] + elif hasattr(self, "tokens_key"): + constructor_kwargs["tokens_key"] = self.tokens_key + + if "masks_key" in kwargs: + constructor_kwargs["masks_key"] = kwargs["masks_key"] + elif hasattr(self, "masks_key"): + constructor_kwargs["masks_key"] = self.masks_key + + if "log_probs_key" in kwargs: + constructor_kwargs["log_probs_key"] = kwargs["log_probs_key"] + elif 
hasattr(self, "log_probs_key"): + constructor_kwargs["log_probs_key"] = self.log_probs_key + + # Create and return new instance + return type(self)(**constructor_kwargs) @set_list_to_stack(True) def forward( @@ -251,223 +494,645 @@ def forward( else: cfg = None - out = LazyStackedTensorDict( - *[ + if self.num_samples is not None: + out = ( TensorDict( - device=tensordict.device, batch_size=tensordict.batch_size[1:] + device=tensordict.device, + batch_size=( + tensordict.batch_size[0], + self.num_samples, + *tensordict.batch_size[1:], + ), ) - for _ in range(tensordict.shape[0]) - ] - ) - if self.from_text: + .to_lazystack(1) + .to_lazystack(0) + ) + else: + out = TensorDict( + device=tensordict.device, batch_size=tensordict.batch_size + ).to_lazystack(0) + + if self.input_mode == "history": if self.generate: - out = self._from_transformers_generate_text( - tensordict, out=out, cfg=cfg - ) + out = self._from_transformers_generate_history(tensordict, cfg, out) else: - out = self._from_transformers_logprobs_text( - tensordict, out=out, cfg=cfg - ) - else: + out = self._from_transformers_logprobs_history(tensordict, cfg, out) + elif self.input_mode == "text": if self.generate: - out = self._from_transformers_generate_tokens( - tensordict, out=out, cfg=cfg - ) + out = self._from_transformers_generate_text(tensordict, cfg, out) else: - out = self._from_transformers_logprobs_tokens( - tensordict, out=out, cfg=cfg - ) + out = self._from_transformers_logprobs_text(tensordict, cfg, out) + elif self.input_mode == "tokens": + if self.generate: + out = self._from_transformers_generate_tokens(tensordict, cfg, out) + else: + out = self._from_transformers_logprobs_tokens(tensordict, cfg, out) + if _source_device: out = out.to(_source_device) if tensordict_out is None: if self.inplace is True: + # The output is the input tensordict_out = tensordict elif self.inplace is False: - tensordict_out = TensorDict() + # The output is the new structure + tensordict_out = out elif 
self.inplace == "empty": + # The output is empty tensordict_out = tensordict.empty() - if tensordict_out is not None: - result = tensordict_out + if tensordict_out is not None and tensordict_out is not out: + result = tensordict_out.exclude(*self.out_keys, inplace=True) result.update(out, keys_to_update=self.out_keys) - else: + elif tensordict_out is out: + result = out.select(*self.out_keys) + elif self.inplace: result = out keys = list(set(self.out_keys + list(tensordict.keys(True, True)))) - return tensordict.update(result, keys_to_update=keys) + result = tensordict.exclude(*self.out_keys, inplace=True).update( + result, keys_to_update=keys + ) + else: + result = out return result - def _from_transformers_generate_text(self, td, out, cfg=None): - pad_val = self.tokenizer.pad_token_id + def _from_transformers_generate_history(self, td, cfg, out) -> TensorDictBase: + """Generate text from history input.""" + from torchrl.data.llm import History - text = td.get(self.text_key) - if text is None: - # Fallback on history parsing - history = td.get(self.history_key) - if history is None: - raise ValueError( - "No text or history provided to the TransformersWrapper." 
+ # Validate input + if self.input_key not in td: + raise ValueError( + f"Expected '{self.input_key}' key for history input mode, " + f"but found keys: {list(td.keys())}" + ) + + history = td.get(self.input_key) + if not isinstance(history, History): + raise TypeError( + f"Expected History object for '{self.input_key}', got {type(history)}" + ) + + # Apply chat template + tokenizer_kwargs = {} + if self.chat_template_name is not None: + tokenizer_kwargs.setdefault("chat_template_name", self.chat_template_name) + if self.chat_template is not None: + tokenizer_kwargs.setdefault("chat_template", self.chat_template) + tokenizer_kwargs.setdefault("add_generation_prompt", True) + text_prompt = history.apply_chat_template( + tokenizer=self.tokenizer, **tokenizer_kwargs + ) + if not isinstance(text_prompt, list): + raise ValueError( + f"Expected list of text for history input, got {type(text_prompt)}" + ) + tokenizer_kwargs.setdefault("return_assistant_tokens_mask", False) + tokenizer_kwargs.setdefault("tokenize", True) + tokenizer_kwargs.setdefault("padding", False) + tokenizer_kwargs.setdefault("return_dict", True) + response_struct = history.apply_chat_template( + tokenizer=self.tokenizer, **tokenizer_kwargs + ) + tokens_prompt_padded = response_struct.get( + "input_ids", + as_padded_tensor=True, + padding_value=self.padding_value, + padding_side="left", + ) + attention_mask_prompt_padded = response_struct.get( + "attention_mask", + as_padded_tensor=True, + padding_value=0, + padding_side="left", + ) + + if attention_mask_prompt_padded is None: + attention_mask_prompt_padded = ( + tokens_prompt_padded != self.tokenizer.pad_token_id + ) + + result = self._generate_from_tokens( + tokens_prompt_padded, attention_mask_prompt_padded, cfg, out + ) + + # Generate using text path + if self.pad_output: + result[(self.tokens_key, "prompt")] = ( + tokens_prompt_padded + if not self.num_samples + else tokens_prompt_padded.unsqueeze(1).repeat(1, self.num_samples, 1) + ) + else: + 
tokens_prompt_unpadded = response_struct.get( + "input_ids", + as_nested_tensor=True, + ) + if not self.num_samples: + result[(self.tokens_key, "prompt")] = tokens_prompt_unpadded + else: + for r in result.unbind(1): + r[(self.tokens_key, "prompt")] = tokens_prompt_unpadded + + text_result = Text._from_tensordict(result.empty()) + result.set(self.text_key, text_result) + if not self.num_samples: + text_result.prompt = text_prompt + else: + for r in result.unbind(1): + r[self.text_key, "prompt"] = text_prompt + with result.view(-1) as result_flat: + if self.pad_output: + tokens_full_padded = result_flat.get( + (self.tokens_key, "full"), + as_padded_tensor=True, + padding_side="right", + padding_value=self.padding_value, ) - tokenizer_kwargs = {} - if self.chat_template_name is not None: - tokenizer_kwargs.setdefault( - "chat_template_name", self.chat_template_name + if tokens_full_padded is None: + raise ValueError("tokens_full_padded is None") + text_full = self.tokenizer.batch_decode( + tokens_full_padded, skip_special_tokens=False + ) + else: + tokens_full_unpadded = result_flat.get( + (self.tokens_key, "full"), as_list=True + ) + if tokens_full_unpadded is None: + raise ValueError("tokens_full_unpadded is None") + text_full = self.tokenizer.batch_decode( + tokens_full_unpadded, skip_special_tokens=False ) - if self.chat_template is not None: - tokenizer_kwargs.setdefault("chat_template", self.chat_template) - tokenizer_kwargs.setdefault("add_generation_prompt", False) - text = history.apply_chat_template( + text_prompt = result_flat[self.text_key, "prompt"] + text_response = [ + txt[len(prompt) :] + for txt, prompt in _zip_strict(text_full, text_prompt) + ] + result_flat.set((self.text_key, "full"), text_full) + result_flat.set((self.text_key, "response"), text_response) + # Now parse the full text back to a history object, and use the extra history objects + # as response + history_chat = ChatHistory._from_tensordict(result.empty()) + if self.num_samples is 
None: + history_chat.prompt = history + else: + for h in history_chat.unbind(1): + h.prompt = history + with history_chat.view(-1) as history_chat_flat: + history_chat_flat.full = full_histories = History.from_text(text_full) + prompt_histories = history_chat_flat.prompt + # iterate over batch + h_responses = [] + for h_full, h_prompt in _zip_strict( + full_histories.unbind(0), prompt_histories.unbind(0) + ): + if h_full.shape[0] <= h_prompt.shape[0]: + raise RuntimeError("Full history is shorter than prompt history") + h_responses.append(h_full[h_prompt.shape[0] :]) + history_chat_flat.response = torch.stack(h_responses) + result.set(self.history_key, history_chat) + return result + + def _from_transformers_logprobs_history(self, td, cfg, out): + """Compute log-probs from history input.""" + from torchrl.data.llm import History + + # Validate input + if self.input_key not in td: + raise ValueError( + f"Expected '{self.input_key}' key for history input mode, " + f"but found keys: {list(td.keys())}" + ) + + history = td.get(self.input_key) + if not isinstance(history, History): + raise TypeError( + f"Expected History object for '{self.input_key}', got {type(history)}" + ) + + # Apply chat template + tokenizer_kwargs = {} + if self.chat_template_name is not None: + tokenizer_kwargs.setdefault("chat_template_name", self.chat_template_name) + if self.chat_template is not None: + tokenizer_kwargs.setdefault("chat_template", self.chat_template) + tokenizer_kwargs.setdefault("add_generation_prompt", False) + text_full = history.apply_chat_template( + tokenizer=self.tokenizer, **tokenizer_kwargs + ) + + tokenizer_kwargs.setdefault("return_assistant_tokens_mask", True) + tokenizer_kwargs.setdefault("tokenize", True) + tokenizer_kwargs.setdefault("padding", False) + tokenizer_kwargs.setdefault("return_dict", True) + + with torch.device(self._device) if self._device is not None else nullcontext(): + response_tokens = history.apply_chat_template( tokenizer=self.tokenizer, 
**tokenizer_kwargs ) - if not isinstance(text, (list, str)): + if not isinstance(response_tokens, TensorDictBase): + raise ValueError( + f"Expected TensorDictBase for history input, got {type(response_tokens)}" + ) + result = self._logprobs_from_history_tokens(response_tokens, cfg, out) + text_result = Text._from_tensordict(result.empty()) + result.set(self.text_key, text_result) + result[self.text_key, "full"] = text_full + result.set(self.history_key, ChatHistory(full=history)) + return result + + def _cat_text(self, text, response_text): + """Concatenate text and response text.""" + if isinstance(text, list): + return [self._cat_text(t, t_) for t, t_ in _zip_strict(text, response_text)] + else: + return text + response_text + + def _generate_from_text(self, text, cfg, out) -> TensorDictBase: + """Generate text from text input.""" + pad_val = self.tokenizer.pad_token_id + + # Convert text to list format + if isinstance(text, str): + text = [text] + elif not isinstance(text, list): text = text.tolist() - tokens_in = self.tokenizer(text, **self.tokenizer_kwargs) + + tokenizer_kwargs = dict(self.tokenizer_kwargs) + tokenizer_kwargs.setdefault("padding", True) + + with torch.device( + self._device + ) if self._device is not None else contextlib.nullcontext(): + tokens_in = self.tokenizer(text, **tokenizer_kwargs) if self._device is not None: tokens_in = tokens_in.to(self._device) - input_ids = tokens_in["input_ids"] - attention_mask = tokens_in["attention_mask"] + # We are going to map this tokens_in to a tensordict to facilitate the padding in case we need it + tokens_in = dict(tokens_in) + for k, v in dict(tokens_in).items(): + if isinstance(v, list): + if isinstance(v[0], torch.Tensor): + v = torch.nested.nested_tensor(v) + else: + v = torch.nested.nested_tensor([torch.tensor(t) for t in v]) + tokens_in[k] = v + tokens_in = ( + TensorDict(batch_size=tokens_in["input_ids"].size(0)) + .to_lazystack(0) + .update(tokens_in) + ) + tokens_prompt_padded = tokens_in.get( 
+ "input_ids", + as_padded_tensor=True, + padding_side="left", + padding_value=pad_val, + ) + attention_mask_prompt_padded = tokens_in.get( + "attention_mask", + as_padded_tensor=True, + padding_side="left", + padding_value=0, + ) + if cfg is not None: kwargs = copy(self.generate_kwargs) kwargs["generation_config"] = cfg else: kwargs = self.generate_kwargs + tokens_out = self.model.generate( - input_ids=input_ids, attention_mask=attention_mask, **kwargs + input_ids=tokens_prompt_padded, + attention_mask=attention_mask_prompt_padded, + **kwargs, + ) + tokens_full_padded = tokens_out["sequences"] + tokens_response_padded = tokens_full_padded[ + ..., tokens_prompt_padded.shape[-1] : + ] + + attention_mask_response_padded = tokens_response_padded != pad_val + if self.num_samples: + attention_mask_full_padded = torch.cat( + [ + attention_mask_prompt_padded.repeat_interleave( + self.num_samples, dim=0 + ), + attention_mask_response_padded, + ], + dim=-1, + ) + else: + attention_mask_full_padded = torch.cat( + [attention_mask_prompt_padded, attention_mask_response_padded], dim=-1 + ) + tokens_response_unpadded = _unpad_tensors( + tokens_response_padded, attention_mask_response_padded, as_nested=False ) - sequences = tokens_out["sequences"] - sequences = sequences[..., input_ids.shape[-1] :] - mask_sequences = sequences != pad_val - sequences = _unpad_tensors(sequences, mask_sequences, as_nested=False) if self.return_log_probs: + # These are only for the new tokens, not for the prompt - to get that, we'd need to run the forward pass again logits = torch.stack(list(tokens_out["logits"]), 1) - logits = _unpad_tensors(logits, mask_sequences, as_nested=False) log_probs, logits = self._log_probs_generate( - sequences, logits, pad_val=-100 + tokens_response_padded, logits, pad_val=-100, pad=False ) + response_text = self.tokenizer.batch_decode( - sequences, skip_special_tokens=False + tokens_response_unpadded, skip_special_tokens=False ) - out.set(self.token_response_key, 
sequences) - out.set( - self.token_key, _unpad_tensors(input_ids, attention_mask, as_nested=False) - ) - out.set(self.text_response_key, list(response_text)) - out.set( - self.attention_mask_key, - _unpad_tensors(attention_mask, attention_mask, as_nested=False), - ) - if self.return_log_probs: - out.set( - self.log_prob_key, - _unpad_tensors(log_probs, mask_sequences, as_nested=False), + + # Build output TensorClass objects + if self.num_samples is not None: + text = [txt for txt in text for _ in range(self.num_samples)] + text_obj = Text._from_tensordict(out.empty()) + with text_obj.view(-1) as text_obj_flat: + text_obj_flat.prompt = text + text_obj_flat.response = response_text + text_obj_flat.full = self._cat_text(text, response_text) + out.set(self.text_key, text_obj) + + tokens_obj = Tokens._from_tensordict(out.empty()) + if self.pad_output: + prompt = tokens_prompt_padded + else: + prompt = _unpad_tensors( + tokens_prompt_padded, attention_mask_prompt_padded, as_nested=False + ) + if tokens_obj.ndim == 2: + for i in range(self.num_samples): + tokens_obj[:, i].prompt = prompt + else: + tokens_obj.prompt = prompt + with tokens_obj.view(-1) as tokens_obj_flat: + if not self.pad_output: + tokens_obj_flat.response = tokens_response_unpadded + tokens_full_unpadded = _unpad_tensors( + tokens_full_padded, attention_mask_full_padded, as_nested=False + ) + tokens_obj_flat.full = tokens_full_unpadded + else: + tokens_obj_flat.response = tokens_response_padded + tokens_obj_flat.full = tokens_full_padded + tokens_obj.padded = MetaData(self.pad_output) + out.set(self.tokens_key, tokens_obj) + + masks_obj = Masks._from_tensordict(out.empty()) + if out.ndim == 2: + attention_mask_full_padded = attention_mask_full_padded.unflatten( + 0, (-1, self.num_samples) ) - out.set("logits", _unpad_tensors(logits, mask_sequences, as_nested=False)) + if self.pad_output: + masks_obj.all_attention_mask = attention_mask_full_padded.bool() + else: + if out.ndim == 2: + with 
tokens_obj.view(-1) as tokens_obj_flat, masks_obj.view( + -1 + ) as masks_obj_flat: + attention_mask_full_unpadded = attention_mask_full_padded.flatten( + 0, 1 + ) + attention_mask_full_unpadded = _unpad_tensors( + attention_mask_full_unpadded.bool(), + attention_mask_full_padded.flatten(0, 1), + as_nested=False, + ) + masks_obj_flat.all_attention_mask = attention_mask_full_unpadded + else: + attention_mask_full_unpadded = _unpad_tensors( + attention_mask_full_padded.bool(), + attention_mask_full_padded, + as_nested=False, + ) + masks_obj.all_attention_mask = attention_mask_full_unpadded + masks_obj.all_assistant_mask = None + masks_obj.padded = MetaData(self.pad_output) + out.set(self.masks_key, masks_obj) + + if self.return_log_probs: + log_probs_obj = LogProbs._from_tensordict(out.empty()) + with log_probs_obj.view(-1) as log_probs_obj_flat: + # Unfortunate but we only have the log-probs for the new tokens, not for the prompt - to get that, we'd need to run the forward pass again + if self.pad_output: + log_probs_obj_flat.prompt = None + log_probs_obj_flat.response = log_probs + log_probs_obj_flat.full = None + else: + log_probs_unpadded = _unpad_tensors( + log_probs, attention_mask_response_padded, as_nested=False + ) + log_probs_obj_flat.prompt = None + log_probs_obj_flat.response = log_probs_unpadded + log_probs_obj_flat.full = None + log_probs_obj.padded = MetaData(self.pad_output) + out.set(self.log_probs_key, log_probs_obj) + + # Add logits to output if we're in a get_dist call + if self._in_get_dist_call: + if self.pad_output: + out.set("logits", logits) + else: + logits_full_unpadded = _unpad_tensors( + logits, attention_mask_full_padded, as_nested=False + ) + out.set("logits", logits_full_unpadded) + return out - def _from_transformers_generate_tokens(self, td, out, cfg=None): + def _cat_tensors( + self, + tokens: torch.Tensor | list[torch.Tensor], + response_tokens: torch.Tensor | list[torch.Tensor], + cast: torch.dtype | None = None, + ): + 
"""Concatenate tokens and response tokens.""" + if isinstance(tokens, list) or isinstance(response_tokens, list): + return [ + self._cat_tensors(t, t_, cast=cast) + for t, t_ in _zip_strict(tokens, response_tokens) + ] + else: + result = torch.cat([tokens, response_tokens], dim=-1) + if cast is not None: + result = result.to(cast) + return result + + def _logprobs_from_history_tokens(self, response_tokens, cfg, out): + """Compute log-probs from history tokens.""" pad_val = self.tokenizer.pad_token_id - input_ids = td.get( - self.token_key, + # unfortunately HF wants us to use padded tensors + tokens_full_padded = response_tokens.get( + "input_ids", as_padded_tensor=True, padding_side="left", padding_value=pad_val, ) - attention_mask = td.get( - self.attention_mask_key, + if not isinstance(tokens_full_padded, torch.Tensor): + raise ValueError( + f"Expected Tensor for tokens_full_padded, got {type(tokens_full_padded)}" + ) + attention_mask_full_padded = response_tokens.get( + "attention_mask", as_padded_tensor=True, padding_side="left", padding_value=0, ) - if attention_mask is None: - attention_mask = (input_ids != pad_val).to(torch.int64) + if not isinstance(attention_mask_full_padded, torch.Tensor): + raise ValueError( + f"Expected Tensor for attention_mask_full_padded, got {type(attention_mask_full_padded)}" + ) + if cfg is not None: kwargs = copy(self.generate_kwargs) kwargs["generation_config"] = cfg else: kwargs = self.generate_kwargs - tokens_out = self.model.generate( - input_ids=input_ids, attention_mask=attention_mask, **kwargs + + tokens_out_struct = self.model( + tokens_full_padded, attention_mask=attention_mask_full_padded, **kwargs ) - sequences = tokens_out["sequences"] - sequences = sequences[:, input_ids.shape[-1] :] - mask_sequences = sequences != pad_val - sequences = _unpad_tensors(sequences, mask_sequences, as_nested=False) - if self.return_log_probs: - logits = tokens_out["logits"] - logits = torch.stack(list(logits), 1) - logits = 
_unpad_tensors(logits, mask_sequences, as_nested=False) - log_probs, logits = self._log_probs_generate( - sequences, logits, pad_val=pad_val - ) - out.set( - self.token_response_key, - sequences, + ( + log_probs_full_padded, + logits_full_padded, + ) = self._compute_log_probs_from_model_output( + tokens_out_struct, + tokens_full_padded, + attention_mask_full_padded, + pad_val, ) - out.set( - self.token_key, _unpad_tensors(input_ids, attention_mask, as_nested=False) + + # Build output TensorClass objects + text_obj = Text._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) ) - out.set( - self.attention_mask_key, - _unpad_tensors(attention_mask, attention_mask, as_nested=False), + text_obj.prompt = None + text_obj.response = None + text_obj.full = None + out.set(self.text_key, text_obj) + + all_assistant_mask_padded = response_tokens.get( + "assistant_masks", + as_padded_tensor=True, + padding_side="left", + padding_value=0, ) - if self.return_log_probs: - out.set( - self.log_prob_key, - _unpad_tensors(log_probs, mask_sequences, as_nested=False), + if all_assistant_mask_padded is not None: + all_assistant_mask_padded = all_assistant_mask_padded.bool() + masks_obj = Masks._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if self.pad_output: + masks_obj.all_attention_mask = attention_mask_full_padded.bool() + if all_assistant_mask_padded is not None: + masks_obj.all_assistant_mask = all_assistant_mask_padded + else: + masks_obj.all_attention_mask = _unpad_tensors( + attention_mask_full_padded.bool(), + attention_mask_full_padded, + as_nested=False, ) - out.set("logits", _unpad_tensors(logits, mask_sequences, as_nested=False)) - return out + if all_assistant_mask_padded is not None: + masks_obj.all_assistant_mask = _unpad_tensors( + all_assistant_mask_padded, + attention_mask_full_padded, + as_nested=False, + ) + masks_obj.padded = MetaData(self.pad_output) + out.set(self.masks_key, masks_obj) - def 
_from_transformers_logprobs_text(self, td, out, cfg=None): - pad_val = self.tokenizer.pad_token_id + tokens_obj = Tokens._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if self.pad_output: + tokens_obj.full = tokens_full_padded + else: + input_ids_full_unpadded = _unpad_tensors( + tokens_full_padded, attention_mask_full_padded, as_nested=False + ) + tokens_obj.full = input_ids_full_unpadded + tokens_obj.response = None + tokens_obj.padded = MetaData(self.pad_output) + out.set(self.tokens_key, tokens_obj) - prompt_txt = td.get(self.text_key) - response_txt = td.get(self.text_response_key) - if prompt_txt is None or response_txt is None: - if prompt_txt is not None and response_txt is not None: - raise ValueError( - "No text or history provided to the TransformersWrapper. Either both are provided or none of them." - ) - # Fallback on history parsing - history = td.get(self.history_key) - if history is None: - raise ValueError( - "No text or history provided to the TransformersWrapper." 
- ) - tokenizer_kwargs = {} - if self.chat_template_name is not None: - tokenizer_kwargs.setdefault( - "chat_template_name", self.chat_template_name - ) - if self.chat_template is not None: - tokenizer_kwargs.setdefault("chat_template", self.chat_template) - tokenizer_kwargs.setdefault("add_generation_prompt", False) - response_txt = history.apply_chat_template( - tokenizer=self.tokenizer, **tokenizer_kwargs + log_probs_obj = LogProbs._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if self.pad_output: + log_probs_obj.full = log_probs_full_padded + else: + log_probs_full_unpadded = _unpad_tensors( + log_probs_full_padded, attention_mask_full_padded, as_nested=False ) - if isinstance(response_txt, list): - prompt_txt = ["" for _ in response_txt] + log_probs_obj.full = log_probs_full_unpadded + log_probs_obj.response = None + log_probs_obj.padded = MetaData(self.pad_output) + out.set(self.log_probs_key, log_probs_obj) + + # Add logits to output if we're in a get_dist call + if self._in_get_dist_call: + if self.pad_output: + out.set("logits", logits_full_padded) else: - prompt_txt = "" - - if not isinstance(prompt_txt, (list, str)): - prompt_txt = prompt_txt.tolist() - if not isinstance(response_txt, (list, str)): - response_txt = response_txt.tolist() - total_txt = [x + y for x, y in _zip_strict(prompt_txt, response_txt)] - total_tokens_in = self.tokenizer(total_txt, **self.tokenizer_kwargs) - prompt_tokens_in = self.tokenizer(prompt_txt, **self.tokenizer_kwargs) - if self._device is not None: - total_tokens_in = total_tokens_in.to(self._device) - prompt_tokens_in = prompt_tokens_in.to(self._device) + logits_full_unpadded = _unpad_tensors( + logits_full_padded, attention_mask_full_padded, as_nested=False + ) + out.set("logits", logits_full_unpadded) + + return out + + def _from_transformers_generate_text(self, td, cfg, out) -> TensorDictBase: + """Generate text from text input.""" + # Validate input + if self.input_key not in td: + 
raise ValueError( + f"Expected '{self.input_key}' key for text input mode, " + f"but found keys: {list(td.keys())}" + ) - total_input_ids = total_tokens_in["input_ids"] - total_attention_mask = total_tokens_in["attention_mask"] - prompt_input_ids = prompt_tokens_in["input_ids"] - prompt_attention_mask = prompt_tokens_in["attention_mask"] + text = td.get(self.input_key) + if text is None: + raise ValueError(f"Expected '{self.input_key}' key for text input mode") + if isinstance(text, NonTensorStack): + text = text.tolist() + if not isinstance(text, list): + raise ValueError(f"Expected list of text for text input, got {type(text)}") + return self._generate_from_text(text, cfg, out) + + def _from_transformers_logprobs_text(self, td, cfg, out): + """Compute log-probs from text input.""" + # Validate input + if self.input_key not in td: + raise ValueError( + f"Expected '{self.input_key}' key for text input mode, " + f"but found keys: {list(td.keys())}" + ) + + text = td.get(self.input_key) + if isinstance(text, NonTensorStack): + text = text.tolist() + if text is None: + raise ValueError(f"Expected '{self.input_key}' key for text input mode") + if not isinstance(text, list): + raise ValueError(f"Expected list of text for text input, got {type(text)}") + # Tokenize the text + if self.tokenizer is None: + raise ValueError( + "Tokenizer is required for log-probs computation with text input" + ) + + # Convert text to list format + if isinstance(text, str): + text = [text] + elif not isinstance(text, list): + text = text.tolist() + + # Tokenize the text + tokenizer_kwargs = dict(self.tokenizer_kwargs) + with torch.device( + self._device + ) if self._device is not None else contextlib.nullcontext(): + tokens_in = self.tokenizer(text, **tokenizer_kwargs) if cfg is not None: kwargs = copy(self.generate_kwargs) @@ -475,138 +1140,425 @@ def _from_transformers_logprobs_text(self, td, out, cfg=None): else: kwargs = self.generate_kwargs - total_tokens_out = self.model( - 
total_input_ids, attention_mask=total_attention_mask, **kwargs + # We are going to map this tokens_in to a tensordict to facilitate the padding in case we need it + tokens_in = ( + TensorDict(batch_size=len(tokens_in["input_ids"])) + .to_lazystack(0) + .update(dict(tokens_in)) + ) + input_ids_full_padded = tokens_in.get( + "input_ids", + as_padded_tensor=True, + padding_side="left", + padding_value=self.padding_value, + ) + attention_mask_full_padded = tokens_in.get( + "attention_mask", + as_padded_tensor=True, + padding_side="left", + padding_value=0, + ) + + tokens_out_struct = self.model( + input_ids_full_padded, attention_mask=attention_mask_full_padded, **kwargs ) - total_input_ids = _unpad_tensors( - total_input_ids, total_attention_mask, as_nested=False + # Compute log-probs for the input tokens + ( + log_probs_full_padded, + logits_full_padded, + ) = self._compute_log_probs_from_model_output( + tokens_out_struct, + input_ids_full_padded, + attention_mask_full_padded, + self.tokenizer.pad_token_id, ) - prompt_input_ids = _unpad_tensors( - prompt_input_ids, prompt_attention_mask, as_nested=False + + # Build output TensorClass objects + text_obj = Text._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) ) - sequences = [ - _total_input_ids[_prompt_input_ids.shape[-1] :] - if _prompt_input_ids.shape[-1] > 0 - else _total_input_ids - for _total_input_ids, _prompt_input_ids in zip( - total_input_ids, prompt_input_ids + text_obj.prompt = None + text_obj.response = None + text_obj.full = text + out.set(self.text_key, text_obj) + + tokens_obj = Tokens._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if self.pad_output: + tokens_obj.full = input_ids_full_padded + else: + input_ids_full_unpadded = _unpad_tensors( + input_ids_full_padded, attention_mask_full_padded, as_nested=False ) - ] - # response_attention_mask = total_attention_mask[ - # :, prompt_attention_mask.shape[-1] : - # ] - log_probs, logits = 
self._log_probs_from_logits( - total_tokens_out, sequences, pad_val=pad_val + tokens_obj.full = input_ids_full_unpadded + tokens_obj.response = None + tokens_obj.padded = MetaData(self.pad_output) + out.set(self.tokens_key, tokens_obj) + + masks_obj = Masks._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if self.pad_output: + masks_obj.all_attention_mask = attention_mask_full_padded.bool() + masks_obj.all_assistant_mask = td.get(("masks", "all_assistant_mask")) + else: + attention_mask_full_unpadded = _unpad_tensors( + attention_mask_full_padded.bool(), + attention_mask_full_padded, + as_nested=False, + ) + masks_obj.all_attention_mask = attention_mask_full_unpadded + masks_obj.all_assistant_mask = td.get( + ("masks", "all_assistant_mask"), as_list=True + ) + masks_obj.padded = MetaData(self.pad_output) + out.set(self.masks_key, masks_obj) + + log_probs_obj = LogProbs._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) ) + if self.pad_output: + log_probs_obj.full = log_probs_full_padded + else: + log_probs_full_unpadded = _unpad_tensors( + log_probs_full_padded, attention_mask_full_padded, as_nested=False + ) + log_probs_obj.full = log_probs_full_unpadded + log_probs_obj.response = None + log_probs_obj.padded = MetaData(self.pad_output) + out.set(self.log_probs_key, log_probs_obj) + + # Add logits to output if we're in a get_dist call + if self._in_get_dist_call: + if self.pad_output: + out.set("logits", logits_full_padded) + else: + logits_full_unpadded = _unpad_tensors( + logits_full_padded, attention_mask_full_padded, as_nested=False + ) + out.set("logits", logits_full_unpadded) - out.set("logits", logits) - out.set(self.log_prob_key, log_probs) - out.set(self.token_response_key, sequences) return out - def _from_transformers_logprobs_tokens(self, td, out, cfg=None): + def _from_transformers_generate_tokens( + self, td: TensorDictBase, cfg: dict | None, out: TensorDictBase + ) -> TensorDictBase: + """Generate 
text from tokens input.""" + # Validate input + if self.input_key not in td: + raise ValueError( + f"Expected '{self.input_key}' key for tokens input mode, " + f"but found keys: {list(td.keys())}" + ) + pad_val = self.tokenizer.pad_token_id - prompt_input_ids = td.get( - self.token_key, - as_list=True, + input_ids_prompt_padded = td.get( + self.input_key, + as_padded_tensor=True, + padding_side="left", + padding_value=pad_val, ) - response_input_ids = td.get( - self.token_response_key, - as_list=True, + attention_mask_prompt_padded = td.get( + ("masks", "all_attention_mask"), + as_padded_tensor=True, + padding_side="left", + padding_value=False, ) - # prompt_attention_mask = td.get( - # self.attention_mask_key, - # as_list=True, - # ) - - total_input_ids = [ - torch.cat([_prompt_input_ids, _response_input_ids], -1) - for _prompt_input_ids, _response_input_ids in _zip_strict( - prompt_input_ids, response_input_ids + if attention_mask_prompt_padded is None: + attention_mask_prompt_padded = td.get( + self.attention_mask_key, + as_padded_tensor=True, + padding_side="left", + padding_value=False, ) - ] - total_input_ids = pad_sequence( - total_input_ids, - padding_value=pad_val, - padding_side="left", - batch_first=True, + if attention_mask_prompt_padded is None: + attention_mask_prompt_padded = input_ids_prompt_padded != pad_val + return self._generate_from_tokens( + input_ids_prompt_padded, attention_mask_prompt_padded, cfg, out ) - total_attention_mask = (total_input_ids != pad_val).to(torch.int64) - - # if prompt_attention_mask is None: - # prompt_attention_mask = [ - # (_prompt_input_ids != pad_val).to(torch.int64) - # for _prompt_input_ids in prompt_input_ids - # ] + def _generate_from_tokens( + self, + tokens_prompt_padded: torch.Tensor, + attention_mask_prompt_padded: torch.Tensor, + cfg: dict | None, + out: TensorDictBase, + ) -> TensorDictBase: if cfg is not None: kwargs = copy(self.generate_kwargs) kwargs["generation_config"] = cfg else: kwargs = 
self.generate_kwargs - total_tokens_out = self.model( - total_input_ids, attention_mask=total_attention_mask, **kwargs + tokens_out_struct = self.model.generate( + input_ids=tokens_prompt_padded, + attention_mask=attention_mask_prompt_padded, + **kwargs, ) - log_probs, logits = self._log_probs_from_logits( - total_tokens_out, response_input_ids, pad_val=-100 + tokens_full_padded = tokens_out_struct["sequences"] + tokens_response_padded = tokens_full_padded[:, tokens_prompt_padded.shape[-1] :] + pad_val = getattr(self.tokenizer, "pad_token_id", None) + if pad_val is None: + pad_val = self.padding_value + attention_mask_reponse_padded = tokens_response_padded != pad_val + attention_mask_full_padded = tokens_full_padded != pad_val + tokens_response_unpadded = _unpad_tensors( + tokens_response_padded, attention_mask_reponse_padded, as_nested=False ) - # for i in range(log_probs.size(0)): - # assert log_probs[i].shape[-1] == response_input_ids[i].shape[-1] - out.set("logits", logits) - out.set(self.log_prob_key, log_probs) - return out + if self.return_log_probs: + # These are only for the new tokens, not for the prompt - to get that, we'd need to run the forward pass again + logits_response_padded = tokens_out_struct["logits"] + logits_response_padded = torch.stack(list(logits_response_padded), 1) + ( + log_probs_response_padded, + logits_response_padded, + ) = self._log_probs_generate( + tokens_response_padded, + logits_response_padded, + pad_val=pad_val, + pad=False, + ) - @classmethod - def _log_probs_from_logits(cls, total_tokens_out, response_input_ids, pad_val=-100): - response_input_ids = pad_sequence( - response_input_ids, - padding_value=pad_val, - batch_first=True, - padding_side="left", + response_text = self.tokenizer.batch_decode( + tokens_response_unpadded, skip_special_tokens=False ) - pad_mask = response_input_ids != pad_val - logits = total_tokens_out["logits"] - # logits = logits.log_softmax(dim=-1) - if logits.shape[-2] != 
response_input_ids.shape[-1]: - logits = logits[..., -response_input_ids.shape[-1] - 1 : -1, :] + # Build output TensorClass objects + text_obj = Text._from_tensordict(out.empty()) + text_obj.prompt = None # We don't have text in tokens mode + with text_obj.view(-1) as text_obj_flat: + text_obj_flat.response = response_text + text_obj.full = None # we don't have text in tokens mode so no all_text either + out.set(self.text_key, text_obj) - td = TensorDict( - logits=logits, response_input_ids=response_input_ids - ).auto_batch_size_() - with td.flatten() as tdflat: - tdflat["log_probs"] = -torch.nn.functional.cross_entropy( - tdflat["logits"], - tdflat["response_input_ids"], - reduce=False, - ignore_index=pad_val, + tokens_obj = Tokens._from_tensordict(out.empty()) + if not self.pad_output: + input_ids_prompt_unpadded = _unpad_tensors( + tokens_prompt_padded, + attention_mask_prompt_padded, + as_nested=False, ) - log_probs = td["log_probs"] + if self.num_samples is not None: + # replicate tokens + for i in range(self.num_samples): + tokens_obj[:, i].prompt = ( + input_ids_prompt_unpadded + if not self.pad_output + else tokens_prompt_padded + ) + else: + tokens_obj.prompt = ( + input_ids_prompt_unpadded + if not self.pad_output + else tokens_prompt_padded + ) + with tokens_obj.view(-1) as tokens_obj_flat: + if self.pad_output: + tokens_obj_flat.response = tokens_response_padded + tokens_obj_flat.full = tokens_full_padded + else: + tokens_obj_flat.response = tokens_response_unpadded + tokens_full_unpadded = _unpad_tensors( + tokens_full_padded, attention_mask_full_padded, as_nested=False + ) + tokens_obj_flat.full = tokens_full_unpadded + tokens_obj.padded = MetaData(self.pad_output) + out.set(self.tokens_key, tokens_obj) + + masks_obj = Masks._from_tensordict(out.empty()) + if out.ndim == 2: + attention_mask_full_padded = attention_mask_full_padded.unflatten( + 0, (-1, self.num_samples) + ) + if self.pad_output: + # Get "real" attention masks + 
masks_obj.all_attention_mask = attention_mask_full_padded.bool() + else: + # Get "real" attention masks + # We can use select to avoid batch-size problems + _td = torch.ones_like( + out.select(("tokens", "full")) + .copy() + .rename_key_(("tokens", "full"), "all_attention_mask") + ).bool() + del _td["tokens"] + masks_obj.update(_td) + masks_obj.all_assistant_mask = None + masks_obj.padded = MetaData(self.pad_output) + out.set(self.masks_key, masks_obj) - # Recover the list - log_probs = _unpad_tensors(log_probs, pad_mask) - logits = _unpad_tensors(logits, pad_mask) - return log_probs, logits + if self.return_log_probs: + log_probs_obj = LogProbs._from_tensordict(out.empty()) + if self.num_samples is None: + if self.pad_output: + log_probs_obj.response = log_probs_response_padded + else: + log_probs_response_unpadded = _unpad_tensors( + log_probs_response_padded, + attention_mask_reponse_padded, + as_nested=False, + ) + log_probs_obj.response = log_probs_response_unpadded + else: + with log_probs_obj.view(-1) as log_probs_obj_flat: + if self.pad_output: + log_probs_obj_flat.response = log_probs_response_padded + else: + log_probs_response_unpadded = _unpad_tensors( + log_probs_response_padded, + attention_mask_reponse_padded, + as_nested=False, + ) + log_probs_obj_flat.response = log_probs_response_unpadded + log_probs_obj.padded = MetaData(self.pad_output) + out.set(self.log_probs_key, log_probs_obj) - @classmethod - def _log_probs_generate(cls, sequences, logits, pad_val=-100): - tokens = pad_sequence( - sequences, - padding_value=pad_val, - batch_first=True, + return out + + def _from_transformers_logprobs_tokens( + self, td: TensorDictBase, cfg: dict | None, out: TensorDictBase + ) -> TensorDictBase: + """Compute log-probs from tokens input.""" + # Validate input + if self.input_key not in td: + raise ValueError( + f"Expected '{self.input_key}' key for tokens input mode, " + f"but found keys: {list(td.keys(isinstance(self.input_key, tuple)))}" + ) + + pad_val = 
self.tokenizer.pad_token_id + + input_ids_full_padded = td.get( + self.input_key, + as_padded_tensor=True, padding_side="left", + padding_value=pad_val, ) - logits = pad_sequence( - logits, - padding_value=0.0, - batch_first=True, + # Attention mask: try first the regular entry, then the key provided in the constructor, finally fallback on eager attention mask + attention_mask_full_padded = td.get( + ("masks", "all_attention_mask"), + as_padded_tensor=True, padding_side="left", + padding_value=False, + ) + if attention_mask_full_padded is None: + attention_mask_full_padded = td.get( + self.attention_mask_key, + as_padded_tensor=True, + padding_side="left", + padding_value=False, + ) + if attention_mask_full_padded is None: + attention_mask_full_padded = input_ids_full_padded != pad_val + + if cfg is not None: + kwargs = copy(self.generate_kwargs) + kwargs["generation_config"] = cfg + else: + kwargs = self.generate_kwargs + + tokens_out_struct = self.model( + input_ids_full_padded, attention_mask=attention_mask_full_padded, **kwargs ) + # Compute log-probs for the input tokens + ( + log_probs_full_padded, + logits_full_padded, + ) = self._compute_log_probs_from_model_output( + tokens_out_struct, + input_ids_full_padded, + attention_mask_full_padded, + self.tokenizer.pad_token_id, + ) + + # Build output TensorClass objects + text_obj = Text._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + text_obj.prompt = None + text_obj.response = None + text_obj.full = None + out.set(self.text_key, text_obj) + + tokens_obj = Tokens._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if not self.pad_output: + input_ids_full_unpadded = _unpad_tensors( + input_ids_full_padded, attention_mask_full_padded, as_nested=False + ) + tokens_obj.full = input_ids_full_unpadded + else: + tokens_obj.full = input_ids_full_padded + tokens_obj.response = None + tokens_obj.padded = MetaData(self.pad_output) + out.set(self.tokens_key, 
tokens_obj) + + masks_obj = Masks._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if self.pad_output: + masks_obj.all_attention_mask = attention_mask_full_padded.bool() + masks_obj.all_assistant_mask = td.get(("masks", "all_assistant_mask")) + else: + masks_obj.all_attention_mask = _unpad_tensors( + attention_mask_full_padded.bool(), + attention_mask_full_padded, + as_nested=False, + ) + masks_obj.all_assistant_mask = td.get( + ("masks", "all_assistant_mask"), as_list=True + ) + + masks_obj.padded = MetaData(self.pad_output) + out.set(self.masks_key, masks_obj) + + log_probs_obj = LogProbs._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if self.pad_output: + log_probs_obj.full = log_probs_full_padded + else: + log_probs_full_unpadded = _unpad_tensors( + log_probs_full_padded, attention_mask_full_padded, as_nested=False + ) + log_probs_obj.full = log_probs_full_unpadded + log_probs_obj.response = None + log_probs_obj.padded = MetaData(self.pad_output) + out.set(self.log_probs_key, log_probs_obj) + + # Add logits to output if we're in a get_dist call + if self._in_get_dist_call: + if self.pad_output: + out.set("logits", logits_full_padded) + else: + logits_full_unpadded = _unpad_tensors( + logits_full_padded, attention_mask_full_padded, as_nested=False + ) + out.set("logits", logits_full_unpadded) + return out + + @classmethod + def _log_probs_generate(cls, tokens, logits, pad_val=-100, pad: bool = True): + if pad: + tokens = pad_sequence( + tokens, + padding_value=pad_val, + batch_first=True, + padding_side="left", + ) + logits = pad_sequence( + logits, + padding_value=0.0, + batch_first=True, + padding_side="left", + ) + # logits = logits.log_softmax(dim=-1) # log_probs = logits.gather(-1, tokens.unsqueeze(-1)).squeeze(-1) td = TensorDict(logits=logits, tokens=tokens).auto_batch_size_() @@ -614,5 +1566,250 @@ def _log_probs_generate(cls, sequences, logits, pad_val=-100): tdflat["log_probs"] = 
-torch.nn.functional.cross_entropy( tdflat["logits"], tdflat["tokens"], reduce=False, ignore_index=pad_val ) + td["log_probs"][:, 0] = 0 log_probs = td["log_probs"] return log_probs, logits + + def _compute_log_probs_from_model_output( + self, model_output, input_ids, attention_mask, pad_val + ): + """Compute log-probs from model output without modifying original tensors. + + Args: + model_output: Output from the model containing logits + input_ids: Original input token ids + attention_mask: Original attention mask + pad_val: Padding token value to ignore in loss computation + + Returns: + tuple: (log_probs, shifted_logits) where log_probs are the computed log probabilities + and shifted_logits are the logits shifted to align with tokens + """ + logits = model_output["logits"] + + # Create shifted versions for log-prob computation without modifying originals + shifted_logits = logits[:, :-1, :] + # shifted_logits = shifted_logits - shifted_logits.logsumexp(dim=-1, keepdim=True) + shifted_logits = torch.cat( + [torch.zeros_like(shifted_logits[:, :1]), shifted_logits], 1 + ) + + shifted_input_ids = input_ids[:, 1:] + shifted_input_ids = torch.cat( + [torch.zeros_like(shifted_input_ids[:, :1]), shifted_input_ids], 1 + ) + + # Check that the shape is correct + if shifted_logits.shape[-2] != shifted_input_ids.shape[-1]: + raise ValueError( + f"The logits shape {shifted_logits.shape} does not match the input ids shape {shifted_input_ids.shape}" + ) + + # Compute log-probs + td = TensorDict( + logits=shifted_logits, tokens=shifted_input_ids + ).auto_batch_size_() + with td.flatten() as tdflat: + tdflat["log_probs"] = -torch.nn.functional.cross_entropy( + tdflat["logits"], + tdflat["tokens"], + reduce=False, + ignore_index=pad_val, + ) + # For consistency with vllm, we set the log-probs of the first token to 0 + # However, the first element may not be the first - we want the first of the attention mask, + # i.e, the first element that is true on the left + attention_mask = 
attention_mask.bool() + attention_mask_first_left = ~attention_mask[:, :-1] & attention_mask[:, 1:] + attention_mask_first_left = torch.cat( + [ + torch.zeros_like(attention_mask_first_left[..., :1]), + attention_mask_first_left, + ], + -1, + ) + attention_mask_first_left[~(attention_mask_first_left.any(-1)), 0] = True + assert attention_mask_first_left.any(-1).all() + attention_mask_first_left = attention_mask_first_left | ~attention_mask + td["log_probs"][attention_mask_first_left] = 0 + + return td["log_probs"], shifted_logits + + def get_dist( + self, + tensordict: TensorDictBase, + tensordict_out: TensorDictBase | None = None, + logits_key: NestedKey = "logits", + mask_key: NestedKey | None = None, + as_padded_tensor: bool | None = None, + as_nested_tensor: bool | None = None, + padding_value: float | None = None, + padding_side: str = "right", + layout: torch.layout | None = None, + **kwargs, + ) -> D.Distribution: + """Get distribution from logits/log-probs with optional masking. + + This method enables logits computation for distribution creation. + """ + self._in_get_dist_call = True + self.out_keys += ["logits"] + try: + return super().get_dist( + tensordict, + tensordict_out, + logits_key, + mask_key, + as_padded_tensor, + as_nested_tensor, + padding_value, + padding_side, + layout, + **kwargs, + ) + finally: + self._in_get_dist_call = False + self.out_keys.remove("logits") + + def _get_dist_with_prompt_mask( + self, + tensordict: TensorDictBase, + tokens_key: NestedKey = ("tokens", "prompt"), + logits_key: NestedKey = "logits", + assistant_mask_key: NestedKey = ("masks", "all_assistant_mask"), + attention_mask_key: NestedKey = ("masks", "all_attention_mask"), + **kwargs, + ) -> D.Distribution: + """Get distribution masked to only include response tokens (exclude prompt). + + This method enables logits computation for distribution creation. 
+ + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + self._in_get_dist_call = True + self.out_keys += ["logits"] + try: + return super()._get_dist_with_prompt_mask( + tensordict, + tokens_key, + logits_key, + assistant_mask_key, + attention_mask_key, + **kwargs, + ) + finally: + self._in_get_dist_call = False + self.out_keys.remove("logits") + + def _get_dist_with_assistant_mask( + self, + tensordict: TensorDictBase, + assistant_mask_key: NestedKey = ("masks", "all_assistant_mask"), + logits_key: NestedKey = "logits", + **kwargs, + ) -> D.Distribution: + """Get distribution masked to only include assistant tokens. + + This method enables logits computation for distribution creation. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + self._in_get_dist_call = True + self.out_keys += ["logits"] + try: + return super()._get_dist_with_assistant_mask( + tensordict, assistant_mask_key, logits_key, **kwargs + ) + finally: + self._in_get_dist_call = False + self.out_keys.remove("logits") + + def _get_dist_with_attention_mask( + self, + tensordict: TensorDictBase, + attention_mask_key: NestedKey = ("masks", "all_attention_mask"), + logits_key: NestedKey = "logits", + **kwargs, + ) -> D.Distribution: + """Get distribution masked using attention mask. + + This method enables logits computation for distribution creation. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. 
+ """ + self._in_get_dist_call = True + self.out_keys += ["logits"] + try: + return super()._get_dist_with_attention_mask( + tensordict, attention_mask_key, logits_key, **kwargs + ) + finally: + self._in_get_dist_call = False + self.out_keys.remove("logits") + + def _get_dist_with_custom_mask( + self, + tensordict: TensorDictBase, + mask: torch.Tensor, + logits_key: NestedKey = "logits", + **kwargs, + ) -> D.Distribution: + """Get distribution with custom mask. + + This method enables logits computation for distribution creation. + """ + self._in_get_dist_call = True + self.out_keys += ["logits"] + try: + return super()._get_dist_with_custom_mask( + tensordict, mask, logits_key, **kwargs + ) + finally: + self._in_get_dist_call = False + self.out_keys.remove("logits") + + # Convenience methods for common LLM training scenarios + def _get_sft_dist(self, tensordict: TensorDictBase, **kwargs) -> D.Distribution: + """Get distribution suitable for SFT loss (response tokens only). + + This method enables logits computation for distribution creation. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + self._in_get_dist_call = True + self.out_keys += ["logits"] + try: + return super()._get_sft_dist(tensordict, **kwargs) + finally: + self._in_get_dist_call = False + self.out_keys.remove("logits") + + def _get_rlhf_dist(self, tensordict: TensorDictBase, **kwargs) -> D.Distribution: + """Get distribution suitable for RLHF loss (assistant tokens only). + + This method enables logits computation for distribution creation. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. 
+ """ + self._in_get_dist_call = True + self.out_keys += ["logits"] + try: + return super()._get_rlhf_dist(tensordict, **kwargs) + finally: + self._in_get_dist_call = False + self.out_keys.remove("logits") + + def _get_generic_dist(self, tensordict: TensorDictBase, **kwargs) -> D.Distribution: + """Get distribution suitable for generic losses (all tokens). + + This method enables logits computation for distribution creation. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + self._in_get_dist_call = True + self.out_keys += ["logits"] + try: + return super()._get_generic_dist(tensordict, **kwargs) + finally: + self._in_get_dist_call = False + self.out_keys.remove("logits") diff --git a/torchrl/modules/llm/policies/vllm_wrapper.py b/torchrl/modules/llm/policies/vllm_wrapper.py index 7f3625a46fc..9133b98c3fe 100644 --- a/torchrl/modules/llm/policies/vllm_wrapper.py +++ b/torchrl/modules/llm/policies/vllm_wrapper.py @@ -5,164 +5,279 @@ from __future__ import annotations import collections -from typing import Literal +import warnings +from typing import Any, Literal import torch from tensordict import ( lazy_stack, - LazyStackedTensorDict, - maybe_dense_stack, - NestedKey, + MetaData, + NonTensorStack, + set_list_to_stack, TensorDict, TensorDictBase, ) -from tensordict.tensorclass import from_dataclass, NonTensorStack, TensorClass -from tensordict.utils import _zip_strict, expand_as_right +from tensordict.tensorclass import from_dataclass, TensorClass +from tensordict.utils import _zip_strict, NestedKey +from torch import distributions as D +from torch.nn.utils.rnn import pad_sequence from torchrl.envs.utils import _classproperty +from torchrl.modules.llm.policies.common import ( + ChatHistory, + LLMWrapperBase, + LogProbs, + Masks, + Text, + Tokens, +) +from torchrl.modules.utils.utils import _unpad_tensors -from torchrl.modules.llm.policies.common import CategoricalSequential - +# Type 
imports +try: + import transformers + import vllm + from vllm.outputs import RequestOutput + from vllm.sampling_params import SamplingParams +except ImportError: + vllm = None + transformers = None + SamplingParams = Any # type: ignore + RequestOutput = Any # type: ignore -class vLLMWrapper(CategoricalSequential): - """A wrapper class for vLLM models, providing a consistent interface for text generation and log probability computation, similar to the Hugging Face Transformers interface. - This class allows for handling both text and token inputs, enabling text generation and log probability - computation based on the specified configuration. +class vLLMWrapper(LLMWrapperBase): + """A wrapper class for vLLM models, providing a consistent interface for text generation and log probability computation. - .. note:: The default arguments of the `vLLMWrapper` class are set to make it easy to run this backend with - the :class:`~torchrl.envs.custom.llm.LLMEnv` class. + This class is a subclass of :class:`~torchrl.modules.llm.policies.LLMWrapperBase` and provides a unified API for handling different input + modalities (history, text, tokens) with consistent output structure using :class:`~tensordict.TensorClass` objects. Args: - model (vllm.LLM): The vLLM model to wrap. + model (vllm.LLM | str): The vLLM model to wrap. If a string, it will be passed to `vllm.LLM`. Keyword Args: - return_log_probs (bool | None, optional): Whether to return log probabilities of the generated tokens. + tokenizer (transformers.tokenization_utils.PreTrainedTokenizer | str | None, optional): The tokenizer to use for encoding and decoding text. + If `None`, the tokenizer associated with the model will be used. If a string, it will be passed to `transformers.AutoTokenizer.from_pretrained`. Defaults to `None`. - tokenizer (transformers.tokenization_utils.PreTrainedTokenizer | None, optional): The tokenizer to use for - encoding and decoding text. 
If `None`, the tokenizer associated with the model will be used. Defaults to - `None`. - from_text (bool, optional): Indicates whether the input is in text format. If `True`, the input is expected to - be text that will be tokenized. If `False`, the input is expected to be token sequences. Defaults to `True`. - - .. note:: If `from_text` is `True`, the input text can be provided in the `"text"` key or in the `"history"` key. - If using the `"history"` key, the history will be parsed from a :class:`~torchrl.data.llm.History` object to a - text string using the tokenizer. - - device (torch.device | None, optional): The device to use for computation. If `None`, the default device will - be used. Defaults to `None`. - generate (bool, optional): Whether to enable text generation. If `True`, the model will generate text based on - the input. If `False`, only log probabilities will be computed for the response tokens/text. Defaults to `True`. - generate_kwargs (dict | None, optional): Additional arguments to pass to the model's generate method. These - arguments can control aspects of the generation process, such as temperature and top-k sampling. Defaults - to `None`. - - .. note:: Sampling params can be overwritten at runtime using the kwargs of the forward method. - - tokenizer_kwargs (dict | None, optional): Additional arguments to pass to the tokenizer. These arguments can - control aspects of the tokenization process, such as padding and truncation. Defaults to `None`. - pad_output (bool, optional): Whether to pad the output sequences to a uniform length. If `True`, the output - sequences will be padded and represented as tensors. If `False`, lists of tokens will be used without - padding. Defaults to `False`. - - .. warning:: The default value of `pad_output` differs from :func:`~torchrl.modules.TransformersWrapper` - which does not handle non-padded inputs. 
- - inplace (Literal[True, False, "empty"] | None, optional): Determines how the module should handle in-place - operations. If `True`, operations will be performed in-place. If `False`, a new TensorDict instance will be - created. If `"empty"`, the output data structure will be initialized with `input.empty()` (i.e., it will - conserve type, batch-size, and device). Defaults to `True` when generating a single sample, `False` - otherwise. - - chat_template_name (str | None, optional): The name of the chat template to use for the history. Defaults to `None`. - chat_template (str | None, optional): The chat template to use for the history. Defaults to `None`. - - .. note:: The tokenizer is used when `from_text` is `True` to convert input text into token sequences. It is also - required (or retrieved) when `pad_output` is `True` or when using text inputs with `generate=False` to ensure proper - tokenization and padding. + input_mode (str, optional): The input modality to use. Must be one of `"history"`, `"text"`, or `"tokens"`. Defaults to `"history"`. + input_key (str | None, optional): The key for the input data. If `None`, defaults to + - `("history", "prompt")` for `"history"` when `generate=True`, `("history", "full")` for `"history"` when `generate=False` + - `("text", "prompt")` for `"text"` when `generate=True`, `("text", "full")` for `"text"` when `generate=False` + - `("tokens", "prompt")` for `"tokens"` when `generate=True`, `("tokens", "full")` for `"tokens"` when `generate=False` + attention_mask_key (str, optional): The key for attention masks (used in `"tokens"` mode). Defaults to `"attention_mask"`. + + .. warning:: This argument is under development and may change in the future. + + generate (bool, optional): Whether to enable text generation. If `True`, the model will generate text based on the input. + If `False`, only log probabilities will be computed. Defaults to `True`. + return_log_probs (bool, optional): Whether to return log probabilities. 
Defaults to `True`. + generate_kwargs (dict | None, optional): Additional arguments to pass to the model's generate method. Defaults to `None`. + tokenizer_kwargs (dict | None, optional): Additional arguments to pass to the tokenizer. Defaults to `None`. + pad_output (bool, optional): Whether to pad the output sequences to a uniform length. Defaults to `False`. + inplace (Literal[True, False, "empty"] | None, optional): Determines how the module should handle in-place operations. Defaults to `True`. + device (torch.device | None, optional): The device to use for computation. Defaults to `None`. + layout (torch.layout | None, optional): The layout to use for the output tensors when `pad_output=False`. Defaults to `torch.strided`. + chat_template_name (Literal["chatml_format", "qwen"] | None, optional): The name of the chat template to use when applying the chat template to the history. + Defaults to `None`. For `input_mode="history"` only. + chat_template (str | None, optional): The chat template to use when applying the chat template to the history. Defaults to `None`. + For `input_mode="history"` only. + num_samples (int | None, optional): The number of samples to generate. Defaults to `None` (one sample, and no batch-dimension for it). + Can also be set via the `generate_kwargs["n"] = value` argument. + log_probs_key (NestedKey | None, optional): The key for the log probabilities :class:`~torchrl.modules.llm.policies.LogProbs` object. Defaults to `"log_probs"`. + text_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.Text` object. Defaults to `"text"`. + tokens_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.Tokens` object. Defaults to `"tokens"`. + masks_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.Masks` object. Defaults to `"masks"`. 
+ history_key (NestedKey | None, optional): The key for the action :class:`~torchrl.modules.llm.policies.ChatHistory` object. Defaults to `"history"`. Input Keys: - - - If `from_text` is `True`: - - - `"text"`: The input text to be tokenized. - - `"text_response"`: the response text (if `generate=False` as the log probabilities are computed for the response.) - - - If `from_text` is `False`: - - - "tokens": The input token sequences. - - "attention_mask": The attention mask for the tokens. - - "tokens_response": The response token sequences (if `generate=False` as the log probabilities are - computed for the response.) + The input key depends on both `input_mode` and `generate`: + - If `input_mode="history"` and `generate=True`: `input_key` (defaults to `("history", "prompt")`) + - If `input_mode="history"` and `generate=False`: `input_key` (defaults to `("history", "full")`) + - If `input_mode="text"` and `generate=True`: `input_key` (defaults to `("text", "prompt")`) + - If `input_mode="text"` and `generate=False`: `input_key` (defaults to `("text", "full")`) + - If `input_mode="tokens"` and `generate=True`: `input_key` (defaults to `("tokens", "prompt")`) + - If `input_mode="tokens"` and `generate=False`: `input_key` (defaults to `("tokens", "full")`) Output Keys: - - - `"tokens_response"`: The generated token sequences. - - `"log_probs"`: The log probabilities of the generated tokens (if `return_log_probs` is `True`). - - `"text_response"`: The generated text (if `from_text` is `True` and `generate` is `True`). 
+ The output keys are automatically determined based on the input_mode: + - **Tokens**: Always returned (`tokens_key`, defaults to `"tokens"`) + - **Text**: Returned for `"text"` and `"history"` modes (`text_key`, defaults to `"text"`) + - **History**: Returned only for `"history"` mode (`history_key`, defaults to `"history"`) + - **Masks**: Always returned (`masks_key`, defaults to `"masks"`) + - **Log Probs**: Returned when `return_log_probs=True` (`log_probs_key`, defaults to `"log_probs"`) + + Example output structure for `input_mode="history"`: + ``` + TensorDict( + text=Text(prompt=..., response=..., full=...), + masks=Masks(all_attention_mask=..., all_assistant_mask=...), + tokens=Tokens(prompt=..., response=..., full=...), + log_probs=LogProbs(prompt=..., response=..., full=...), + history=ChatHistory(prompt=..., response=..., full=...) + ) + ``` Example: >>> from vllm import LLM >>> from transformers import AutoTokenizer + >>> from torchrl.data.llm import History + >>> from torchrl.modules.llm.policies import ChatHistory + >>> >>> model = LLM("gpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") + >>> + >>> # History input (recommended for RL environments) >>> wrapper = vLLMWrapper( ... model, - ... from_text=True, - ... generate=True + ... tokenizer=tokenizer, + ... input_mode="history", + ... generate=True, + ... return_log_probs=True ... ) - >>> input_data = LLMData(text=NonTensorStack("Hello, world!", "This is another text"), batch_size=1) - >>> output_data = wrapper(input_data) - >>> print(output_data.text_response) - - .. seealso:: :func:`~torchrl.modules.TransformersWrapper` for a similar interface using the Hugging Face - Transformers library. + >>> + >>> history = History.from_chats([[ + ... {"role": "user", "content": "Hello"}, + ... {"role": "assistant", "content": "Hi there!"} + ... 
]]) + >>> chat_history = ChatHistory(prompt=history) + >>> result = wrapper(TensorDict(history=chat_history, batch_size=(1,))) + >>> print(result["text"].response) # Generated text + >>> print(result["log_probs"].response) # Log probabilities + >>> print(result["history"].response) # History with response + + Attributes: + collector: The collector associated with the module, if it exists. + + .. seealso:: + - :class:`~torchrl.modules.llm.policies.LLMWrapperBase` (see :ref:`ref_categorical_sequential`) + - :class:`~torchrl.modules.llm.policies.TransformersWrapper` (see :ref:`ref_transformers_wrapper`) """ - text_key: NestedKey = ("text",) - token_key: NestedKey = ("tokens",) - token_response_key: NestedKey = ("tokens_response",) - text_response_key: NestedKey = ("text_response",) - attention_mask_key: NestedKey = ("attention_mask",) - history_key: NestedKey = ("history",) - def __init__( self, - model: vllm.LLM, # noqa - # noqa + model: vllm.LLM | str, *, - return_log_probs: bool | None = None, - tokenizer: transformers.tokenization_utils.PreTrainedTokenizer # noqa - | None = None, - # noqa - from_text: bool = True, - device: torch.device | None = None, + tokenizer: callable | str | None = None, # type: ignore + input_mode: str = "history", + input_key: NestedKey | None = None, + attention_mask_key: str = "attention_mask", generate: bool = True, generate_kwargs: dict | None = None, tokenizer_kwargs: dict | None = None, pad_output: bool = False, inplace: Literal[True, False, "empty"] | None = None, - chat_template_name: str | None = None, + device: torch.device | None = None, + layout: torch.layout | None = None, + num_samples: int | None = None, + chat_template_name: Literal["chatml_format", "qwen"] | None = None, chat_template: str | None = None, + return_log_probs: bool | None = None, + history_key: NestedKey | None = "history", + text_key: NestedKey | None = "text", + tokens_key: NestedKey | None = "tokens", + masks_key: NestedKey | None = "masks", + 
log_probs_key: NestedKey | None = "log_probs", ): super().__init__() - import vllm + if vllm is None: + raise ImportError("vllm is required for vLLMWrapper") + if transformers is None: + raise ImportError("transformers is required for vLLMWrapper") + + if isinstance(model, str): + model = vllm.LLM(model) + + if isinstance(tokenizer, str): + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained(tokenizer) + from vllm import SamplingParams + # Validate input_mode + if input_mode not in ["history", "text", "tokens"]: + raise ValueError( + f"input_mode must be one of 'history', 'text', 'tokens'. Got '{input_mode}'" + ) + self.model = model self._remote_calls = not isinstance(model, vllm.LLM) - self.from_text = from_text - self._device = device + self.input_mode = input_mode + self.attention_mask_key = attention_mask_key self.generate = generate + + # Auto-determine what to return based on input mode + self.return_history = input_mode in ("history",) + self.return_text = input_mode in ("text", "history") + self.return_tokens = input_mode in ("tokens", "history", "text") + self.return_masks = True + if return_log_probs is False and not generate: + raise ValueError("return_log_probs must be True when generate=False.") + return_log_probs = ( + True + if (return_log_probs is None and generate) or (not generate) + else bool(return_log_probs) + ) + self.return_log_probs = return_log_probs + + self.history_key = history_key + self.log_probs_key = log_probs_key + self.masks_key = masks_key + self.text_key = text_key + self.tokens_key = tokens_key + + if not isinstance(pad_output, bool): + raise ValueError("pad_output must be a boolean") self.pad_output = pad_output - self.chat_template_name = chat_template_name - self.chat_template = chat_template + self._device = device + if not pad_output and layout is None: + layout = torch.strided + self.layout = layout padding_value = None + # Set input keys based on mode and generate parameter + if 
input_mode == "history": + if generate: + self.in_keys = [ + ("history", "prompt") if input_key is None else input_key + ] + else: + self.in_keys = [("history", "full") if input_key is None else input_key] + elif input_mode == "text": + if generate: + self.in_keys = [("text", "prompt") if input_key is None else input_key] + else: + self.in_keys = [("text", "full") if input_key is None else input_key] + elif input_mode == "tokens": + if generate: + self.in_keys = [ + ("tokens", "prompt") if input_key is None else input_key + ] + else: + self.in_keys = [("tokens", "full") if input_key is None else input_key] + else: + raise ValueError(f"Invalid input_mode: {input_mode}") + self.input_key = self.in_keys[0] + + # Set output keys based on auto-determined return flags + self.out_keys = [] + if self.return_text: + self.out_keys.append(self.text_key) + if self.return_masks: + self.out_keys.append(self.masks_key) + if self.return_tokens: + self.out_keys.append(self.tokens_key) + if self.return_log_probs: + self.out_keys.append(self.log_probs_key) + if self.return_history: + self.out_keys.append(self.history_key) + + # Tokenizer setup if not tokenizer_kwargs: tokenizer_kwargs = {} if not tokenizer_kwargs.setdefault("return_attention_mask", True): - raise RuntimeError + raise RuntimeError("return_attention_mask must be True") # If we don't pad, we use lists return_tensors = "pt" if self.pad_output else False @@ -180,14 +295,16 @@ def __init__( raise RuntimeError self.tokenizer_kwargs = tokenizer_kwargs - if (pad_output or (from_text and not generate)) and tokenizer is None: - # We need a tokenizer if we pad or when using text inputs with generate=False - # The latter case is due to the fact that we want the log-probs for the response only, - # but if the response is presented as a text we have to tokenize the whole prompt + response and - # identify where the prompt ends and where the response starts. 
- tokenizer = model.get_tokenizer() + + # Get tokenizer if needed + if tokenizer is None: + try: + tokenizer = model.get_tokenizer() + except AttributeError: + warnings.warn("No tokenizer provided and no tokenizer found in model.") self.tokenizer = tokenizer - if tokenizer is not None and ( + + if self.tokenizer is not None and ( not hasattr(self.tokenizer, "pad_token") or self.tokenizer.pad_token is None ): self.tokenizer.pad_token = self.tokenizer.eos_token @@ -195,39 +312,42 @@ def __init__( padding_value = self.tokenizer(self.tokenizer.pad_token)["input_ids"][0] self.padding_value = padding_value + # Generate kwargs setup if generate_kwargs is None: generate_kwargs = {} else: generate_kwargs = dict(generate_kwargs) - if generate_kwargs.get("n", 1) > 1: + self.num_samples = num_samples + if generate_kwargs.get("n", 1) > 1 or num_samples is not None: if inplace in (True, "empty"): raise ValueError( "inplace must be False (or None) when generating more than one sample." ) if inplace is None: inplace = False + if ( + generate_kwargs.get("n", 1) > 1 + and num_samples is not None + and generate_kwargs.get("n", 1) != num_samples + ): + raise ValueError("num_samples differs from generate_kwargs['n'].") + elif num_samples is None: + self.num_samples = generate_kwargs.get("n", 1) + generate_kwargs["n"] = self.num_samples elif inplace is None: inplace = True self.inplace = inplace - prompt_logprobs = False + prompt_logprobs = return_log_probs if not generate: # We want only the log-probs, we generate a single token (that we then discard) - # and retrieve the prompt log-probs + # and retrieve the prompt log-probs generate_kwargs["max_tokens"] = 1 - prompt_logprobs = True - if return_log_probs in (None, True): - return_log_probs = True - else: - raise ValueError( - "return_log_probs must be True or None when generate=False." 
- ) - elif return_log_probs in (None, False): - return_log_probs = False - self.return_log_probs = return_log_probs + if not return_log_probs: + raise ValueError("return_log_probs must be True when generate=False.") generate_kwargs.setdefault("detokenize", not pad_output) generate_kwargs.setdefault("prompt_logprobs", prompt_logprobs) @@ -238,16 +358,144 @@ def __init__( sampling_params = SamplingParams(**generate_kwargs) self.sampling_params = sampling_params - if from_text: - self.in_keys = [self.text_key] - else: - self.in_keys = [self.token_key, self.attention_mask_key] - self.out_keys = [self.token_response_key] - if from_text: - self.out_keys += [self.text_response_key, self.token_key] - if self.return_log_probs: - self.out_keys += [self.log_prob_key] + # Additional transformers-specific settings + self.chat_template_name = chat_template_name + self.chat_template = chat_template + + def get_new_version(self, **kwargs): + """Returns a new version of the module with altered parameters. + + For instance, the generate parameter can be altered to enable text generation or log-probabilities computation. + This is especially useful when one wants to avoid re-initializing the module with a new set of parameters, when the + same parameters could be used to gather log-probs. + + Positional arguments are not supported. + + See the class constructor for more details about the parameters. 
+ """ + # Build the constructor arguments by using current values for missing parameters + constructor_kwargs = {} + + # Model is always required + constructor_kwargs["model"] = kwargs.get("model", self.model) + + # Check for each parameter and use current value if not provided + if "tokenizer" in kwargs: + constructor_kwargs["tokenizer"] = kwargs["tokenizer"] + elif hasattr(self, "tokenizer"): + constructor_kwargs["tokenizer"] = self.tokenizer + + if "input_mode" in kwargs: + constructor_kwargs["input_mode"] = kwargs["input_mode"] + elif hasattr(self, "input_mode"): + constructor_kwargs["input_mode"] = self.input_mode + + if "input_key" in kwargs: + constructor_kwargs["input_key"] = kwargs["input_key"] + # Since the input_key is dynamically determined, we don't want to set it here + # elif hasattr(self, "input_key"): + # constructor_kwargs["input_key"] = self.input_key + + if "attention_mask_key" in kwargs: + constructor_kwargs["attention_mask_key"] = kwargs["attention_mask_key"] + elif hasattr(self, "attention_mask_key"): + constructor_kwargs["attention_mask_key"] = self.attention_mask_key + + if "generate" in kwargs: + constructor_kwargs["generate"] = kwargs["generate"] + elif hasattr(self, "generate"): + constructor_kwargs["generate"] = self.generate + + if "return_log_probs" in kwargs: + constructor_kwargs["return_log_probs"] = kwargs["return_log_probs"] + elif not constructor_kwargs.get("generate", True): + # if we are not generating, we want to return log-probs + constructor_kwargs["return_log_probs"] = True + elif hasattr(self, "return_log_probs"): + constructor_kwargs["return_log_probs"] = self.return_log_probs + + if "generate_kwargs" in kwargs: + constructor_kwargs["generate_kwargs"] = kwargs["generate_kwargs"] + elif hasattr(self, "generate_kwargs"): + constructor_kwargs["generate_kwargs"] = self.generate_kwargs + + if "pad_output" in kwargs: + constructor_kwargs["pad_output"] = kwargs["pad_output"] + elif hasattr(self, "pad_output"): + 
constructor_kwargs["pad_output"] = self.pad_output + + if "tokenizer_kwargs" in kwargs: + constructor_kwargs["tokenizer_kwargs"] = kwargs["tokenizer_kwargs"] + elif hasattr(self, "tokenizer_kwargs"): + constructor_kwargs["tokenizer_kwargs"] = dict(self.tokenizer_kwargs) + if ( + "pad_output" in kwargs + and kwargs.get("pad_output") + != constructor_kwargs["tokenizer_kwargs"]["padding"] + ): + constructor_kwargs["tokenizer_kwargs"]["padding"] = kwargs.get( + "pad_output" + ) + if "inplace" in kwargs: + constructor_kwargs["inplace"] = kwargs["inplace"] + elif hasattr(self, "inplace"): + constructor_kwargs["inplace"] = self.inplace + + if "device" in kwargs: + constructor_kwargs["device"] = kwargs["device"] + elif hasattr(self, "_device"): + constructor_kwargs["device"] = self._device + + if "layout" in kwargs: + constructor_kwargs["layout"] = kwargs["layout"] + elif hasattr(self, "layout"): + constructor_kwargs["layout"] = self.layout + + if "num_samples" in kwargs: + constructor_kwargs["num_samples"] = kwargs["num_samples"] + elif hasattr(self, "num_samples"): + constructor_kwargs["num_samples"] = self.num_samples + + if "chat_template_name" in kwargs: + constructor_kwargs["chat_template_name"] = kwargs["chat_template_name"] + elif hasattr(self, "chat_template_name"): + constructor_kwargs["chat_template_name"] = self.chat_template_name + + if "chat_template" in kwargs: + constructor_kwargs["chat_template"] = kwargs["chat_template"] + elif hasattr(self, "chat_template"): + constructor_kwargs["chat_template"] = self.chat_template + + if "history_key" in kwargs: + constructor_kwargs["history_key"] = kwargs["history_key"] + elif hasattr(self, "history_key"): + constructor_kwargs["history_key"] = self.history_key + + if "text_key" in kwargs: + constructor_kwargs["text_key"] = kwargs["text_key"] + elif hasattr(self, "text_key"): + constructor_kwargs["text_key"] = self.text_key + + if "tokens_key" in kwargs: + constructor_kwargs["tokens_key"] = kwargs["tokens_key"] + elif 
hasattr(self, "tokens_key"): + constructor_kwargs["tokens_key"] = self.tokens_key + + if "masks_key" in kwargs: + constructor_kwargs["masks_key"] = kwargs["masks_key"] + elif hasattr(self, "masks_key"): + constructor_kwargs["masks_key"] = self.masks_key + + if "log_probs_key" in kwargs: + constructor_kwargs["log_probs_key"] = kwargs["log_probs_key"] + elif hasattr(self, "log_probs_key"): + constructor_kwargs["log_probs_key"] = self.log_probs_key + + # Create and return new instance + return type(self)(**constructor_kwargs) + + @set_list_to_stack(True) def forward( self, tensordict: TensorDictBase, @@ -265,415 +513,1276 @@ def forward( elif tensordict.ndim > 1: return self(tensordict.reshape(-1)).view(tensordict.shape) - if kwargs: - sampling_params = self.sampling_params.clone() - for key, val in kwargs.items(): - setattr(sampling_params, key, val) - else: - sampling_params = self.sampling_params - _source_device = None if self._device: _source_device = tensordict.device if tensordict.device: tensordict = tensordict.copy().clear_device_() - out = LazyStackedTensorDict( - *[ + if kwargs: + from vllm import SamplingParams + + sampling_params = SamplingParams(**kwargs) + else: + sampling_params = self.sampling_params + + if self.num_samples is not None: + out = ( TensorDict( - device=tensordict.device, batch_size=tensordict.batch_size[1:] + device=tensordict.device, + batch_size=( + tensordict.batch_size[0], + self.num_samples, + *tensordict.batch_size[1:], + ), ) - for _ in range(tensordict.shape[0]) - ] - ) - if self.from_text: + .to_lazystack(1) + .to_lazystack(0) + ) + else: + out = TensorDict( + device=tensordict.device, batch_size=tensordict.batch_size + ).to_lazystack(0) + + if self.input_mode == "history": if self.generate: - out = self._from_vllm_generate_text( - tensordict, sampling_params=sampling_params, out=out - ) + out = self._from_vllm_generate_history(tensordict, sampling_params, out) else: - out = self._from_vllm_logprobs_text( - tensordict, 
sampling_params=sampling_params, out=out - ) - else: + out = self._from_vllm_logprobs_history(tensordict, sampling_params, out) + elif self.input_mode == "text": if self.generate: - out = self._from_vllm_generate_tokens( - tensordict, sampling_params=sampling_params, out=out - ) + out = self._from_vllm_generate_text(tensordict, sampling_params, out) else: - out = self._from_vllm_logprobs_tokens( - tensordict, sampling_params=sampling_params, out=out - ) + out = self._from_vllm_logprobs_text(tensordict, sampling_params, out) + elif self.input_mode == "tokens": + if self.generate: + out = self._from_vllm_generate_tokens(tensordict, sampling_params, out) + else: + out = self._from_vllm_logprobs_tokens(tensordict, sampling_params, out) + if _source_device: out = out.to(_source_device) if tensordict_out is None: if self.inplace is True: + # The output is the input tensordict_out = tensordict elif self.inplace is False: + # The output is the new structure tensordict_out = out elif self.inplace == "empty": + # The output is empty tensordict_out = tensordict.empty() if tensordict_out is not None and tensordict_out is not out: - result = tensordict_out + result = tensordict_out.exclude(*self.out_keys, inplace=True) result.update(out, keys_to_update=self.out_keys) - elif tensordict_out is not out: + elif tensordict_out is out: + result = out.select(*self.out_keys) + elif self.inplace: result = out keys = list(set(self.out_keys + list(tensordict.keys(True, True)))) - return tensordict.update(result, keys_to_update=keys) + result = tensordict.exclude(*self.out_keys, inplace=True).update( + result, keys_to_update=keys + ) else: result = out return result - def _from_vllm_generate_text(self, td, sampling_params, out) -> TensorDictBase: - kwargs = {"sampling_params": sampling_params} - args = () - input_ids = None - attention_mask = None - text = td.get(self.text_key) - if text is None: - # Fallback on history parsing - history = td.get(self.history_key) - if history is None: - 
raise ValueError("No text or history provided to the vLLMWrapper.") - tokenizer_kwargs = {} - if self.chat_template_name is not None: - tokenizer_kwargs["chat_template_name"] = self.chat_template_name - if self.chat_template is not None: - tokenizer_kwargs["chat_template"] = self.chat_template - text = history.apply_chat_template(self.tokenizer, **tokenizer_kwargs) - if self.pad_output: - tokenizer_kwargs = self.tokenizer_kwargs - if not isinstance(text, (list, str)): - text = text.tolist() - tokens_in = TensorDict.from_dict(self.tokenizer(text, **tokenizer_kwargs)) - # out.set("tokens_in", tokens_in) - input_ids, attention_mask = ( - tokens_in["input_ids"], - tokens_in["attention_mask"], - ) - prompt_token_ids = self._to_list(input_ids, attention_mask) - kwargs["prompt_token_ids"] = prompt_token_ids - else: - text = td.get(self.text_key) - if not isinstance(text, (list, str)): - text = text.tolist() - args = (text,) + def _from_vllm_generate_history( + self, + tensordict_input: TensorDictBase, + sampling_params: SamplingParams, + out: TensorDictBase, + ) -> TensorDictBase: + """Generate text from history input.""" + from torchrl.data.llm import History + + assert isinstance( + tensordict_input, TensorDictBase + ), f"tensordict_input must be TensorDictBase, got {type(tensordict_input)}" + assert isinstance( + sampling_params, SamplingParams + ), f"sampling_params must be SamplingParams, got {type(sampling_params)}" + assert isinstance( + out, TensorDictBase + ), f"out must be TensorDictBase, got {type(out)}" + + # Validate input + if self.input_key not in tensordict_input: + raise ValueError( + f"Expected '{self.input_key}' key for history input mode, " + f"but found keys: {list(tensordict_input.keys())}" + ) - if not self._remote_calls: - tokens_out = self.model.generate(*args, **kwargs) + history = tensordict_input.get(self.input_key) + if not isinstance(history, History): + raise TypeError( + f"Expected History object for '{self.input_key}', got {type(history)}" 
+ ) + + # Apply chat template + tokenizer_kwargs = {} + if self.chat_template_name is not None: + tokenizer_kwargs.setdefault("chat_template_name", self.chat_template_name) + if self.chat_template is not None: + tokenizer_kwargs.setdefault("chat_template", self.chat_template) + tokenizer_kwargs.setdefault("add_generation_prompt", True) + text_prompt = history.apply_chat_template( + tokenizer=self.tokenizer, **tokenizer_kwargs + ) + + tokenizer_kwargs.setdefault("return_assistant_tokens_mask", False) + tokenizer_kwargs.setdefault("tokenize", True) + tokenizer_kwargs.setdefault("padding", False) + tokenizer_kwargs.setdefault("return_dict", True) + response_struct = history.apply_chat_template( + tokenizer=self.tokenizer, **tokenizer_kwargs + ) + tokens_prompt_padded = None + tokens_prompt_unpadded = None + if self.pad_output: + tokens_prompt_padded = response_struct.get( + "input_ids", + as_padded_tensor=True, + padding_value=self.padding_value, + padding_side="left", + ) else: - import ray + tokens_prompt_unpadded = response_struct.get("input_ids", as_list=True) - tokens_out = ray.get(self.model.generate.remote(*args, **kwargs)) + result = self._generate_from_tokens( + tokens_prompt_padded=tokens_prompt_padded, + tokens_prompt_unpadded=tokens_prompt_unpadded, + sampling_params=sampling_params, + out=out, + ) - tokens_out = self._get_output_tokens_and_log_probs(tokens_out) + # Generate using text path if self.pad_output: - tokens_out.set( - self.text_response_key, - NonTensorStack( - *self.tokenizer.batch_decode(tokens_out[self.token_response_key]) - ), - ) - in_keys = [ - self.log_prob_key, - self.token_response_key, - self.text_response_key, - self.token_key, - self.attention_mask_key, - ] - out = out.update(tokens_out.select(*in_keys, strict=False)) - # We might already have the tokens - if input_ids is not None and self.token_key not in out: - out[self.token_key] = input_ids - if attention_mask is not None and self.attention_mask_key not in out: - 
out[self.attention_mask_key] = attention_mask - inputs = td.select(*self.in_keys, strict=False) - if inputs.ndim < out.ndim: - # This happens when n > 1 - inputs = inputs.unsqueeze(-1).expand(out.shape) - out.update(inputs) - return out + result[(self.tokens_key, "prompt")] = ( + tokens_prompt_padded + if not self.num_samples + else tokens_prompt_padded.unsqueeze(1).repeat(1, self.num_samples, 1) + ) + else: + tokens_prompt_nested = torch.nested.as_nested_tensor(tokens_prompt_unpadded) + if not self.num_samples: + result[(self.tokens_key, "prompt")] = tokens_prompt_nested + else: + for r in result.unbind(1): + r[(self.tokens_key, "prompt")] = tokens_prompt_nested - def _from_vllm_logprobs_text(self, td, sampling_params, out): - text_prompt = td.get(self.text_key) - text_response = td.get(self.text_response_key) - if text_response is None or text_prompt is None: - if text_response is not None and text_prompt is not None: - raise ValueError( - "No text or history provided to the vLLMWrapper. Either both are provided or none of them." - ) - # Fallback on history parsing - history = td.get(self.history_key) - if history is None: - raise ValueError( - "No text or history provided to the TransformersWrapper." 
+ text_result = Text._from_tensordict(result.empty()) + result.set(self.text_key, text_result) + if not self.num_samples: + text_result.prompt = text_prompt + else: + for r in result.unbind(1): + r[self.text_key, "prompt"] = text_prompt + with result.view(-1) as result_flat: + if self.pad_output: + tokens_full_padded = result_flat.get( + (self.tokens_key, "full"), + as_padded_tensor=True, + padding_side="right", + padding_value=self.padding_value, ) - tokenizer_kwargs = {} - if self.chat_template_name is not None: - tokenizer_kwargs.setdefault( - "chat_template_name", self.chat_template_name + if tokens_full_padded is None: + raise ValueError("tokens_full_padded is None") + text_full = self.tokenizer.batch_decode( + tokens_full_padded, skip_special_tokens=False ) - if self.chat_template is not None: - tokenizer_kwargs.setdefault("chat_template", self.chat_template) - tokenizer_kwargs.setdefault("add_generation_prompt", False) - text_response = history.apply_chat_template( - tokenizer=self.tokenizer, **tokenizer_kwargs - ) - if isinstance(text_response, list): - text_prompt = ["" for _ in text_response] else: - text_prompt = "" - if not isinstance(text_prompt, list): - text_prompt = text_prompt.tolist() - if not isinstance(text_response, list): - text_response = text_response.tolist() - text = [_x + _y for _x, _y in _zip_strict(text_prompt, text_response)] - - tokenized_total = self.tokenizer(text, **self.tokenizer_kwargs) - tokenized_prompt_only = self.tokenizer(text_prompt, **self.tokenizer_kwargs) - - input_ids_total = tokenized_total["input_ids"] - attention_mask_total = tokenized_total["attention_mask"] - - if not self.pad_output: - input_ids_prompt = tokenized_prompt_only["input_ids"] - attention_mask_prompt = tokenized_prompt_only["attention_mask"] - input_ids_response = [] - for token_total, token_prompt in zip(input_ids_total, input_ids_prompt): - input_ids_response.append(token_total[len(token_prompt) :]) - attention_mask_response = [] - for mask, 
mask_prompt in zip(attention_mask_total, attention_mask_prompt): - attention_mask_response.append(mask[len(mask_prompt) :]) - else: - input_ids_prompt: torch.Tensor = tokenized_prompt_only["input_ids"] - # attention_mask_prompt: torch.Tensor = tokenized_prompt_only[ - # "attention_mask" - # ] - input_ids_response: torch.Tensor = input_ids_total[ - :, input_ids_prompt.shape[1] : + tokens_full_unpadded = result_flat.get( + (self.tokens_key, "full"), as_list=True + ) + # print("shapes of assistant masks", [t.shape for t in result_flat.get(("masks", "all_assistant_mask"), as_list=True)]) + if tokens_full_unpadded is None: + raise ValueError("tokens_full_unpadded is None") + text_full = self.tokenizer.batch_decode( + tokens_full_unpadded, skip_special_tokens=False + ) + text_prompt = result_flat[self.text_key, "prompt"] + text_response = [ + txt[len(prompt) :] + for txt, prompt in _zip_strict(text_full, text_prompt) ] - # response_attention_mask: torch.Tensor = attention_mask_total[ - # :, attention_mask_prompt.shape[1] : - # ] + result_flat.set((self.text_key, "full"), text_full) + result_flat.set((self.text_key, "response"), text_response) + + # Now parse the full text back to a history object, and use the extra history objects + # as response + history_chat = ChatHistory._from_tensordict(result.empty()) + if self.num_samples is None: + history_chat.prompt = history + else: + for h in history_chat.unbind(1): + h.prompt = history + with history_chat.view(-1) as history_chat_flat: + history_chat_flat.full = full_histories = History.from_text(text_full) + prompt_histories = history_chat_flat.prompt + # iterate over batch + h_responses = [] + for h_full, h_prompt in _zip_strict( + full_histories.unbind(0), prompt_histories.unbind(0) + ): + if h_full.shape[0] <= h_prompt.shape[0]: + raise RuntimeError("Full history is shorter than prompt history") + # Note: there can be more than one response, so the response has the same number of dims as prompt + 
h_responses.append(h_full[h_prompt.shape[0] :]) + history_chat_flat.response = torch.stack(h_responses) + result.set(self.history_key, history_chat) + return result - input_ids_total = self._to_list(input_ids_total, attention_mask_total) - kwargs = {"sampling_params": sampling_params} - if self.tokenizer is not None: - kwargs.update({"prompt_token_ids": input_ids_total}) - args = () + def _from_vllm_logprobs_history( + self, + tensordict_input: TensorDictBase, + sampling_params: SamplingParams, + out: TensorDictBase, + ) -> TensorDictBase: + """Compute log-probs from history input.""" + assert isinstance( + tensordict_input, TensorDictBase + ), f"tensordict_input must be TensorDictBase, got {type(tensordict_input)}" + assert isinstance( + sampling_params, SamplingParams + ), f"sampling_params must be SamplingParams, got {type(sampling_params)}" + assert isinstance( + out, TensorDictBase + ), f"out must be TensorDictBase, got {type(out)}" + + from torchrl.data.llm import History + + # Validate input + if self.input_key not in tensordict_input: + raise ValueError( + f"Expected '{self.input_key}' key for history input mode, " + f"but found keys: {list(tensordict_input.keys())}" + ) + + history = tensordict_input.get(self.input_key) + if not isinstance(history, History): + raise TypeError( + f"Expected History object for '{self.input_key}', got {type(history)}" + ) + + # Apply chat template + tokenizer_kwargs = {} + if self.chat_template_name is not None: + tokenizer_kwargs.setdefault("chat_template_name", self.chat_template_name) + if self.chat_template is not None: + tokenizer_kwargs.setdefault("chat_template", self.chat_template) + tokenizer_kwargs.setdefault("add_generation_prompt", False) + text_full = history.apply_chat_template( + tokenizer=self.tokenizer, **tokenizer_kwargs + ) + tokenizer_kwargs.setdefault("return_assistant_tokens_mask", True) + tokenizer_kwargs.setdefault("tokenize", True) + tokenizer_kwargs.setdefault("padding", False) + 
tokenizer_kwargs.setdefault("return_dict", True) + response_struct = history.apply_chat_template( + tokenizer=self.tokenizer, **tokenizer_kwargs + ) + + result = self._logprobs_from_tokens( + response_struct=response_struct, sampling_params=sampling_params, out=out + ) + text_result = Text._from_tensordict(result.empty()) + result.set(self.text_key, text_result) + result[self.text_key, "full"] = text_full + result.set(self.history_key, ChatHistory(full=history)) + return result + + def _from_vllm_generate_text( + self, td: TensorDictBase, sampling_params: SamplingParams, out: TensorDictBase + ) -> TensorDictBase: + """Generate text from text input.""" + # Type assertions + assert isinstance( + td, TensorDictBase + ), f"td must be TensorDictBase, got {type(td)}" + assert isinstance( + sampling_params, SamplingParams + ), f"sampling_params must be SamplingParams, got {type(sampling_params)}" + assert isinstance( + out, TensorDictBase + ), f"out must be TensorDictBase, got {type(out)}" + + # Validate input + if self.input_key not in td: + raise ValueError( + f"Expected '{self.input_key}' key for text input mode, " + f"but found keys: {list(td.keys())}" + ) + + text = td.get(self.input_key) + if text is None: + raise ValueError(f"Expected '{self.input_key}' key for text input mode") + + return self._generate_from_text(text, sampling_params, out) + + def _from_vllm_logprobs_text( + self, td: TensorDictBase, sampling_params: SamplingParams, out: TensorDictBase + ) -> TensorDictBase: + """Compute log-probs from text input.""" + # Type assertions + assert isinstance( + td, TensorDictBase + ), f"td must be TensorDictBase, got {type(td)}" + assert isinstance( + sampling_params, SamplingParams + ), f"sampling_params must be SamplingParams, got {type(sampling_params)}" + assert isinstance( + out, TensorDictBase + ), f"out must be TensorDictBase, got {type(out)}" + + # Validate input + if self.input_key not in td: + raise ValueError( + f"Expected '{self.input_key}' key for text 
input mode, " + f"but found keys: {list(td.keys())}" + ) + + text = td.get(self.input_key) + if text is None: + raise ValueError(f"Expected '{self.input_key}' key for text input mode") + + return self._logprobs_from_text(text, sampling_params, out) + + def _from_vllm_generate_tokens( + self, td: TensorDictBase, sampling_params: SamplingParams, out: TensorDictBase + ) -> TensorDictBase: + """Generate text from tokens input.""" + # Type assertions + assert isinstance( + td, TensorDictBase + ), f"td must be TensorDictBase, got {type(td)}" + assert isinstance( + sampling_params, SamplingParams + ), f"sampling_params must be SamplingParams, got {type(sampling_params)}" + assert isinstance( + out, TensorDictBase + ), f"out must be TensorDictBase, got {type(out)}" + + # Validate input + if self.input_key not in td: + raise ValueError( + f"Expected '{self.input_key}' key for tokens input mode, " + f"but found keys: {list(td.keys())}" + ) + + tokens_prompt_padded = None + tokens_prompt_unpadded = None + if self.pad_output: + tokens_prompt_padded = td.get(self.input_key) + else: + tokens_prompt_unpadded = list(td.get(self.input_key, as_list=True)) + # make sure we remove the padding tokens + tokens_prompt_unpadded = [ + tokens[tokens != self.padding_value] + for tokens in tokens_prompt_unpadded + ] + + return self._generate_from_tokens( + tokens_prompt_unpadded=tokens_prompt_unpadded, + tokens_prompt_padded=tokens_prompt_padded, + sampling_params=sampling_params, + out=out, + ) + + def _from_vllm_logprobs_tokens( + self, td: TensorDictBase, sampling_params: SamplingParams, out: TensorDictBase + ) -> TensorDictBase: + """Compute log-probs from tokens input.""" + # Type assertions + assert isinstance( + td, TensorDictBase + ), f"td must be TensorDictBase, got {type(td)}" + assert isinstance( + sampling_params, SamplingParams + ), f"sampling_params must be SamplingParams, got {type(sampling_params)}" + assert isinstance( + out, TensorDictBase + ), f"out must be TensorDictBase, 
got {type(out)}" + + # Validate input + if self.input_key not in td: + raise ValueError( + f"Expected '{self.input_key}' key for tokens input mode, " + f"but found keys: {list(td.keys())}" + ) + + tokens_full_padded = None + tokens_full_unpadded = None + if self.pad_output: + tokens_full_padded = td.get(self.input_key) else: - # TODO: this is unreachable as of now - but ultimately we may want to pass the text directly - args = (td[self.text_key],) + tokens_full_unpadded = list(td.get(self.input_key, as_list=True)) + # make sure we remove the padding tokens + tokens_full_unpadded = [ + tokens[tokens != self.padding_value] for tokens in tokens_full_unpadded + ] + + return self._logprobs_from_tokens( + response_struct=None, + tokens_full_unpadded=tokens_full_unpadded, + tokens_full_padded=tokens_full_padded, + sampling_params=sampling_params, + out=out, + ) + + def _cat_text( + self, text: str | list[str], response_text: str | list[str] + ) -> str | list[str]: + """Concatenate text and response text.""" + assert isinstance( + text, (str, list) + ), f"text must be str or list, got {type(text)}" + assert isinstance( + response_text, (str, list) + ), f"response_text must be str or list, got {type(response_text)}" + + if isinstance(text, list): + return [self._cat_text(t, t_) for t, t_ in _zip_strict(text, response_text)] + else: + return text + response_text + + def _generate_from_text( + self, + text: str | list[str] | NonTensorStack, + sampling_params: SamplingParams, + out: TensorDictBase, + ) -> TensorDictBase: + """Generate text from text input.""" + # Convert text to list format + if isinstance(text, str): + text = [text] + elif not isinstance(text, list): + text = text.tolist() + + assert isinstance( + text, (str, list) + ), f"text must be str or list, got {type(text)}" + assert isinstance( + sampling_params, SamplingParams + ), f"sampling_params must be SamplingParams, got {type(sampling_params)}" + assert isinstance( + out, TensorDictBase + ), f"out must be 
TensorDictBase, got {type(out)}" + + generate_kwargs = {"sampling_params": sampling_params} + args = () + + # Convert text to list format + if isinstance(text, str): + text = [text] + elif not isinstance(text, list): + text = text.tolist() + if not self._remote_calls: - tokens_out = self.model.generate(*args, **kwargs) + request_output = self.model.generate(text, *args, **generate_kwargs) else: import ray - tokens_out = ray.get(self.model.generate.remote(*args, **kwargs)) + request_output = ray.get( + self.model.generate.remote(text, *args, **generate_kwargs) + ) - tokens_out = _RequestOutput_tc.from_request_output(tokens_out) - tokens_out = tokens_out.select( - "prompt_token_ids", "prompt_logprobs", strict=False - )._tensordict + request_output_tc = _RequestOutput_tc.from_request_output(request_output) - # we disregard the tokens from the prompt to focus on those of the response + # Extract response tokens and text + outputs = ( + request_output_tc.outputs.view(-1) + if self.num_samples is not None + else request_output_tc.outputs + ) if self.pad_output: - lps = tokens_out.get( - "prompt_logprobs", as_padded_tensor=True, padding_side="left" + response_tokens_padded = outputs.view(-1).get( + "token_ids", + as_padded_tensor=self.pad_output, + padding_value=self.padding_value, + padding_side="right", + ) + response_tokens_list = outputs.view(-1).get( + "token_ids", + as_list=True, + ) + self._check_not_padded(response_tokens_list) + if self.tokenizer is not None: + response_text = self.tokenizer.batch_decode( + response_tokens_list, skip_special_tokens=False ) - lps = lps[..., -input_ids_response.shape[1] :] - padded = input_ids_response == self.padding_value - lps = torch.where(~padded, lps, 0.0) else: - lps = tokens_out.get( - "prompt_logprobs", - as_list=True, - ) - # We use a nested tensor as it will be unbound during writing - lps = torch.nested.nested_tensor( - [lp[..., -len(tr) :] for lp, tr in zip(lps, input_ids_response)] - ) - - out = 
out.update(tokens_out.empty(recurse=True)) - if isinstance(input_ids_response, list): - input_ids_response = torch.nested.nested_tensor(input_ids_response) - out["tokens_response"] = input_ids_response - out[self.log_prob_key] = lps - inputs = td.select(*self.in_keys, strict=False) - if inputs.ndim < out.ndim: - # This happens when n > 1 - inputs = inputs.unsqueeze(-1).expand(out.shape) - out.update(inputs) + response_text = None + + # Build output TensorClass objects + + masks_obj = Masks._from_tensordict(out.empty()) + masks_obj.all_attention_mask = None + masks_obj.all_assistant_mask = None + masks_obj.padded = MetaData(self.pad_output) + out.set(self.masks_key, masks_obj) + + if self.num_samples is not None: + text = [txt for txt in text for _ in range(self.num_samples)] + text_obj = Text._from_tensordict(out.empty()) + with text_obj.view(-1) as text_obj_flat: + text_obj_flat.prompt = text + text_obj_flat.response = response_text + text_obj_flat.full = self._cat_text(text, response_text) + out.set(self.text_key, text_obj) + + tokens_obj = Tokens._from_tensordict(out.empty()) + with tokens_obj.view(-1) as tokens_obj_flat: + tokens_obj_flat.prompt = None # We don't have prompt tokens in this path + if self.pad_output: + tokens_obj_flat.response = response_tokens_padded + self._check_padded(response_tokens_padded) + else: + tokens_obj_flat.response = response_tokens_list + self._check_not_padded(response_tokens_list) + tokens_obj_flat.full = ( + None # we don't have prompt tokens in this path so no all_tokens either + ) + tokens_obj.padded = MetaData(self.pad_output) + out.set(self.tokens_key, tokens_obj) + + if self.return_log_probs: + log_probs_obj = LogProbs._from_tensordict(out.empty()) + with log_probs_obj.view(-1) as log_probs_obj_flat: + if self.pad_output: + log_probs_padded = outputs.get( + "logprobs", + as_padded_tensor=self.pad_output, + padding_value=self.padding_value, + padding_side="right", + ) + self._check_padded(log_probs_padded) + 
log_probs_obj_flat.response = log_probs_padded + log_probs_obj_flat.full = log_probs_padded + else: + log_probs_list = outputs.get( + "logprobs", + as_list=True, + ) + self._check_not_padded(log_probs_list) + log_probs_obj_flat.response = log_probs_list + log_probs_obj_flat.full = log_probs_list + log_probs_obj_flat.prompt = None + log_probs_obj.padded = MetaData(self.pad_output) + out.set(self.log_probs_key, log_probs_obj) + return out - def _from_vllm_generate_tokens(self, td, sampling_params, out): - input_ids = td.get(self.token_key) - attention_mask = td.get(self.attention_mask_key) - input_ids_list = self._to_list(input_ids, attention_mask) - args = () - kwargs = { + def _logprobs_from_text( + self, + text: str | list[str] | NonTensorStack, + sampling_params: SamplingParams, + out: TensorDictBase, + ) -> TensorDictBase: + """Compute log-probs from text input.""" + # Convert text to list format + if isinstance(text, str): + text = [text] + elif not isinstance(text, list): + text = text.tolist() + + assert isinstance( + text, (str, list) + ), f"text must be str or list, got {type(text)}" + assert isinstance( + sampling_params, SamplingParams + ), f"sampling_params must be SamplingParams, got {type(sampling_params)}" + assert isinstance( + out, TensorDictBase + ), f"out must be TensorDictBase, got {type(out)}" + + # Tokenize the text + if self.tokenizer is None: + raise ValueError( + "Tokenizer is required for log-probs computation with text input" + ) + + # Tokenize the text + tokenized_output = self.tokenizer(text, **self.tokenizer_kwargs) + if self.pad_output: + tokens_full_padded = tokenized_output["input_ids"] + attention_mask_full_padded = tokenized_output["attention_mask"] + tokens_full_list = self._to_list( + tokens_full_padded, attention_mask_full_padded + ) + else: + tokens_full_unpadded = tokenized_output["input_ids"] + tokens_full_list = self._to_list(tokens_full_unpadded, None) + attention_mask_full_unpadded = tokenized_output["attention_mask"] + 
attention_mask_full_unpadded = [ + am.bool() + if isinstance(am, torch.Tensor) + else torch.tensor(am, dtype=torch.bool) + for am in attention_mask_full_unpadded + ] + + # Convert to list format for vLLM + generate_kwargs = { "sampling_params": sampling_params, - "prompt_token_ids": input_ids_list, + "prompt_token_ids": tokens_full_list, } + + # Generate with vLLM to get prompt_logprobs if not self._remote_calls: - tokens_out = self.model.generate(*args, **kwargs) + request_output = self.model.generate(**generate_kwargs) else: import ray - tokens_out = ray.get(self.model.generate.remote(*args, **kwargs)) - tokens_out = _RequestOutput_tc.from_request_output(tokens_out) - # When not generate, we don't want to overwrite this - tokens_response_td = tokens_out.outputs._tensordict.select( - "token_ids", "logprobs", strict=False + request_output = ray.get(self.model.generate.remote(**generate_kwargs)) + + request_output_tc = _RequestOutput_tc.from_request_output(request_output) + + # Extract log-probs from prompt_logprobs + if self.pad_output: + # For padded case, use all prompt_logprobs + log_probs_full_padded = request_output_tc.get( + "prompt_logprobs", + as_padded_tensor=True, + padding_value=0, + padding_side="left", + ) + + # Mask out padding + attention_mask_full_padded = tokens_full_padded != self.padding_value + log_probs_full_padded = torch.where( + attention_mask_full_padded, log_probs_full_padded, 0.0 + ) + else: + # For unpadded case, extract from each sequence + log_probs_full_unpadded = request_output_tc.get( + "prompt_logprobs", as_list=True + ) + self._check_not_padded(log_probs_full_unpadded) + + masks_obj = Masks._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) ) if self.pad_output: - tokens_response_td = tokens_response_td.densify( - layout=torch.strided - ).to_padded_tensor(padding=self.padding_value) - tokens_response_td.rename_key_("token_ids", "tokens_response") + self._check_padded(attention_mask_full_padded) + 
masks_obj.all_attention_mask = attention_mask_full_padded.bool() + else: + self._check_not_padded(attention_mask_full_unpadded) + masks_obj.all_attention_mask = attention_mask_full_unpadded + masks_obj.padded = MetaData(self.pad_output) + out.set(self.masks_key, masks_obj) + + # Build output TensorClass objects + text_obj = Text._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + text_obj.prompt = None + text_obj.response = None + text_obj.full = text + out.set(self.text_key, text_obj) + + tokens_obj = Tokens._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if self.pad_output: + self._check_padded(tokens_full_padded) + tokens_obj.full = tokens_full_padded + else: + tokens_obj.full = tokens_full_unpadded + tokens_obj.response = None + tokens_obj.padded = MetaData(self.pad_output) + out.set(self.tokens_key, tokens_obj) + if self.return_log_probs: - tokens_response_td.rename_key_("logprobs", self.log_prob_key) + log_probs_obj = LogProbs._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) if self.pad_output: - padded_values = ( - tokens_response_td["tokens_response"] == self.padding_value - ) - if padded_values.any(): - lps = tokens_response_td[self.log_prob_key] - lps = torch.where(expand_as_right(~padded_values, lps), lps, 0.0) - tokens_response_td[self.log_prob_key] = lps - out = out.update(tokens_response_td.empty(recurse=True)) - out.update( - tokens_response_td, - keys_to_update=(self.token_response_key, self.log_prob_key), - ) - inputs = td.select(*self.in_keys, strict=False) - if inputs.ndim < out.ndim: - # This happens when n > 1 - inputs = inputs.unsqueeze(-1).expand(out.shape) - out.update(inputs) + self._check_padded(log_probs_full_padded) + log_probs_obj.full = log_probs_full_padded + else: + self._check_not_padded(log_probs_full_unpadded) + log_probs_obj.full = log_probs_full_unpadded + log_probs_obj.response = None + log_probs_obj.padded = MetaData(self.pad_output) + 
out.set(self.log_probs_key, log_probs_obj) + return out - def _from_vllm_logprobs_tokens(self, td, sampling_params, out): + def _cat_tensors( + self, + tokens: list[torch.Tensor] | torch.Tensor, + response_tokens: list[torch.Tensor] | torch.Tensor, + ) -> list[torch.Tensor] | torch.Tensor: + """Concatenate tokens and response tokens.""" + if isinstance(tokens, list) or isinstance(response_tokens, list): + return [ + self._cat_tensors(t, t_) + for t, t_ in _zip_strict(tokens, response_tokens) + ] + else: + return torch.cat([tokens, response_tokens], dim=-1) - tokens = td.get(self.token_key) - tokens_response = td.get(self.token_response_key) - attention_mask = td.get(self.attention_mask_key) + def _generate_from_tokens( + self, + tokens_prompt_unpadded: list[torch.Tensor] | None, + tokens_prompt_padded: torch.Tensor | None, + sampling_params: SamplingParams, + out: TensorDictBase, + ) -> TensorDictBase: + """Generate text from tokens input.""" + assert isinstance( + tokens_prompt_padded, (torch.Tensor, type(None)) + ), f"tokens_prompt_padded must be torch.Tensor or None, got {type(tokens_prompt_padded)}" + assert isinstance( + tokens_prompt_unpadded, (list, type(None)) + ), f"tokens_prompt_unpadded must be list or None, got {type(tokens_prompt_unpadded)}" + assert isinstance( + sampling_params, SamplingParams + ), f"sampling_params must be SamplingParams, got {type(sampling_params)}" + assert isinstance( + out, TensorDictBase + ), f"out must be TensorDictBase, got {type(out)}" + + generate_kwargs = {"sampling_params": sampling_params} + args = () - tokens = torch.cat([tokens, tokens_response], -1) - if attention_mask is not None: - attention_mask = torch.cat( - [attention_mask, attention_mask.new_ones(tokens_response.shape)], -1 + if tokens_prompt_unpadded is None: + # TODO: To be on the safe side, we may do this even in the unpadded case since we're not sure + # the user passed an unpadded tensor in the first place. 
+ tokens_prompt_list = self._to_list( + tokens_prompt_padded, tokens_prompt_padded != self.padding_value ) - input_ids_list = self._to_list(tokens, attention_mask) - args = () - kwargs = { + else: + tokens_prompt_list = self._to_list(tokens_prompt_unpadded, None) + generate_kwargs.update({"prompt_token_ids": tokens_prompt_list}) + + if not self._remote_calls: + request_output = self.model.generate(*args, **generate_kwargs) + else: + import ray + + request_output = ray.get( + self.model.generate.remote(*args, **generate_kwargs) + ) + + request_output_tc = _RequestOutput_tc.from_request_output(request_output) + + # Extract response tokens and text + outputs = ( + request_output_tc.outputs.view(-1) + if self.num_samples is not None + else request_output_tc.outputs + ) + if self.pad_output: + tokens_response_padded = outputs.get( + "token_ids", + as_padded_tensor=self.pad_output, + padding_value=self.padding_value, + padding_side="right", + ) + self._check_padded(tokens_response_padded) + tokens_response_unpadded = outputs.get( + "token_ids", + as_list=True, + ) + self._check_not_padded(tokens_response_unpadded) + + tokens_obj = Tokens._from_tensordict(out.empty()) + if self.pad_output: + self._check_padded(tokens_response_padded) + self._check_padded(tokens_prompt_padded) + else: + self._check_not_padded(tokens_response_unpadded) + self._check_not_padded(tokens_prompt_unpadded) + + if self.num_samples is not None: + # replicate tokens + for i in range(self.num_samples): + tokens_obj[:, i].prompt = ( + tokens_prompt_unpadded + if not self.pad_output + else tokens_prompt_padded + ) + else: + tokens_obj.prompt = ( + tokens_prompt_unpadded if not self.pad_output else tokens_prompt_padded + ) + with tokens_obj.view(-1) as tokens_obj_flat: + if self.pad_output: + tokens_obj_flat.response = tokens_response_padded + tokens_full_padded = self._cat_tensors( + tokens_obj_flat.prompt, tokens_response_padded + ) + tokens_obj_flat.full = tokens_full_padded + else: + 
tokens_obj_flat.response = tokens_response_unpadded + tokens_full_unpadded = self._cat_tensors( + tokens_obj_flat.get("prompt", as_list=True), + tokens_response_unpadded, + ) + tokens_obj_flat.full = tokens_full_unpadded + tokens_obj.padded = MetaData(self.pad_output) + out.set(self.tokens_key, tokens_obj) + + masks_obj = Masks._from_tensordict(out.empty()) + # self.return_tokens must be True + if self.pad_output: + # Get "real" attention masks + full_attention_mask_padded = tokens_obj.get("full") != self.padding_value + masks_obj.all_attention_mask = full_attention_mask_padded.bool() + else: + # Get "real" attention masks + # We can use select to avoid batch-size problems + _td = torch.ones_like( + out.select(("tokens", "full")) + .copy() + .rename_key_(("tokens", "full"), "all_attention_mask") + ).bool() + del _td["tokens"] + masks_obj.update(_td) + masks_obj.all_assistant_mask = None + masks_obj.padded = MetaData(self.pad_output) + out.set(self.masks_key, masks_obj) + + if self.return_log_probs: + if self.pad_output: + log_probs_padded = outputs.get( + "logprobs", + as_padded_tensor=self.pad_output, + padding_value=self.padding_value, + padding_side="right", + ) + else: + log_probs_list = outputs.get( + "logprobs", + as_list=True, + ) + self._check_not_padded(log_probs_list) + if self.num_samples is None: + # TODO: this is not correct, we should use the prompt_logprobs + # but they're not returned by vLLM + if self.pad_output: + prompt_logprobs_padded = request_output_tc.get( + "prompt_logprobs", + as_padded_tensor=self.pad_output, + padding_value=self.padding_value, + padding_side="right", + ) + else: + prompt_logprobs_list = request_output_tc.get( + "prompt_logprobs", + as_list=True, + ) + self._check_not_padded(prompt_logprobs_list) + log_probs_obj = LogProbs._from_tensordict(out.empty()) + if self.pad_output: + self._check_padded(log_probs_padded) + if self.num_samples is None: + self._check_padded(prompt_logprobs_padded) + log_probs_obj.prompt = 
prompt_logprobs_padded + else: + self._check_not_padded(log_probs_list) + if self.num_samples is None: + self._check_not_padded(prompt_logprobs_list) + log_probs_obj.prompt = prompt_logprobs_list + with log_probs_obj.view(-1) as log_probs_obj_flat: + log_probs_obj_flat.response = ( + log_probs_padded if self.pad_output else log_probs_list + ) + if self.num_samples is None: + if self.pad_output: + log_probs_obj_flat.full = self._cat_tensors( + log_probs_obj_flat.prompt, log_probs_padded + ) + else: + log_probs_obj_flat.full = self._cat_tensors( + log_probs_obj_flat.get("prompt", as_list=True), + log_probs_list, + ) + else: + log_probs_obj_flat.full = None + log_probs_obj.padded = MetaData(self.pad_output) + out.set(self.log_probs_key, log_probs_obj) + return out + + def _logprobs_from_tokens( + self, + *, + response_struct: TensorDictBase | None = None, + tokens_full_unpadded: list[torch.Tensor] | None = None, + tokens_full_padded: torch.Tensor | None = None, + sampling_params: SamplingParams | None = None, + out: TensorDictBase | None = None, + ) -> TensorDictBase: + """Compute log-probs from tokens input.""" + assert isinstance( + response_struct, (TensorDictBase, type(None)) + ), f"response_struct must be TensorDictBase or None, got {type(response_struct)}" + assert isinstance( + tokens_full_unpadded, (list, type(None)) + ), f"tokens_full_unpadded must be list or None, got {type(tokens_full_unpadded)}" + assert isinstance( + tokens_full_padded, (torch.Tensor, type(None)) + ), f"tokens_full_padded must be torch.Tensor or None, got {type(tokens_full_padded)}" + assert isinstance( + sampling_params, (SamplingParams, type(None)) + ), f"sampling_params must be SamplingParams or None, got {type(sampling_params)}" + assert isinstance( + out, (TensorDictBase, type(None)) + ), f"out must be TensorDictBase or None, got {type(out)}" + + # Convert to list format for vLLM + if response_struct is not None: + tokens_full_padded = response_struct.get( + "input_ids", + 
as_padded_tensor=True, + padding_value=self.padding_value, + padding_side="left", + ) + attention_mask_full_padded = response_struct.get( + "attention_mask", + as_padded_tensor=True, + padding_value=False, + padding_side="left", + ).bool() + attention_mask_full_unpadded = _unpad_tensors( + attention_mask_full_padded, attention_mask_full_padded, as_nested=False + ) + elif tokens_full_unpadded is not None: + tokens_full_padded = pad_sequence( + tokens_full_unpadded, + padding_value=self.padding_value, + batch_first=True, + padding_side="left", + ) + attention_mask_full_unpadded = [ + t != self.padding_value for t in tokens_full_unpadded + ] + attention_mask_full_padded = pad_sequence( + attention_mask_full_unpadded, + padding_value=False, + batch_first=True, + padding_side="left", + ) + elif tokens_full_padded is not None: + attention_mask_full_padded = tokens_full_padded != self.padding_value + else: + raise ValueError("Either response_struct or tokens must be provided") + + assert isinstance(tokens_full_padded, torch.Tensor) + assert isinstance(attention_mask_full_padded, torch.Tensor) + if tokens_full_unpadded is None: + tokens_full_list = self._to_list( + tokens_full_padded, attention_mask_full_padded + ) + else: + tokens_full_list = self._to_list(tokens_full_unpadded, None) + + generate_kwargs = { "sampling_params": sampling_params, - "prompt_token_ids": input_ids_list, + "prompt_token_ids": tokens_full_list, } + + # Generate with vLLM to get prompt_logprobs if not self._remote_calls: - tokens_out = self.model.generate(*args, **kwargs) + tokens_out_stuct = self.model.generate(**generate_kwargs) else: import ray - tokens_out = ray.get(self.model.generate.remote(*args, **kwargs)) - tokens_out = _RequestOutput_tc.from_request_output(tokens_out) - prompt_logprobs = tokens_out.prompt_logprobs - prompt_logprobs = prompt_logprobs[..., -tokens_response.shape[-1] :] - padded = tokens_response == self.padding_value - prompt_logprobs = torch.where(~padded, prompt_logprobs, 
0.0) - out = out.update(tokens_out._tensordict.empty(recurse=True)) - out.set(self.log_prob_key, prompt_logprobs) - out.set(self.token_response_key, tokens_response) - inputs = td.select(*self.in_keys, strict=False) - if inputs.ndim < out.ndim: - # This happens when n > 1 - inputs = inputs.unsqueeze(-1).expand(out.shape) - out.update(inputs) - return out + tokens_out_stuct = ray.get(self.model.generate.remote(**generate_kwargs)) + + request_output_tc = _RequestOutput_tc.from_request_output(tokens_out_stuct) - def _get_output_tokens_and_log_probs(self, tokens_out): - padding_value = self.padding_value - tokens_out = _RequestOutput_tc.from_request_output(tokens_out) + # Extract log-probs from prompt_logprobs + if self.pad_output: + # For padded case, use all prompt_logprobs + log_probs_full_padded = request_output_tc.get( + "prompt_logprobs", + as_padded_tensor=True, + padding_value=0, + padding_side="left", + ) + + # Mask out padding + attention_mask_full_padded = tokens_full_padded != self.padding_value + log_probs_full_padded = torch.where( + attention_mask_full_padded, log_probs_full_padded, 0.0 + ) + else: + # For unpadded case, extract from each sequence + log_probs_full_unpadded = request_output_tc.get( + "prompt_logprobs", as_list=True + ) + self._check_not_padded(log_probs_full_unpadded) + + assistant_mask_full_padded = None + if response_struct is not None: + assistant_mask_full_padded = response_struct.get( + "assistant_masks", + as_padded_tensor=True, + padding_side="left", + padding_value=0, + ) + if assistant_mask_full_padded is not None: + assistant_mask_full_padded = assistant_mask_full_padded.bool() + if not self.pad_output: + assistant_mask_full_unpadded = _unpad_tensors( + assistant_mask_full_padded, + attention_mask_full_padded, + as_nested=False, + ) + else: + assistant_mask_full_unpadded = None + else: + assistant_mask_full_unpadded = None - # When not generate, we don't want to overwrite this - tokens_response_td = 
tokens_out.outputs._tensordict.select( - "text", "token_ids", "logprobs", strict=False + masks_obj = Masks._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) ) if self.pad_output: - tokens_response_td = tokens_response_td.densify( - layout=torch.strided - ).to_padded_tensor(padding=padding_value) - tokens_response_td.rename_key_("token_ids", "tokens_response") - tokens_response_td.rename_key_("text", "text_response") - if not self.pad_output: - # Then we can safely move the input tokens, but otherwise they - # may need padding - tokens_out = tokens_out.select("prompt_token_ids") - if tokens_out.ndim < tokens_response_td.ndim: - tokens_out = tokens_out.unsqueeze(1).expand(tokens_response_td.shape) - tokens_response_td.update(tokens_out).rename_key_( - "prompt_token_ids", self.token_key - ) - - if self.return_log_probs or "logprobs" in tokens_response_td: - tokens_response_td.rename_key_("logprobs", self.log_prob_key) - if self.pad_output: - padded_values = tokens_response_td["tokens_response"] == padding_value - if padded_values.any(): - lps = tokens_response_td[self.log_prob_key] - lps = torch.where(expand_as_right(~padded_values, lps), lps, 0.0) - tokens_response_td[self.log_prob_key] = lps - return tokens_response_td - - def _to_list(self, tokens, attention_mask): - """Converts a tensor of integer in a masked list (of lists) of integers.""" - if isinstance(tokens, torch.Tensor): - # TODO: make this an ND NonTensorStack + self._check_padded(attention_mask_full_padded) + masks_obj.all_attention_mask = attention_mask_full_padded.bool() + if assistant_mask_full_padded is not None: + masks_obj.all_assistant_mask = assistant_mask_full_padded + else: + self._check_not_padded(attention_mask_full_unpadded) + masks_obj.all_attention_mask = attention_mask_full_unpadded + if assistant_mask_full_unpadded is not None: + masks_obj.all_assistant_mask = assistant_mask_full_unpadded + masks_obj.padded = MetaData(self.pad_output) + out.set(self.masks_key, 
masks_obj) + + tokens_obj = Tokens._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if self.pad_output: + self._check_padded(tokens_full_padded) + tokens_obj.full = tokens_full_padded + else: + tokens_obj.full = tokens_full_unpadded + tokens_obj.response = None + tokens_obj.padded = MetaData(self.pad_output) + out.set(self.tokens_key, tokens_obj) + + log_probs_obj = LogProbs._from_tensordict( + TensorDict(batch_size=out.batch_size).to_lazystack(0) + ) + if self.pad_output: + self._check_padded(log_probs_full_padded) + log_probs_obj.full = log_probs_full_padded + else: + self._check_not_padded(log_probs_full_unpadded) + log_probs_obj.full = log_probs_full_unpadded + log_probs_obj.response = None + log_probs_obj.padded = MetaData(self.pad_output) + out.set(self.log_probs_key, log_probs_obj) + + return out + + def _to_list( + self, + tokens_padded: torch.Tensor | list[torch.Tensor], + attention_mask_padded: torch.Tensor | None, + ) -> list[list[int]]: + """Converts a tensor of integers into a masked list (of lists) of integers.""" + if isinstance(tokens_padded, torch.Tensor): parent = [] queue = collections.deque() - if attention_mask is None: - attention_mask = torch.ones_like(tokens) - queue.append((tokens, attention_mask.bool(), parent)) + if attention_mask_padded is None: + attention_mask_padded = torch.ones_like(tokens_padded) + queue.append((tokens_padded, attention_mask_padded.bool(), parent)) while queue: - token, amask, _parent = queue.popleft() - if token.ndim == 1: - _parent.extend(token[amask].tolist()) + token_tensor, attention_mask_bool, _parent = queue.popleft() + if token_tensor.ndim == 1: + _parent.extend(token_tensor[attention_mask_bool].tolist()) else: - _parent.extend([[] for _ in range(token.shape[0])]) + _parent.extend([[] for _ in range(token_tensor.shape[0])]) queue.extend( [ (t, m, local_parent) - for t, m, local_parent in zip(token, amask, _parent) + for t, m, local_parent in zip( + token_tensor, 
attention_mask_bool, _parent + ) ] ) - tokens = parent - return tokens + tokens_list = parent + elif isinstance(tokens_padded, list): + parent = [] + queue = collections.deque() + queue.append((tokens_padded, parent)) + while queue: + tokens_list, _parent = queue.popleft() + if isinstance(tokens_list, list) and isinstance( + tokens_list[0], (list, torch.Tensor) + ): + _parent.extend([[] for _ in tokens_list]) + queue.extend( + [ + (t, local_parent) + for t, local_parent in zip(tokens_list, _parent) + ] + ) + continue + elif isinstance(tokens_list, torch.Tensor): + tokens_list = tokens_list.tolist() + _parent.extend(tokens_list) + tokens_list = parent + + return tokens_list @_classproperty def CompletionOutput_tc(cls): - import vllm + if vllm is None: + raise ImportError("vllm is required for CompletionOutput_tc") if hasattr(cls, "_CompletionOutput_tc"): return cls._CompletionOutput_tc - CompletionOutput_tc = from_dataclass(vllm.outputs.CompletionOutput) + CompletionOutput_tc = from_dataclass(vllm.outputs.CompletionOutput) # type: ignore cls._CompletionOutput_tc = CompletionOutput_tc return CompletionOutput_tc + def get_dist( + self, + tensordict: TensorDictBase, + tensordict_out: TensorDictBase | None = None, + logits_key: NestedKey = "logits", + mask_key: NestedKey | None = None, + as_padded_tensor: bool | None = None, + as_nested_tensor: bool | None = None, + padding_value: float | None = None, + padding_side: str = "right", + layout: torch.layout | None = None, + **kwargs, + ) -> D.Distribution: + """Get distribution from logits/log-probs with optional masking. + + vLLM does not return logits, so this method is not supported. 
+ """ + raise NotImplementedError( + "vLLM does not return logits, so get_dist is not supported" + ) + + def get_dist_with_prompt_mask( + self, + tensordict: TensorDictBase, + tokens_key: NestedKey = ("tokens", "full"), + logits_key: NestedKey = "logits", + assistant_mask_key: NestedKey = ("masks", "all_assistant_mask"), + attention_mask_key: NestedKey = ("masks", "all_attention_mask"), + **kwargs, + ) -> D.Distribution: + """Get distribution masked to only include response tokens (exclude prompt). + + vLLM does not return logits, so this method is not supported. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + raise NotImplementedError( + "vLLM does not return logits, so get_dist_with_prompt_mask is not supported" + ) + + def _get_dist_with_assistant_mask( + self, + tensordict: TensorDictBase, + assistant_mask_key: NestedKey = ("masks", "all_assistant_mask"), + logits_key: NestedKey = "logits", + **kwargs, + ) -> D.Distribution: + """Get distribution masked to only include assistant tokens. + + vLLM does not return logits, so this method is not supported. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + raise NotImplementedError( + "vLLM does not return logits, so get_dist_with_assistant_mask is not supported" + ) + + def _get_dist_with_attention_mask( + self, + tensordict: TensorDictBase, + attention_mask_key: NestedKey = ("masks", "all_attention_mask"), + logits_key: NestedKey = "logits", + **kwargs, + ) -> D.Distribution: + """Get distribution masked using attention mask. + + vLLM does not return logits, so this method is not supported. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. 
+ """ + raise NotImplementedError( + "vLLM does not return logits, so get_dist_with_attention_mask is not supported" + ) + + def _get_dist_with_custom_mask( + self, + tensordict: TensorDictBase, + mask: torch.Tensor, + logits_key: NestedKey = "logits", + **kwargs, + ) -> D.Distribution: + """Get distribution with custom mask. + + vLLM does not return logits, so this method is not supported. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + raise NotImplementedError( + "vLLM does not return logits, so get_dist_with_custom_mask is not supported" + ) + + # Convenience methods for common LLM training scenarios + def _get_sft_dist(self, tensordict: TensorDictBase, **kwargs) -> D.Distribution: + """Get distribution suitable for SFT loss (response tokens only). + + vLLM does not return logits, so this method is not supported. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + raise NotImplementedError( + "vLLM does not return logits, so get_sft_dist is not supported" + ) + + def _get_rlhf_dist(self, tensordict: TensorDictBase, **kwargs) -> D.Distribution: + """Get distribution suitable for RLHF loss (assistant tokens only). + + vLLM does not return logits, so this method is not supported. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. + """ + raise NotImplementedError( + "vLLM does not return logits, so get_rlhf_dist is not supported" + ) + + def _get_generic_dist(self, tensordict: TensorDictBase, **kwargs) -> D.Distribution: + """Get distribution suitable for generic losses (all tokens). + + vLLM does not return logits, so this method is not supported. + + This is a provisional method that will be replaced by the `get_dist` method once we have a better masking strategy. 
+ """ + raise NotImplementedError( + "vLLM does not return logits, so get_generic_dist is not supported" + ) + class _RequestOutput_tc(TensorClass["nocast"]): + """TensorClass wrapper for vLLM RequestOutput.""" + request_id: str prompt: str - prompt_token_ids: str - prompt_logprobs: str - outputs: str + prompt_token_ids: torch.Tensor + prompt_logprobs: torch.Tensor + outputs: list # type: ignore finished: str metrics: str lora_request: str encoder_prompt: str encoder_prompt_token_ids: str - num_cached_tokens: str + num_cached_tokens: torch.Tensor def __post_init__(self): CompletionOutput_tc = vLLMWrapper.CompletionOutput_tc @@ -703,37 +1812,77 @@ def get_logprob(output): if len(outputs) == 1: self.outputs = outputs[0] else: - self.outputs = maybe_dense_stack(outputs) - if self.prompt_logprobs is not None: - self.prompt_logprobs = torch.tensor( - [ - v[int(tid)].logprob if v is not None else 0.0 - for v, tid in _zip_strict( - self.prompt_logprobs, self.prompt_token_ids - ) - ] - ) - self.prompt_token_ids = torch.as_tensor(self.prompt_token_ids) - self.num_cached_tokens = torch.as_tensor(self.num_cached_tokens) + # Check if we can stack the outputs (they should have the same shape) + try: + self.outputs = lazy_stack(outputs) + except RuntimeError: + # If stacking fails (different sizes), keep as list + self.outputs = outputs @classmethod - def from_request_output(cls, requests): - out = lazy_stack( - [ + def from_request_output( + cls, requests: RequestOutput | list[RequestOutput] + ) -> _RequestOutput_tc | list[_RequestOutput_tc]: + """Create _RequestOutput_tc from vLLM RequestOutput.""" + # Type assertions + assert isinstance( + requests, (RequestOutput, list) + ), f"requests must be RequestOutput or list, got {type(requests)}" + + # Check if we can stack the outputs + try: + out = lazy_stack( + [ + cls( + request_id=request.request_id, + prompt=request.prompt, + prompt_token_ids=torch.as_tensor(request.prompt_token_ids), + prompt_logprobs=torch.tensor( + [ + 
v[int(tid)].logprob if v is not None else 0.0 + for v, tid in _zip_strict( + request.prompt_logprobs, request.prompt_token_ids + ) + ] + ) + if request.prompt_logprobs is not None + else torch.tensor([]), + outputs=request.outputs, + finished=request.finished, + metrics=request.metrics, + lora_request=request.lora_request, + encoder_prompt=request.encoder_prompt, + encoder_prompt_token_ids=request.encoder_prompt_token_ids, + num_cached_tokens=torch.as_tensor(request.num_cached_tokens), + ) + for request in requests + ] + ) + return out + except RuntimeError: + # If stacking fails, return a list of individual _RequestOutput_tc objects + return [ cls( request_id=request.request_id, prompt=request.prompt, - prompt_token_ids=request.prompt_token_ids, - prompt_logprobs=request.prompt_logprobs, + prompt_token_ids=torch.as_tensor(request.prompt_token_ids), + prompt_logprobs=torch.tensor( + [ + v[int(tid)].logprob if v is not None else 0.0 + for v, tid in _zip_strict( + request.prompt_logprobs, request.prompt_token_ids + ) + ] + ) + if request.prompt_logprobs is not None + else torch.tensor([]), outputs=request.outputs, finished=request.finished, metrics=request.metrics, lora_request=request.lora_request, encoder_prompt=request.encoder_prompt, encoder_prompt_token_ids=request.encoder_prompt_token_ids, - num_cached_tokens=request.num_cached_tokens, + num_cached_tokens=torch.as_tensor(request.num_cached_tokens), ) for request in requests ] - ) - return out diff --git a/torchrl/objectives/llm/grpo.py b/torchrl/objectives/llm/grpo.py index 376b2e8cedd..be418f3d87e 100644 --- a/torchrl/objectives/llm/grpo.py +++ b/torchrl/objectives/llm/grpo.py @@ -5,6 +5,8 @@ from __future__ import annotations from collections import defaultdict, deque +from dataclasses import dataclass +from typing import Literal import torch from tensordict import ( @@ -16,16 +18,17 @@ TensorDictParams, ) from tensordict.nn import ( + ProbabilisticTensorDictModule, ProbabilisticTensorDictSequential, 
TensorDictModule, - TensorDictModuleBase, ) +from tensordict.utils import expand_as_right from torch import distributions as d - from torchrl._utils import logger as torchrl_logger from torchrl.envs.transforms.transforms import Transform +from torchrl.modules.llm import LLMWrapperBase from torchrl.objectives.ppo import ClipPPOLoss -from torchrl.objectives.utils import _maybe_get_or_select, _reduce, _sum_td_features +from torchrl.objectives.utils import _reduce, _sum_td_features class GRPOLossOutput(TensorClass["nocast"]): @@ -50,7 +53,7 @@ class GRPOLoss(ClipPPOLoss): loss = -min( weight * advantage, min(max(weight, 1-eps), 1+eps) * advantage) Args: - actor_network (ProbabilisticTensorDictSequential): policy operator. + actor_network (LLMWrapperBase): policy operator. .. note:: It is critical to keep your model in eval mode during GRPO training to ensure deterministic behavior and correct @@ -63,6 +66,15 @@ class GRPOLoss(ClipPPOLoss): A value of 1 indicates that all importance weights are equal (ideal case). If ESS drops or increases significantly, it usually indicates a problem with the model configuration, such as a train/eval mode mismatch or a large policy update. + .. note:: + The masking_strategy parameter is crucial for LLM training scenarios. It determines which tokens are included + in the loss computation: + - "sft": Only response tokens (excludes prompt tokens) - suitable for single-turn conversations + - "rlhf": Only assistant tokens (excludes user/system tokens) - suitable for multi-turn conversations + - "generic": All valid tokens (excludes padding tokens) - suitable for generic scenarios + + The masking strategy must match the strategy used for advantage computation to avoid shape mismatches. + Keyword Args: clip_epsilon (scalar, optional): weight clipping threshold in the clipped PPO loss equation. 
default: 0.2 @@ -93,34 +105,48 @@ class GRPOLoss(ClipPPOLoss): kl_to_ref_coeff (float, optional): coefficient for the KL divergence to the reference policy. Defaults to ``None`` (no KL divergence). kl_to_inference_coeff (float, optional): coefficient for the KL divergence to the inference policy. Defaults to ``None`` (no KL divergence). device (torch.device, optional): device of the buffers. Defaults to ``None``. + masking_strategy (Literal["sft", "rlhf", "generic"], optional): The masking strategy to use for distribution creation. + - "sft": Use prompt masking (response tokens only, suitable for single-turn) + - "rlhf": Use assistant masking (assistant tokens only, suitable for multi-turn) + - "generic": Use attention masking (all valid tokens) + Defaults to "sft" since we can't guarantee assistant masks are available. .. note:: Parameters and buffers from the policy / critic will not be cast to that device to ensure that the storages match the ones that are passed to other components, such as data collectors. """ - actor_network: TensorDictModule + actor_network: LLMWrapperBase critic_network: TensorDictModule actor_network_params: TensorDictParams critic_network_params: TensorDictParams target_actor_network_params: TensorDictParams target_critic_network_params: TensorDictParams + @dataclass + class _AcceptedKeys(ClipPPOLoss._AcceptedKeys): + """Maintains default values for all configurable tensordict keys. 
+ + This class defines which tensordict keys can be set using '.set_keys(key_name=key_value)' and their + default values + """ + + ref_log_probs: NestedKey = ("next", "ref_log_probs", "full") + def __init__( self, - actor_network: ProbabilisticTensorDictSequential - | TensorDictModuleBase - | None = None, + actor_network: LLMWrapperBase | None = None, *, clip_epsilon: float = 0.2, entropy_bonus: bool = True, samples_mc_entropy: int = 1, entropy_coeff: float = 0.01, gamma: float | None = None, - reduction: str = None, + reduction: str | None = None, clip_value: bool | float | None = None, kl_to_ref_coeff: float | None = None, kl_to_inference_coeff: float | None = None, - device: torch.device = None, + device: torch.device | None = None, + masking_strategy: Literal["sft", "rlhf", "generic"] = "sft", **kwargs, ): # Define clipping of the value loss @@ -143,12 +169,77 @@ def __init__( ) # We don't want to use the string action but the tokens self._set_in_keys() - self.set_keys(sample_log_prob="log_probs", action="tokens_response") + self.masking_strategy = masking_strategy + # Always use the full tokens for the action + self.set_keys(sample_log_prob=("log_probs", "full"), action=("tokens", "full")) # TODO: make this a buffer self.kl_to_ref_coeff = kl_to_ref_coeff self.kl_to_inference_coeff = kl_to_inference_coeff + def _get_cur_log_prob(self, tensordict): + """Override to use LLM-specific distribution with explicit masking strategy. + + This ensures that the loss is computed with the correct masking strategy, + and provides helpful error messages when there are shape mismatches. 
+ """ + if isinstance( + self.actor_network, + (ProbabilisticTensorDictSequential, ProbabilisticTensorDictModule), + ) or hasattr(self.actor_network, "get_dist"): + # Use the specified masking strategy + # dists are always defined over the whole sequence, so we can re-use the mask as the dist will always + # be a MaskedCategorical + # TODO: eventually, we want to always use `get_dist` and just pass the key of the mask + # Masks should contain: prompt and response masks, assistant, and attention. + # Additionally, we should make sure that the masks are properly updated when log-probs is called (using vllm and transformers) + # because in some instances it looks like they can be overwritten with None values. + if self.masking_strategy == "sft" and hasattr( + self.actor_network, "_get_sft_dist" + ): + dist = self.actor_network._get_sft_dist(tensordict) + elif self.masking_strategy == "rlhf" and hasattr( + self.actor_network, "_get_rlhf_dist" + ): + dist = self.actor_network._get_rlhf_dist(tensordict) + elif self.masking_strategy == "generic" and hasattr( + self.actor_network, "_get_generic_dist" + ): + dist = self.actor_network._get_generic_dist(tensordict) + elif hasattr(self.actor_network, "get_dist"): + # Fallback to generic distribution method + dist = self.actor_network.get_dist( + tensordict, + logits_key="logits", + ) + else: + raise NotImplementedError( + f"Actor network must have get_dist method or the appropriate method for " + f"masking strategy '{self.masking_strategy}'." + ) + + action = tensordict.get( + self.tensor_keys.action, + as_padded_tensor=True, + padding_side="left", + padding_value=-100, + ) + log_prob = dist.log_prob(action) + else: + raise NotImplementedError( + "Only probabilistic modules from tensordict.nn are currently supported. 
" + "If you need to implement a custom logic to retrieve the log-probs (to compute " + "the PPO objective) or the distribution (for the PPO entropy), please augment " + f"the {type(self).__class__} by implementing your own logic in _get_cur_log_prob." + ) + return log_prob, dist, False + def forward(self, tensordict: TensorDictBase) -> GRPOLossOutput: + # Some sanity checks and housekeeping: + # - We may not have the tokens yet. If not, we will use the tokenizer of the actor to tokenize the text. + # We default to history rather than text because the history will account for multiturn, or multimodal inputs. + if self.tensor_keys.action not in tensordict: + raise ValueError + tensordict = tensordict.copy() advantage = tensordict.get( self.tensor_keys.advantage, None, as_padded_tensor=True @@ -156,15 +247,24 @@ def forward(self, tensordict: TensorDictBase) -> GRPOLossOutput: log_weight, dist, kl_approx = self._log_weight( tensordict, adv_shape=advantage.shape[:-1] ) + mask = dist.mask # ESS for logging with torch.no_grad(): # In theory, ESS should be computed on particles sampled from the same source. Here we sample according # to different, unrelated trajectories, which is not standard. Still, it can give an idea of the weights' # dispersion. 
- lw = log_weight.squeeze() + lw = log_weight.squeeze(-1)[mask] + batch = mask.sum() ess = (2 * lw.logsumexp(0) - (2 * lw).logsumexp(0)).exp() - batch = log_weight.shape[0] + if advantage.ndim != log_weight.ndim: + raise ValueError( + f"advantage and log_weight must have the same number of dimensions, got {advantage.ndim=} and {log_weight.ndim=}" + ) + print(f"log_weight: {log_weight.shape}") + print(f"advantage: {advantage.shape}") + print(f"mask: {mask.shape}") + print(f"data: {tensordict}") gain1 = log_weight.exp() * advantage log_weight_clip = log_weight.clamp(*self._clip_bounds) @@ -191,14 +291,27 @@ def forward(self, tensordict: TensorDictBase) -> GRPOLossOutput: if value_clip_fraction is not None: td_out.set("value_clip_fraction", value_clip_fraction) - td_out.set("ESS", _reduce(ess, self.reduction) / batch) + td_out.set("ESS", _reduce(ess / batch, self.reduction)) td_out = td_out.named_apply( - lambda name, value: _reduce(value, reduction=self.reduction).squeeze(-1) + lambda name, value: _reduce( + value, reduction=self.reduction, mask=mask + ).squeeze(-1) if name.startswith("loss_") else value, ) if self.kl_to_ref_coeff is not None: - loss_kl, kl_penalty = self._kl_to_ref(tensordict) + # FIXME: parameterize this + loss_kl, kl_penalty = self._kl_to_ref( + tensordict, + mask=mask, + dist=dist, + ref_log_prob=tensordict.get( + self.tensor_keys.ref_log_probs, + as_padded_tensor=True, + padding_side="left", + padding_value=0.0, + ), + ) td_out["loss_kl_to_ref"] = loss_kl td_out["kl_to_ref"] = kl_penalty.detach() if self.kl_to_inference_coeff is not None: @@ -206,6 +319,8 @@ def forward(self, tensordict: TensorDictBase) -> GRPOLossOutput: tensordict, key=self.tensor_keys.sample_log_prob, coeff=self.kl_to_inference_coeff, + mask=mask, + dist=dist, ) td_out["loss_kl_to_inference"] = loss_kl td_out["kl_to_inference"] = kl_penalty.detach() @@ -218,6 +333,8 @@ def _kl_to_ref( key: NestedKey = ("next", "ref_log_prob"), ref_log_prob: torch.Tensor | None = None, coeff: 
float | None = None, + mask: torch.Tensor | None = None, + dist: d.Distribution | None = None, ): if coeff is None: coeff = self.kl_to_ref_coeff @@ -226,16 +343,27 @@ def _kl_to_ref( ref_log_prob = tensordict.get( key, as_padded_tensor=True, - ).squeeze(-1) + padding_side="left", + padding_value=0.0, + ) + if ref_log_prob is None: + raise KeyError( + f"Couldn't find the ref log-prob {key} in the input data ({tensordict.keys(True)=})." + ) + ref_log_prob = ref_log_prob.squeeze(-1) cur_log_prob = tensordict.get("_cur_log_prob") # TODO: remove this - assert cur_log_prob.shape == ref_log_prob.shape, ( - cur_log_prob.shape, - ref_log_prob.shape, - ) - mask = cur_log_prob != 0 - ref_log_prob = ref_log_prob[mask] - cur_log_prob = cur_log_prob[mask] + if cur_log_prob.shape != ref_log_prob.shape: + raise ValueError( + f"cur_log_prob and ref_log_prob must have the same shape, got {cur_log_prob.shape=} and {ref_log_prob.shape=}" + ) + if mask is not None: + ref_log_prob = torch.where( + expand_as_right(mask, ref_log_prob), ref_log_prob, 0.0 + ) + cur_log_prob = torch.where( + expand_as_right(mask, cur_log_prob), cur_log_prob, 0.0 + ) diff = ref_log_prob - cur_log_prob kl_penalty = (diff.expm1() - diff).mean() return coeff * kl_penalty, kl_penalty @@ -244,12 +372,15 @@ def _log_weight( self, tensordict: TensorDictBase, adv_shape: torch.Size ) -> tuple[torch.Tensor, d.Distribution, torch.Tensor]: - prev_log_prob = _maybe_get_or_select( - tensordict, + cur_log_prob, dist, is_composite = self._get_cur_log_prob(tensordict) + + prev_log_prob = tensordict.get( self.tensor_keys.sample_log_prob, - adv_shape, + as_padded_tensor=True, + padding_side="left", + padding_value=0.0, ) - padding_mask = prev_log_prob != 0 + if prev_log_prob is None: raise KeyError( f"Couldn't find the log-prob {self.tensor_keys.sample_log_prob} in the input data." @@ -259,8 +390,30 @@ def _log_weight( f"tensordict stored {self.tensor_keys.sample_log_prob} requires grad." 
) - cur_log_prob, dist, is_composite = self._get_cur_log_prob(tensordict) - cur_log_prob = torch.where(padding_mask, cur_log_prob, 0.0) + # Check for shape mismatches and provide helpful error messages + if cur_log_prob.shape != prev_log_prob.shape: + # Try to provide helpful debugging information + error_msg = ( + f"Shape mismatch detected in GRPOLoss: current log-prob shape {cur_log_prob.shape} " + f"!= previous log-prob shape {prev_log_prob.shape}. " + f"This usually indicates a mismatch between the masking strategy used for " + f"advantage computation and the masking strategy used for loss computation.\n" + f"Current masking strategy: '{self.masking_strategy}'\n" + f"Possible solutions:\n" + f"1. If using RLHF (multi-turn conversations), set masking_strategy='rlhf'\n" + f"2. If using SFT (single-turn conversations), set masking_strategy='sft'\n" + f"3. If using generic scenarios, set masking_strategy='generic'\n" + f"4. Ensure the advantage was computed with the same masking strategy as the loss" + ) + raise ValueError(error_msg) + + attention_mask = dist.mask + cur_log_prob = torch.where( + expand_as_right(attention_mask, cur_log_prob), cur_log_prob, 0.0 + ) + prev_log_prob = torch.where( + expand_as_right(attention_mask, prev_log_prob), prev_log_prob, 0.0 + ) if is_composite: raise NotImplementedError @@ -295,7 +448,7 @@ class MCAdvantage(Transform): Args: grpo_size (int): Number of trajectories to keep in memory for the advantage computation. - prompt_key (NestedKey): Key to the prompt in the tensordict. Defaults to "text". + prompt_key (NestedKey): Key to the prompt in the tensordict. Defaults to ("text", "prompt"). rewards_key (NestedKey): Key to the rewards in the tensordict. Defaults to ("next", "reward"). advantage_key (NestedKey): Key to the advantage in the tensordict. Defaults to "advantage". done_key (NestedKey): Key to the done state in the tensordict. Defaults to ("next", "done"). 
@@ -306,7 +459,7 @@ class MCAdvantage(Transform): def __init__( self, grpo_size: int, - prompt_key: NestedKey = "text", + prompt_key: NestedKey = "query", rewards_key: NestedKey = ("next", "reward"), advantage_key: NestedKey = "advantage", done_key: NestedKey = ("next", "done"), diff --git a/torchrl/objectives/llm/sft.py b/torchrl/objectives/llm/sft.py index 7bc83256bf1..1b2568c31f6 100644 --- a/torchrl/objectives/llm/sft.py +++ b/torchrl/objectives/llm/sft.py @@ -246,9 +246,9 @@ class _AcceptedKeys: Defaults to ``"log_probs"``. """ - history: NestedKey = ("next", "history") - ref_log_prob: NestedKey = ("next", "ref_log_prob") - log_probs: NestedKey = "log_probs" + history: NestedKey = ("history", "full") + ref_log_prob: NestedKey = ("next", "ref_log_prob", "full") + log_probs: NestedKey = ("log_probs", "full") default_keys = _AcceptedKeys tensor_keys: _AcceptedKeys @@ -335,23 +335,28 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase: # Gather history history: History = tensordict[self.tensor_keys.history] - # Apply tokenizer to history and gather mask - with torch.device( - self.device - ) if self.device is not None else contextlib.nullcontext(): - token_struct = history.apply_chat_template( - tokenizer=self.tokenizer, **self.tokenizer_kwargs - ) - if "assistant_masks" not in token_struct: - raise ValueError( - f"Assistant masks are not present in the token structure: {token_struct=}." 
+ # Try to get mask from td + token_struct = None + assistant_masks = tensordict.get(("masks", "all_assistant_mask"), as_list=True) + attention_mask = tensordict.get(("masks", "all_attention_mask"), as_list=True) + if assistant_masks is None: + # Apply tokenizer to history and gather mask + with torch.device( + self.device + ) if self.device is not None else contextlib.nullcontext(): + token_struct = history.apply_chat_template( + tokenizer=self.tokenizer, **self.tokenizer_kwargs + ) + if "assistant_masks" not in token_struct: + raise ValueError( + f"Assistant masks are not present in the token structure: {token_struct=}." + ) + assistant_masks = token_struct.get( + "assistant_masks", + as_list=True, ) - assistant_masks = token_struct.get( - "assistant_masks", - as_list=True, - ) + attention_mask = token_struct.get("attention_mask", as_list=True) assistant_masks = [mask.bool() for mask in assistant_masks] - attention_mask = token_struct.get("attention_mask", as_list=True) attention_mask = [mask.bool() for mask in attention_mask] assistant_masks = [ mask & a_mask for mask, a_mask in zip(assistant_masks, attention_mask) @@ -359,12 +364,8 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase: if not any(mask.any(-1).all() for mask in assistant_masks): raise ValueError("Some inputs have no valid assistant masks.") + input_loss = tensordict.select(self.tensor_keys.history) - if ( - isinstance(self.tensor_keys.history, tuple) - and self.tensor_keys.history[0] == "next" - ): - input_loss = input_loss["next"] with torch.device( self.device @@ -376,13 +377,19 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase: self.tensor_keys.log_probs, as_list=True, ) + # apply mask if not all( mask.shape == lp.shape for mask, lp in _zip_strict(assistant_masks, log_probs) ): + if token_struct is not None: + suffix = f"Tokens from current template: {[inp.shape for inp in token_struct.get('input_ids', as_padded_tensor=True)]}" + else: + suffix = "" raise 
ValueError( - f"Assistant masks and log_probs have different shapes: {[mask.shape for mask in assistant_masks]} vs {[lp.shape for lp in log_probs]}. Tokens from current template: {[inp.shape for inp in token_struct.get('input_ids', as_padded_tensor=True)]}" + f"Assistant masks and log_probs have different shapes: {[mask.shape for mask in assistant_masks]} vs " + f"{[lp.shape for lp in log_probs]}. {suffix}" ) log_probs_masked = [ @@ -413,7 +420,8 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase: ) if ref_log_probs is None: raise ValueError( - "Reference log probs not found in tensordict but kl_to_ref_coeff was set" + f"Reference log probs not found in tensordict at key {self.tensor_keys.ref_log_prob} but kl_to_ref_coeff was set. " + f"Existing keys in tensordict: {set(tensordict.keys(include_nested=True, leaves_only=True))}" ) loss_kl, kl_penalty = self._kl_to_ref( @@ -431,7 +439,7 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase: ref_log_probs = tensordict.get(self.tensor_keys.ref_log_prob, as_list=True) if ref_log_probs is None: raise ValueError( - f"Reference log probs not found at {self.tensor_keys.ref_log_prob=} in tensordict but loss_function is 'minor_sft'" + f"Reference log probs not found at {self.tensor_keys.ref_log_prob=} in tensordict with keys {tensordict.keys()} but loss_function is 'minor_sft'" ) # we need to re-sum ref_log_probs as they are not summed per-sequence diff --git a/torchrl/objectives/ppo.py b/torchrl/objectives/ppo.py index e9de0d7753c..152804512e0 100644 --- a/torchrl/objectives/ppo.py +++ b/torchrl/objectives/ppo.py @@ -609,7 +609,6 @@ def _get_cur_log_prob(self, tensordict): self.actor_network ) if self.functional else contextlib.nullcontext(): dist = self.actor_network.get_dist(tensordict) - is_composite = isinstance(dist, CompositeDistribution) if is_composite: diff --git a/torchrl/objectives/utils.py b/torchrl/objectives/utils.py index 59a1897053f..8a95b6979da 100644 --- 
a/torchrl/objectives/utils.py +++ b/torchrl/objectives/utils.py @@ -596,14 +596,31 @@ def new_func(*args, in_dims=in_dims, out_dims=out_dims, **kwargs): return new_func -def _reduce(tensor: torch.Tensor, reduction: str) -> float | torch.Tensor: - """Reduces a tensor given the reduction method.""" +def _reduce( + tensor: torch.Tensor, reduction: str, mask: torch.Tensor | None = None +) -> float | torch.Tensor: + """Reduces a tensor given the reduction method. + + Args: + tensor (torch.Tensor): The tensor to reduce. + reduction (str): The reduction method. + mask (torch.Tensor, optional): A mask to apply to the tensor before reducing. + + Returns: + float | torch.Tensor: The reduced tensor. + """ if reduction == "none": result = tensor elif reduction == "mean": - result = tensor.mean() + if mask is not None: + result = tensor[mask].mean() + else: + result = tensor.mean() elif reduction == "sum": - result = tensor.sum() + if mask is not None: + result = tensor[mask].sum() + else: + result = tensor.sum() else: raise NotImplementedError(f"Unknown reduction method {reduction}") return result @@ -668,9 +685,20 @@ def _sum_td_features(data: TensorDictBase) -> torch.Tensor: return data.sum(dim="feature", reduce=True) -def _maybe_get_or_select(td, key_or_keys, target_shape=None): +def _maybe_get_or_select( + td, + key_or_keys, + target_shape=None, + padding_side: str = "left", + padding_value: int = 0, +): if isinstance(key_or_keys, (str, tuple)): - return td.get(key_or_keys, as_padded_tensor=True) + return td.get( + key_or_keys, + as_padded_tensor=True, + padding_side=padding_side, + padding_value=padding_value, + ) result = td.select(*key_or_keys) if target_shape is not None and result.shape != target_shape: result.batch_size = target_shape diff --git a/tutorials/sphinx-tutorials/llm_wrappers.py b/tutorials/sphinx-tutorials/llm_wrappers.py new file mode 100644 index 00000000000..fe1ae0f9411 --- /dev/null +++ b/tutorials/sphinx-tutorials/llm_wrappers.py @@ -0,0 +1,363 @@ 
+""" +LLM Wrappers in TorchRL +======================= + +This tutorial demonstrates how to use TorchRL's LLM wrappers for integrating Large Language Models +into reinforcement learning workflows. TorchRL provides two main wrappers: + +- :class:`~torchrl.modules.llm.policies.vLLMWrapper` for vLLM models +- :class:`~torchrl.modules.llm.policies.TransformersWrapper` for Hugging Face Transformers models + +Both wrappers provide a unified API with consistent input/output interfaces using TensorClass objects, +making them interchangeable in RL environments. + +Key Features: +- Multiple input modes: history, text, or tokens +- Configurable outputs: text, tokens, masks, and log probabilities +- TensorClass-based structured outputs +- Seamless integration with TorchRL's TensorDict framework +""" + +# %% +# Setup and Imports +# ----------------- +# First, let's set up the environment and import the necessary modules. + +import os +import warnings + +# Suppress warnings for cleaner output +warnings.filterwarnings("ignore") + +# Set vLLM environment variables +os.environ["VLLM_USE_V1"] = "0" + +import torch +from tensordict import TensorDict +from torchrl.data.llm import History +from torchrl.modules.llm.policies import TransformersWrapper, vLLMWrapper + +# %% +# Example 1: vLLM Wrapper with History Input +# ------------------------------------------ +# The vLLM wrapper is optimized for high-performance inference and is ideal for production environments. 
+ +try: + from transformers import AutoTokenizer + from vllm import LLM + + print("Loading vLLM model...") + model = LLM(model="Qwen/Qwen2.5-0.5B") + tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B") + + # Create conversation history + chats = [ + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the capital of France?"}, + ], + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the capital of Canada?"}, + ], + ] + history = History.from_chats(chats) + + # Create vLLM wrapper with history input (recommended for RL environments) + vllm_wrapper = vLLMWrapper( + model, + tokenizer=tokenizer, + input_mode="history", + generate=True, + return_log_probs=True, + return_text=True, + return_tokens=True, + return_masks=True, + pad_output=False, # Use False to avoid stacking issues + ) + + print(f"vLLM wrapper input keys: {vllm_wrapper.in_keys}") + print(f"vLLM wrapper output keys: {vllm_wrapper.out_keys}") + + # Process the data + data_history = TensorDict(history=history, batch_size=(2,)) + result = vllm_wrapper(data_history) + + print("vLLM Results:") + print(f"Generated responses: {result['text'].response}") + print( + f"Response tokens shape: {result['tokens'].response.shape if result['tokens'].response is not None else 'None'}" + ) + print(f"Log probabilities available: {result['log_probs'].response is not None}") + +except ImportError: + print("vLLM not available, skipping vLLM example") + +# %% +# Example 2: Transformers Wrapper with History Input +# -------------------------------------------------- +# The Transformers wrapper provides more flexibility and is great for research and development. 
+ +try: + from transformers import AutoModelForCausalLM, AutoTokenizer + + print("\nLoading Transformers model...") + transformers_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B") + transformers_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B") + + # Create Transformers wrapper with same interface + transformers_wrapper = TransformersWrapper( + transformers_model, + tokenizer=transformers_tokenizer, + input_mode="history", + generate=True, + return_log_probs=True, + return_text=True, + return_tokens=True, + return_masks=True, + pad_output=True, # Transformers typically use padded outputs + generate_kwargs={"max_new_tokens": 50}, + ) + + print(f"Transformers wrapper input keys: {transformers_wrapper.in_keys}") + print(f"Transformers wrapper output keys: {transformers_wrapper.out_keys}") + + # Process the same data + result_tf = transformers_wrapper(data_history) + + print("Transformers Results:") + print(f"Generated responses: {result_tf['text'].response}") + print( + f"Response tokens shape: {result_tf['tokens'].response.shape if result_tf['tokens'].response is not None else 'None'}" + ) + print(f"Log probabilities available: {result_tf['log_probs'].response is not None}") + +except ImportError: + print("Transformers not available, skipping Transformers example") + +# %% +# Example 3: Text Input Mode +# -------------------------- +# Both wrappers support direct text input for simpler use cases. 
+ +try: + # Create text input data + prompts = ["The capital of France is", "The capital of Canada is"] + data_text = TensorDict(text=prompts, batch_size=(2,)) + + # vLLM with text input + vllm_text_wrapper = vLLMWrapper( + model, + tokenizer=tokenizer, + input_mode="text", + generate=True, + return_text=True, + return_tokens=True, + pad_output=False, + ) + + result_vllm_text = vllm_text_wrapper(data_text) + print("\nvLLM Text Input Results:") + print(f"Generated text: {result_vllm_text['text'].response}") + + # Transformers with text input + transformers_text_wrapper = TransformersWrapper( + transformers_model, + tokenizer=transformers_tokenizer, + input_mode="text", + generate=True, + return_text=True, + return_tokens=True, + pad_output=True, + generate_kwargs={"max_new_tokens": 20}, + ) + + result_tf_text = transformers_text_wrapper(data_text) + print("Transformers Text Input Results:") + print(f"Generated text: {result_tf_text['text'].response}") + +except NameError: + print("Models not loaded, skipping text input example") + +# %% +# Example 4: Log Probabilities Only Mode +# -------------------------------------- +# Both wrappers can compute log probabilities without generating new tokens. 
+ +try: + # vLLM log-probs only + vllm_logprobs_wrapper = vLLMWrapper( + model, + tokenizer=tokenizer, + input_mode="history", + generate=False, # Only compute log-probs + return_log_probs=True, + return_text=True, + return_tokens=True, + pad_output=False, + ) + + result_vllm_lp = vllm_logprobs_wrapper(data_history) + print("\nvLLM Log Probabilities:") + print( + f"Prompt log-probs shape: {result_vllm_lp['log_probs'].prompt.shape if result_vllm_lp['log_probs'].prompt is not None else 'None'}" + ) + + # Transformers log-probs only + transformers_logprobs_wrapper = TransformersWrapper( + transformers_model, + tokenizer=transformers_tokenizer, + input_mode="history", + generate=False, + return_log_probs=True, + return_text=True, + return_tokens=True, + pad_output=True, + ) + + result_tf_lp = transformers_logprobs_wrapper(data_history) + print("Transformers Log Probabilities:") + print( + "Prompt log-probs shape: {result_tf_lp['log_probs'].prompt.shape if result_tf_lp['log_probs'].prompt is not None else 'None'}" + ) + +except NameError: + print("Models not loaded, skipping log-probs example") + +# %% +# Example 5: TensorClass Structure Exploration +# ------------------------------------------- +# Let's explore the structured outputs provided by both wrappers. 
+ +try: + # Get a result from vLLM wrapper + result = vllm_wrapper(data_history) + + print("\nTensorClass Structure Analysis:") + print("=" * 50) + + # Explore Text TensorClass + print("\nText TensorClass:") + print(f" Fields: {list(result['text'].__class__.__annotations__.keys())}") + print(f" Prompt: {result['text'].prompt}") + print(f" Response: {result['text'].response}") + print(f" Full: {result['text'].full}") + print(f" Padded: {result['text'].padded}") + + # Explore Tokens TensorClass + print("\nTokens TensorClass:") + print(f" Fields: {list(result['tokens'].__class__.__annotations__.keys())}") + print( + f" Prompt tokens shape: {result['tokens'].prompt.shape if result['tokens'].prompt is not None else 'None'}" + ) + print( + f" Response tokens shape: {result['tokens'].response.shape if result['tokens'].response is not None else 'None'}" + ) + print( + f" Full tokens shape: {result['tokens'].full.shape if result['tokens'].full is not None else 'None'}" + ) + + # Explore LogProbs TensorClass + print("\nLogProbs TensorClass:") + print(f" Fields: {list(result['log_probs'].__class__.__annotations__.keys())}") + print( + f" Prompt log-probs shape: {result['log_probs'].prompt.shape if result['log_probs'].prompt is not None else 'None'}" + ) + print( + f" Response log-probs shape: {result['log_probs'].response.shape if result['log_probs'].response is not None else 'None'}" + ) + + # Explore Masks TensorClass + print("\nMasks TensorClass:") + print(f" Fields: {list(result['masks'].__class__.__annotations__.keys())}") + print( + f" Attention mask shape: {result['masks'].all_attention_mask.shape if result['masks'].all_attention_mask is not None else 'None'}" + ) + print( + f" Assistant mask shape: {result['masks'].all_assistant_mask.shape if result['masks'].all_assistant_mask is not None else 'None'}" + ) + +except NameError: + print("Models not loaded, skipping structure exploration") + +# %% +# Example 6: Error Handling and Validation +# 
---------------------------------------- +# Both wrappers provide clear error messages for invalid inputs. + +print("\nError Handling Examples:") +print("=" * 30) + +# Example of missing required key +try: + wrapper = vLLMWrapper( + model, + tokenizer=tokenizer, + input_mode="tokens", + input_key="tokens", + ) + result = wrapper(TensorDict(batch_size=(2,))) # Missing tokens key +except (ValueError, NameError) as e: + print(f"Expected error for missing key: {e}") + +# Example of invalid input mode +try: + wrapper = vLLMWrapper( + model, + tokenizer=tokenizer, + input_mode="invalid_mode", # Invalid mode + ) +except ValueError as e: + print(f"Expected error for invalid input mode: {e}") + +# %% +# Example 7: RL Environment Integration +# ------------------------------------ +# The wrappers are designed to work seamlessly with TorchRL environments. + +print("\nRL Environment Integration:") +print("=" * 35) + +# Simulate an RL environment step +try: + # Create a simple environment state + env_state = TensorDict( + { + "history": history, + "action_mask": torch.ones(2, 1000), # Example action mask + "reward": torch.zeros(2), + "done": torch.zeros(2, dtype=torch.bool), + }, + batch_size=(2,), + ) + + # Use the wrapper as a policy + action_output = vllm_wrapper(env_state) + + print("Environment integration successful!") + print(f"Generated actions: {action_output['text'].response}") + print( + f"Action log probabilities: {action_output['log_probs'].response is not None}" + ) + +except NameError: + print("Models not loaded, skipping RL integration example") + +# %% +# Conclusion +# ---------- +# TorchRL's LLM wrappers provide a unified interface for integrating Large Language Models +# into reinforcement learning workflows. Key benefits include: +# +# 1. **Consistent API**: Both vLLM and Transformers wrappers share the same interface +# 2. **Flexible Input Modes**: Support for history, text, and token inputs +# 3. 
**Structured Outputs**: TensorClass-based outputs for easy data handling +# 4. **RL Integration**: Seamless integration with TorchRL's TensorDict framework +# 5. **Configurable Outputs**: Selective return of text, tokens, masks, and log probabilities +# +# The wrappers are designed to be interchangeable, allowing you to switch between +# different LLM backends without changing your RL code. + +print("\n" + "=" * 60) +print("Tutorial completed successfully!") +print("=" * 60) From 92f7b8a32d18e3f9d6f0ac1075e15348a87c64d6 Mon Sep 17 00:00:00 2001 From: vmoens Date: Mon, 7 Jul 2025 17:17:24 +0100 Subject: [PATCH 02/28] amend --- sota-implementations/grpo/grpo-async.py | 3 +++ torchrl/collectors/collectors.py | 5 ++++- torchrl/collectors/llm/ray_collector.py | 3 +++ torchrl/objectives/llm/grpo.py | 5 +---- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/sota-implementations/grpo/grpo-async.py b/sota-implementations/grpo/grpo-async.py index 70323f7836a..710dba53b8d 100644 --- a/sota-implementations/grpo/grpo-async.py +++ b/sota-implementations/grpo/grpo-async.py @@ -177,6 +177,9 @@ def train( start_time = time.time() for step in range(total_steps): + if not collector.is_running(): + torchrl_logger.info("Collector stopped, stopping training") + break pbar.update(1) pbar.set_description(f"Step {step}, writes: {replay_buffer.write_count}") diff --git a/torchrl/collectors/collectors.py b/torchrl/collectors/collectors.py index d3ad80aa829..38cab4d90c3 100644 --- a/torchrl/collectors/collectors.py +++ b/torchrl/collectors/collectors.py @@ -1358,7 +1358,7 @@ def start(self): """ if self.replay_buffer is None: raise RuntimeError("Replay buffer must be defined for execution.") - if not hasattr(self, "_thread") or not self._thread.is_alive(): + if not self.is_running(): self._stop = False self._thread = threading.Thread(target=self._run_iterator) self._thread.daemon = ( @@ -1371,6 +1371,9 @@ def _run_iterator(self): if self._stop: return + def is_running(self): + 
return hasattr(self, "_thread") and self._thread.is_alive() + def async_shutdown( self, timeout: float | None = None, close_env: bool = True ) -> None: diff --git a/torchrl/collectors/llm/ray_collector.py b/torchrl/collectors/llm/ray_collector.py index 255d92f2192..32330e6ed9d 100644 --- a/torchrl/collectors/llm/ray_collector.py +++ b/torchrl/collectors/llm/ray_collector.py @@ -170,6 +170,9 @@ def start(self): pending_task = self._collector.start.remote() return ray.get(pending_task) + def is_running(self): + return ray.get(self._collector.is_running.remote()) + def shutdown(self): """Shuts down the collector.""" pending_task = self._collector.shutdown.remote() diff --git a/torchrl/objectives/llm/grpo.py b/torchrl/objectives/llm/grpo.py index be418f3d87e..5195fa6ca3c 100644 --- a/torchrl/objectives/llm/grpo.py +++ b/torchrl/objectives/llm/grpo.py @@ -261,10 +261,6 @@ def forward(self, tensordict: TensorDictBase) -> GRPOLossOutput: raise ValueError( f"advantage and log_weight must have the same number of dimensions, got {advantage.ndim=} and {log_weight.ndim=}" ) - print(f"log_weight: {log_weight.shape}") - print(f"advantage: {advantage.shape}") - print(f"mask: {mask.shape}") - print(f"data: {tensordict}") gain1 = log_weight.exp() * advantage log_weight_clip = log_weight.clamp(*self._clip_bounds) @@ -503,6 +499,7 @@ def _inv_call(self, tensordict: TensorDictBase) -> TensorDictBase: torchrl_logger.info(f"Computing advantage for {prompt=}") # Cat is the most robust way to combine the trajs tds = torch.cat(list(self.queues[prompt]), -1) + del self.queues[prompt] # Collect rewards reward = tds.get(self.rewards_key, as_nested_tensor=True) reward_mean = reward.values().mean() From f4fdb3c408aec122451b9a989b72d620361142ff Mon Sep 17 00:00:00 2001 From: vmoens Date: Mon, 7 Jul 2025 17:29:42 +0100 Subject: [PATCH 03/28] amend --- sota-implementations/grpo/grpo-sync.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sota-implementations/grpo/grpo-sync.py 
b/sota-implementations/grpo/grpo-sync.py index b0f9a081c7f..01ddf520d6f 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -156,6 +156,11 @@ def train( project="grpo-sync", exp_name="-".join(["grpo-sync"] + experiment_name) ) + # Wait for the replay buffer to be filled + while (replay_buffer.write_count < replay_buffer.batch_size): + torchrl_logger.info(f"Waiting for replay buffer to be filled, {replay_buffer.write_count=}") + time.sleep(1) + # Training loop torchrl_logger.info("Starting training loop.") pbar = tqdm.tqdm(collector) From 60169a8f1c6c564524ea6b0627b2e3f276bc649c Mon Sep 17 00:00:00 2001 From: vmoens Date: Mon, 7 Jul 2025 17:37:31 +0100 Subject: [PATCH 04/28] amend --- torchrl/data/replay_buffers/ray_buffer.py | 4 ++++ torchrl/data/replay_buffers/replay_buffers.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/torchrl/data/replay_buffers/ray_buffer.py b/torchrl/data/replay_buffers/ray_buffer.py index 7ab7ce2958c..65dadc1f4f2 100644 --- a/torchrl/data/replay_buffers/ray_buffer.py +++ b/torchrl/data/replay_buffers/ray_buffer.py @@ -163,6 +163,10 @@ def _replay_lock(self): """ return contextlib.nullcontext() + @property + def batch_size(self): + return ray.get(self._rb._getattr.remote("_batch_size")) + def sample(self, *args, **kwargs): pending_task = self._rb.sample.remote(*args, **kwargs) return ray.get(pending_task) diff --git a/torchrl/data/replay_buffers/replay_buffers.py b/torchrl/data/replay_buffers/replay_buffers.py index 93dc73d2fb1..5726a1ea704 100644 --- a/torchrl/data/replay_buffers/replay_buffers.py +++ b/torchrl/data/replay_buffers/replay_buffers.py @@ -383,6 +383,10 @@ def set_rng(self, generator): def dim_extend(self): return self._dim_extend + @property + def batch_size(self): + return self._batch_size + @dim_extend.setter def dim_extend(self, value): if ( From 744538b152f3aaeaaa583aa3c53679ca3d90cc91 Mon Sep 17 00:00:00 2001 From: vmoens Date: Mon, 7 Jul 2025 17:50:17 +0100 
Subject: [PATCH 05/28] amend --- sota-implementations/grpo/grpo-sync.py | 13 ++++++++----- torchrl/data/replay_buffers/replay_buffers.py | 7 +++++++ torchrl/data/replay_buffers/writers.py | 2 +- torchrl/envs/transforms/rb_transforms.py | 2 +- 4 files changed, 17 insertions(+), 7 deletions(-) diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index 01ddf520d6f..0cb1028945a 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -156,11 +156,6 @@ def train( project="grpo-sync", exp_name="-".join(["grpo-sync"] + experiment_name) ) - # Wait for the replay buffer to be filled - while (replay_buffer.write_count < replay_buffer.batch_size): - torchrl_logger.info(f"Waiting for replay buffer to be filled, {replay_buffer.write_count=}") - time.sleep(1) - # Training loop torchrl_logger.info("Starting training loop.") pbar = tqdm.tqdm(collector) @@ -171,6 +166,14 @@ def train( global_step = 0 start_time = time.time() for data in pbar: + # Wait for the replay buffer to be filled - when reasoning, we collect trajectories + #  so the buffer may not be filled straight away + while replay_buffer.write_count < replay_buffer.batch_size: + torchrl_logger.info( + f"Waiting for replay buffer to be filled, {replay_buffer.write_count=}" + ) + time.sleep(1) + pbar.update(1) # data is None as the collector directly writes to the replay buffer diff --git a/torchrl/data/replay_buffers/replay_buffers.py b/torchrl/data/replay_buffers/replay_buffers.py index 5726a1ea704..6c3756e470c 100644 --- a/torchrl/data/replay_buffers/replay_buffers.py +++ b/torchrl/data/replay_buffers/replay_buffers.py @@ -385,6 +385,13 @@ def dim_extend(self): @property def batch_size(self): + """The batch size of the replay buffer. + + The batch size can be overriden by setting the `batch_size` parameter in the :meth:`sample` method. 
+ + It defines both the number of samples returned by :meth:`sample` and the number of samples that are + yielded by the :class:`ReplayBuffer` iterator. + """ return self._batch_size @dim_extend.setter diff --git a/torchrl/data/replay_buffers/writers.py b/torchrl/data/replay_buffers/writers.py index 245e0b55913..237d17abae0 100644 --- a/torchrl/data/replay_buffers/writers.py +++ b/torchrl/data/replay_buffers/writers.py @@ -189,7 +189,7 @@ def extend(self, data: Sequence) -> torch.Tensor: else: batch_size = len(tree_leaves(data)[0]) if batch_size == 0: - raise RuntimeError("Expected at least one element in extend.") + raise RuntimeError(f"Expected at least one element in extend. Got {data=}") device = data.device if hasattr(data, "device") else None max_size_along0 = self._storage._max_size_along_dim0(batched_data=data) index = ( diff --git a/torchrl/envs/transforms/rb_transforms.py b/torchrl/envs/transforms/rb_transforms.py index 8507ce6d8f3..a2ff42522d2 100644 --- a/torchrl/envs/transforms/rb_transforms.py +++ b/torchrl/envs/transforms/rb_transforms.py @@ -192,7 +192,7 @@ def _inv_call(self, tensordict: TensorDictBase) -> TensorDictBase: self._validate() total_cat = self._append_tensordict(tensordict) - if total_cat.shape[-1] >= self.n_steps: + if total_cat.shape[-1] > self.n_steps: out = _multi_step_func( total_cat, done_key=self.done_key, From 60b0307ff83a0e9dca25cd5ee0bee599fb64b9d0 Mon Sep 17 00:00:00 2001 From: vmoens Date: Mon, 7 Jul 2025 17:51:00 +0100 Subject: [PATCH 06/28] amend --- sota-implementations/grpo/grpo-sync.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index 0cb1028945a..63efa8e8475 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -168,11 +168,12 @@ def train( for data in pbar: # Wait for the replay buffer to be filled - when reasoning, we collect trajectories #  so the buffer may not 
be filled straight away - while replay_buffer.write_count < replay_buffer.batch_size: - torchrl_logger.info( + if replay_buffer.write_count < replay_buffer.batch_size: + torchrl_logger.info( f"Waiting for replay buffer to be filled, {replay_buffer.write_count=}" ) - time.sleep(1) + continue + pbar.update(1) From 18ee85f54a563785ea1391c9ca7522e775407c5e Mon Sep 17 00:00:00 2001 From: vmoens Date: Mon, 7 Jul 2025 17:56:55 +0100 Subject: [PATCH 07/28] amend --- sota-implementations/grpo/grpo-sync.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index 63efa8e8475..b65bd9f3e0d 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -102,6 +102,9 @@ def train( masking_strategy="rlhf" if cfg.env.reasoning else "sft", device=train_device, ) + if cfg.env.reasoning: + # TODO: this is clunky, we should find a way to do this more naturally + loss_fn.set_keys(sample_log_prob=("next", "log_probs", "full")) if cfg.model.compile: loss_fn = torch.compile(loss_fn) From ba224a59f88a091b95a11273b7f99e3a176b5bd6 Mon Sep 17 00:00:00 2001 From: vmoens Date: Mon, 7 Jul 2025 18:04:19 +0100 Subject: [PATCH 08/28] amend --- sota-implementations/grpo/grpo-sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index b65bd9f3e0d..f9d795632bd 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -286,7 +286,7 @@ def train( [ t.numel() for t in rb_content.get( - "tokens_response", as_list=True + ("tokens", "response"), as_list=True ) ], dtype=torch.float, From ed332cdb5316847a9482853e6c7b73682b67ae62 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 10:30:36 +0100 Subject: [PATCH 09/28] amend --- sota-implementations/grpo/grpo-async.py | 84 +++------------- sota-implementations/grpo/grpo-sync.py | 98 
+++--------------- sota-implementations/grpo/grpo_utils.py | 94 ++++++++++++++++++ test/llm/mocking_classes_llm.py | 2 +- test/llm/test_collectors.py | 127 ++++++++++-------------- torchrl/collectors/llm/base.py | 26 +++-- torchrl/envs/llm/chat.py | 100 ++++++++++++++++++- torchrl/envs/llm/envs.py | 10 ++ torchrl/envs/utils.py | 2 + 9 files changed, 302 insertions(+), 241 deletions(-) diff --git a/sota-implementations/grpo/grpo-async.py b/sota-implementations/grpo/grpo-async.py index 710dba53b8d..db5ac02410d 100644 --- a/sota-implementations/grpo/grpo-async.py +++ b/sota-implementations/grpo/grpo-async.py @@ -31,6 +31,7 @@ compute_device_allocation, get_inference_model, get_train_model, + log_training_metrics, make_env, make_weight_updater, ) @@ -239,76 +240,19 @@ def train( # Update metrics if (step % cfg.train.logging_frequency) == 0: - with torch.no_grad(): - rb_content = replay_buffer[:] - step_count = ( - rb_content.get(("next", "step_count")).view(-1).float().mean() - ) - batch_policy_version = batch["next", "policy_version"].view(-1).min() - batch_policy_age = collector.policy_version - batch_policy_version - metrics = { - "step_count from buffer": float(step_count), - "reward from buffer": float( - torch.cat( - rb_content.get(("next", "reward"), as_list=True) - ).mean() - ), - "kl_penalty (inference to ref) from buffer": float( - torch.cat( - rb_content.get(("next", "kl_penalty"), as_list=True) - ).mean() - ), - "seq_length from buffer": float( - torch.tensor( - [ - t.numel() - for t in rb_content.get( - ("tokens", "response"), as_list=True - ) - ], - dtype=torch.float, - ).mean() - ), - "ESS, from loss": float(loss.ESS), - "loss_objective, from loss": float(loss.loss_objective), - "clip_fraction, from loss": float(loss.clip_fraction), - "kl_approx (train to inference), from loss": float(loss.kl_approx), - "kl_to_inference (train to inference - differentiable), from loss": float( - loss.kl_to_inference.mean() - ), - "kl_to_ref, from loss": 
float(loss.kl_to_ref.mean()), - "loss_kl_to_inference, from loss": float( - loss.loss_kl_to_inference.mean() - ), - "loss_kl_to_ref, from loss": float(loss.loss_kl_to_ref.mean()), - "entropy loss, from loss": float(loss.loss_entropy.mean()), - "grad_norm": float(grad_norm) - if step % cfg.train.gradient_accumulation_steps == 0 - else metrics.get("grad_norm", 0.0), - "write_count, from buffer": int(replay_buffer.write_count), - # how many gradient steps per write - "gradient_step_throughput (gradient step per write)": float( - step / replay_buffer.write_count - ), - # how many optim steps per write - "optim_step_throughput (optim step per write)": float( - (step // cfg.train.gradient_accumulation_steps) - / replay_buffer.write_count - ), - "data_read_count (total)": data_read_count, - "current_policy_version (collector)": collector.policy_version, - # FIXME: Assume batch is a single trajectory - # FIXME: The addition of the transform after the env instantiation + _shuttle creation - # is messed up - we need the next data - "batch_policy_version (sampled batch)": batch_policy_version, - "batch_policy_age (sampled batch)": batch_policy_age, - "throughput (steps per second)": float( - step / (time.time() - start_time) - ), - } - for name, value in metrics.items(): - wandb_logger.log_scalar(name, value) - wandb_logger.log_str("history", history_str, step=step) + log_training_metrics( + wandb_logger=wandb_logger, + replay_buffer=replay_buffer, + batch=batch, + loss=loss, + grad_norm=grad_norm, + global_step=step, + data_read_count=data_read_count, + collector=collector, + start_time=start_time, + gradient_accumulation_steps=cfg.train.gradient_accumulation_steps, + history_str=history_str, + ) # Update policy weights if step % cfg.train.weight_update_frequency == 0: diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index f9d795632bd..df09f876b85 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ 
b/sota-implementations/grpo/grpo-sync.py @@ -32,6 +32,7 @@ compute_device_allocation, get_inference_model, get_train_model, + log_training_metrics, make_env, make_weight_updater, ) @@ -170,13 +171,12 @@ def train( start_time = time.time() for data in pbar: # Wait for the replay buffer to be filled - when reasoning, we collect trajectories - #  so the buffer may not be filled straight away + # so the buffer may not be filled straight away if replay_buffer.write_count < replay_buffer.batch_size: torchrl_logger.info( f"Waiting for replay buffer to be filled, {replay_buffer.write_count=}" ) continue - pbar.update(1) @@ -255,87 +255,19 @@ def train( # Update metrics if (global_step % cfg.train.logging_frequency) == 0: - with torch.no_grad(): - rb_content = replay_buffer[:] - step_count = ( - rb_content.get(("next", "step_count")) - .view(-1) - .float() - .mean() - ) - batch_policy_version = ( - batch["next", "policy_version"].view(-1).min() - ) - batch_policy_age = ( - collector.policy_version - batch_policy_version - ) - metrics = { - "step_count from buffer": float(step_count), - "reward from buffer": float( - torch.cat( - rb_content.get(("next", "reward"), as_list=True) - ).mean() - ), - "kl_penalty (inference to ref) from buffer": float( - torch.cat( - rb_content.get(("next", "kl_penalty"), as_list=True) - ).mean() - ), - "seq_length from buffer": float( - torch.tensor( - [ - t.numel() - for t in rb_content.get( - ("tokens", "response"), as_list=True - ) - ], - dtype=torch.float, - ).mean() - ), - "ESS, from loss": float(loss.ESS), - "loss_objective, from loss": float(loss.loss_objective), - "clip_fraction, from loss": float(loss.clip_fraction), - "kl_approx (train to inference), from loss": float( - loss.kl_approx - ), - "kl_to_inference (train to inference - differentiable), from loss": float( - loss.kl_to_inference.mean() - ), - "kl_to_ref, from loss": float(loss.kl_to_ref.mean()), - "loss_kl_to_inference, from loss": float( - loss.loss_kl_to_inference.mean() - 
), - "loss_kl_to_ref, from loss": float( - loss.loss_kl_to_ref.mean() - ), - "entropy loss, from loss": float(loss.loss_entropy.mean()), - "grad_norm": float(grad_norm) - if global_step % cfg.train.gradient_accumulation_steps == 0 - else metrics.get("grad_norm", 0.0), - "write_count, from buffer": int(replay_buffer.write_count), - # how many gradient steps per write - "gradient_step_throughput (gradient step per write)": float( - global_step / replay_buffer.write_count - ), - # how many optim steps per write - "optim_step_throughput (optim step per write)": float( - (global_step // cfg.train.gradient_accumulation_steps) - / replay_buffer.write_count - ), - "data_read_count (total)": data_read_count, - "current_policy_version (collector)": collector.policy_version, - # FIXME: Assume batch is a single trajectory - # FIXME: The addition of the transform after the env instantiation + _shuttle creation - # is messed up - we need the next data - "batch_policy_version (sampled batch)": batch_policy_version, - "batch_policy_age (sampled batch)": batch_policy_age, - "throughput (steps per second)": float( - global_step / (time.time() - start_time) - ), - } - for name, value in metrics.items(): - wandb_logger.log_scalar(name, value) - wandb_logger.log_str("history", history_str, step=global_step) + log_training_metrics( + wandb_logger=wandb_logger, + replay_buffer=replay_buffer, + batch=batch, + loss=loss, + grad_norm=grad_norm, + global_step=global_step, + data_read_count=data_read_count, + collector=collector, + start_time=start_time, + gradient_accumulation_steps=cfg.train.gradient_accumulation_steps, + history_str=history_str, + ) # Checkpointing disabled to prevent disk space issues # if (global_step + 1) % cfg.train.checkpoint_frequency == 0: diff --git a/sota-implementations/grpo/grpo_utils.py b/sota-implementations/grpo/grpo_utils.py index a0978c5dad9..723c57690e4 100644 --- a/sota-implementations/grpo/grpo_utils.py +++ b/sota-implementations/grpo/grpo_utils.py @@ 
-4,6 +4,7 @@ # LICENSE file in the root directory of this source tree. from __future__ import annotations +import time from typing import Any, Callable, Literal import torch @@ -581,3 +582,96 @@ def make_env(cfg: DictConfig, devices: list[int] | None = None): ) ) return env + + +def log_training_metrics( + wandb_logger, + replay_buffer, + batch, + loss, + grad_norm, + global_step, + data_read_count, + collector, + start_time, + gradient_accumulation_steps, + history_str=None, +): + """Log training metrics to wandb. + + Args: + wandb_logger: The wandb logger instance + replay_buffer: The replay buffer containing collected data + batch: The current training batch + loss: The computed loss object + grad_norm: The gradient norm value + global_step: Current global training step + data_read_count: Total data read count + collector: The collector instance + start_time: Training start time + gradient_accumulation_steps: Number of gradient accumulation steps + history_str: Optional history string for logging + """ + with torch.no_grad(): + rb_content = replay_buffer[:] + step_count = rb_content.get(("next", "step_count")).view(-1).float().mean() + batch_policy_version = batch["next", "policy_version"].view(-1).min() + batch_policy_age = collector.policy_version - batch_policy_version + + metrics = { + "step_count from buffer": float(step_count), + "reward from buffer": float( + torch.cat(rb_content.get(("next", "reward"), as_list=True)).mean() + ), + "kl_penalty (inference to ref) from buffer": float( + torch.cat(rb_content.get(("next", "kl_penalty"), as_list=True)).mean() + ), + "seq_length from buffer": float( + torch.tensor( + [ + t.numel() + for t in rb_content.get(("tokens", "response"), as_list=True) + ], + dtype=torch.float, + ).mean() + ), + "ESS, from loss": float(loss.ESS), + "loss_objective, from loss": float(loss.loss_objective), + "clip_fraction, from loss": float(loss.clip_fraction), + "kl_approx (train to inference), from loss": float(loss.kl_approx), + 
"kl_to_inference (train to inference - differentiable), from loss": float( + loss.kl_to_inference.mean() + ), + "kl_to_ref, from loss": float(loss.kl_to_ref.mean()), + "loss_kl_to_inference, from loss": float(loss.loss_kl_to_inference.mean()), + "loss_kl_to_ref, from loss": float(loss.loss_kl_to_ref.mean()), + "entropy loss, from loss": float(loss.loss_entropy.mean()), + "grad_norm": float(grad_norm) + if global_step % gradient_accumulation_steps == 0 + else 0.0, + "write_count, from buffer": int(replay_buffer.write_count), + # how many gradient steps per write + "gradient_step_throughput (gradient step per write)": float( + global_step / replay_buffer.write_count + ), + # how many optim steps per write + "optim_step_throughput (optim step per write)": float( + (global_step // gradient_accumulation_steps) / replay_buffer.write_count + ), + "data_read_count (total)": data_read_count, + "current_policy_version (collector)": collector.policy_version, + # FIXME: Assume batch is a single trajectory + # FIXME: The addition of the transform after the env instantiation + _shuttle creation + # is messed up - we need the next data + "batch_policy_version (sampled batch)": batch_policy_version, + "batch_policy_age (sampled batch)": batch_policy_age, + "throughput (steps per second)": float( + global_step / (time.time() - start_time) + ), + } + + for name, value in metrics.items(): + wandb_logger.log_scalar(name, value, step=global_step) + + if history_str is not None: + wandb_logger.log_str("history", history_str, step=global_step) diff --git a/test/llm/mocking_classes_llm.py b/test/llm/mocking_classes_llm.py index 60ddc4b8dd9..59865bb3232 100644 --- a/test/llm/mocking_classes_llm.py +++ b/test/llm/mocking_classes_llm.py @@ -28,7 +28,7 @@ def __next__(self): return {"text": self.generate_random_string()} else: return { - "text": [self.generate_random_string() for _ in range(self.batch_size)] + "query": [self.generate_random_string() for _ in range(self.batch_size)] } diff 
--git a/test/llm/test_collectors.py b/test/llm/test_collectors.py index 8e4047b34f0..856b5219578 100644 --- a/test/llm/test_collectors.py +++ b/test/llm/test_collectors.py @@ -17,14 +17,19 @@ from torchrl.collectors.llm.weight_update.vllm import vLLMUpdater from torchrl.data import LazyStackStorage, ReplayBuffer from torchrl.envs import AsyncEnvPool, StepCounter -from torchrl.envs.llm import LLMEnv +from torchrl.envs.llm.chat import ChatEnv from torchrl.modules.llm import TransformersWrapper, vLLMWrapper - +from tensordict import set_list_to_stack from torchrl.modules.llm.backends.vllm import make_vllm_worker _has_transformers = importlib.util.find_spec("transformers") is not None _has_vllm = importlib.util.find_spec("vllm") is not None +@pytest.fixture(scope="module", autouse=True) +def set_list_to_stack_fixture(): + with set_list_to_stack(True): + yield + @pytest.mark.skipif(not _has_transformers, reason="missing transformers dependencies") @pytest.mark.skipif(not _has_vllm, reason="missing vllm dependencies") @@ -90,7 +95,7 @@ def test_llm_collector_with_transformers( policy = TransformersWrapper( model, tokenizer=tokenizer, - from_text=True, + input_mode="history", generate=True, return_log_probs=True, ) @@ -100,12 +105,11 @@ def _run_collector_test(self, total_steps, rb, queue, policy, tokenizer): bsz = 4 dataloader = DummyStrDataLoader(bsz) - env = LLMEnv.from_dataloader( + env = ChatEnv.from_dataloader( dataloader=dataloader, - from_text=True, batch_size=bsz, group_repeats=True, - eos_token_id=tokenizer.eos_token_id, + input_mode="history", ) queue = None if rb: @@ -138,13 +142,13 @@ def _run_collector_test(self, total_steps, rb, queue, policy, tokenizer): assert sample.shape == (4,) assert not sample._has_exclusive_keys # Should match length - assert len(sample["text"]) == 4 + assert len(sample["text", "prompt"]) == 4 # assert len(sample["text"][0]) == 10, sample["text"][0] # Should be non-empty - assert sample["text_response"] is not None + assert 
sample["text", "response"] is not None for i in range(4): # Check that there are more chars in the next step - assert len(sample["text"][i]) < len(sample["next", "text"][i]) + assert len(sample["history", "prompt"][i]) < len(sample["next", "history", "prompt"][i]) else: stack = torch.cat(stack) assert not stack._has_exclusive_keys @@ -152,7 +156,7 @@ def _run_collector_test(self, total_steps, rb, queue, policy, tokenizer): stack = stack.view(-1) for i in range(stack.numel()): # Check that there are more chars in the next step - assert len(stack["text"][i]) < len(stack["next", "text"][i]) + assert len(stack["history", "prompt"][i]) < len(stack["next", "history", "prompt"][i]) assert collector._frames >= total_steps @pytest.mark.slow @@ -164,11 +168,11 @@ def test_llm_collector_start(self, vllm_instance): bsz = 4 dataloader = DummyStrDataLoader(bsz) - env = LLMEnv.from_dataloader( + env = ChatEnv.from_dataloader( dataloader=dataloader, - from_text=True, batch_size=bsz, group_repeats=True, + input_mode="history", ) rb = ReplayBuffer(storage=LazyStackStorage(max_size=total_steps * 2)) @@ -191,7 +195,7 @@ def test_llm_collector_start(self, vllm_instance): assert sample.ndim == 1 for i in range(10): # Check that there are more chars in the next step - assert len(sample["text"][i]) < len(sample["next", "text"][i]) + assert len(sample["history", "prompt"][i]) < len(sample["next", "history", "prompt"][i]) assert not sample._has_exclusive_keys, sample j += 1 if rb.write_count >= total_steps: @@ -201,10 +205,11 @@ def test_llm_collector_start(self, vllm_instance): collector.async_shutdown(timeout=10) @pytest.mark.slow - @pytest.mark.parametrize("rb", [False, True]) - @pytest.mark.parametrize("yield_only_last_steps", [False, True]) + @pytest.mark.parametrize("rb", [False, True], ids=["rb_false", "rb_true"]) + @pytest.mark.parametrize("yield_only_last_steps", [False, True], ids=["yield_only_last_steps_false", "yield_only_last_steps_true"]) + 
@pytest.mark.parametrize("dialog_turns_per_batch", [4, None], ids=["dialog_turns_per_batch_4", "dialog_turns_per_batch_none"]) def test_llm_collector_completed( - self, vllm_instance_opt, rb, yield_only_last_steps + self, vllm_instance_opt, rb, yield_only_last_steps, dialog_turns_per_batch ): torch.manual_seed(0) policy = vLLMWrapper(vllm_instance_opt) @@ -214,12 +219,11 @@ def test_llm_collector_completed( max_steps = 20 dataloader = DummyStrDataLoader(bsz) - env = LLMEnv.from_dataloader( + env = ChatEnv.from_dataloader( dataloader=dataloader, - from_text=True, + input_mode="history", batch_size=bsz, group_repeats=True, - eos_token_id=tokenizer.eos_token_id, ) # To make sure the env breaks at some point env = env.append_transform(StepCounter(max_steps=max_steps)) @@ -228,15 +232,19 @@ def test_llm_collector_completed( rb = ReplayBuffer(storage=LazyStackStorage(max_size=total_steps * 2)) else: rb = None + + kwargs = {"dialog_turns_per_batch": dialog_turns_per_batch} if dialog_turns_per_batch is not None else {} collector = LLMCollector( env=env, policy_factory=lambda: policy, - dialog_turns_per_batch=env.batch_size[0], replay_buffer=rb, total_dialog_turns=total_steps, yield_completed_trajectories=True, yield_only_last_steps=yield_only_last_steps, + **kwargs, ) + if not dialog_turns_per_batch: + assert collector.dialog_turns_per_batch == 1 assert collector.yield_completed_trajectories assert collector.yield_only_last_steps is yield_only_last_steps @@ -250,51 +258,37 @@ def test_llm_collector_completed( for i in range(data.numel()): if data[i]["next", "step_count"] == max_steps: continue - if data[i]["text_response"]: - # Check that there are more chars in the next step - assert len(data["text"][i]) < len(data["next", "text"][i]), ( - i, - data[i]["next", "step_count"], - data[i]["next", "done"], - data[i]["text_response"], - ) - else: - assert len(data["text"][i]) == len(data["next", "text"][i]), ( + # Check that there are more chars in the next step + assert 
len(data["history", "prompt"][i]) < len(data["next", "history", "prompt"][i]), ( i, data[i]["next", "step_count"], data[i]["next", "done"], data[i]["text_response"], ) + expected_shape = collector.dialog_turns_per_batch if collector.dialog_turns_per_batch else 1 + # since we want only completed trajs, either we have all the steps (and hence the number of elements is + # bigger than dialog_turns_per_batch) or we have all the last steps in number strictly equal to dialog_turns_per_batch if yield_only_last_steps: - assert data.shape == (1,) + assert data.numel() == expected_shape, (data.shape, expected_shape) else: - has_found_one_with_more_steps |= data.numel() > 1 + assert data.numel() >= expected_shape, (data.shape, expected_shape) + has_found_one_with_more_steps |= data.numel() > 1 else: assert data is None sample = rb.sample(5) for i in range(sample.numel()): if sample[i]["next", "step_count"] == max_steps: continue - if sample[i]["text_response"]: - # Check that there are more chars in the next step - assert len(sample["text"][i]) < len( - sample["next", "text"][i] - ), ( - i, - sample[i]["next", "step_count"], - sample[i]["next", "done"], - sample[i]["text_response"], - ) - else: - assert len(sample["text"][i]) == len( - sample["next", "text"][i] - ), ( - i, - sample[i]["next", "step_count"], - sample[i]["next", "done"], - sample[i]["text_response"], - ) + # Check that there are more chars in the next step + assert len(sample["history", "prompt"][i]) < len( + sample["next", "history", "prompt"][i] + ), ( + i, + sample[i]["next", "step_count"], + sample[i]["next", "done"], + sample[i]["text_response"], + ) assert sample.ndim == 1 assert sample.shape == (5,) @@ -320,12 +314,11 @@ def test_llm_collector_completed_async( dataloader = DummyStrDataLoader(bsz) def env_maker(): - env = LLMEnv.from_dataloader( + env = ChatEnv.from_dataloader( dataloader=dataloader, from_text=True, batch_size=(), group_repeats=True, - eos_token_id=tokenizer.eos_token_id, ) # To make sure 
the env breaks at some point env = env.append_transform(StepCounter(max_steps=max_steps)) @@ -359,16 +352,8 @@ def env_maker(): for i in range(data.numel()): if data[i]["next", "step_count"] == max_steps: continue - if data[i]["text_response"]: - # Check that there are more chars in the next step - assert len(data["text"][i]) < len(data["next", "text"][i]), ( - i, - data[i]["next", "step_count"], - data[i]["next", "done"], - data[i]["text_response"], - ) - else: - assert len(data["text"][i]) == len(data["next", "text"][i]), ( + # Check that there are more chars in the next step + assert len(data["history", "prompt"][i]) < len(data["next", "history", "prompt"][i]), ( i, data[i]["next", "step_count"], data[i]["next", "done"], @@ -385,19 +370,9 @@ def env_maker(): for i in range(sample.numel()): if sample[i]["next", "step_count"] == max_steps: continue - if sample[i]["text_response"]: - # Check that there are more chars in the next step - assert len(sample["text"][i]) < len( - sample["next", "text"][i] - ), ( - i, - sample[i]["next", "step_count"], - sample[i]["next", "done"], - sample[i]["text_response"], - ) - else: - assert len(sample["text"][i]) == len( - sample["next", "text"][i] + # Check that there are more chars in the next step + assert len(sample["history", "prompt"][i]) < len( + sample["next", "history", "prompt"][i] ), ( i, sample[i]["next", "step_count"], diff --git a/torchrl/collectors/llm/base.py b/torchrl/collectors/llm/base.py index 6d6bcb50a19..a76bb2f3662 100644 --- a/torchrl/collectors/llm/base.py +++ b/torchrl/collectors/llm/base.py @@ -173,7 +173,7 @@ def __init__( # disguise the queue as a replay buffer replay_buffer = _QueueAsRB(queue) if dialog_turns_per_batch is None and yield_completed_trajectories: - dialog_turns_per_batch = 0 + dialog_turns_per_batch = 1 super().__init__( create_env_fn=env, policy=policy, @@ -325,6 +325,8 @@ def _rollout_all(self) -> TensorDictBase: # A simplified version of rollout return trajectory.view(-1) return 
trajectory + _result_numel = 0 + def _rollout_yield_trajs(self) -> TensorDictBase: # A simplified version of rollout if self._shuttle is None: raise RuntimeError("Data shuttle not found") @@ -335,7 +337,7 @@ def _rollout_yield_trajs(self) -> TensorDictBase: # A simplified version of rol collected_steps = 0 dones = torch.zeros(self.env.batch_size, dtype=torch.bool) while True: - if self._trajectory_queue: + if self._result_numel >= self.dialog_turns_per_batch: break env_input = self.policy(next_output) cur_output, next_output = self.env.step_and_maybe_reset(env_input) @@ -359,18 +361,24 @@ def _rollout_yield_trajs(self) -> TensorDictBase: # A simplified version of rol if dones.any(): for idx in dones.nonzero(as_tuple=True)[0].tolist(): if not self.yield_only_last_steps: - self._trajectory_queue.append( - lazy_stack(self._yield_queues[idx], -1) - ) + _result = lazy_stack(self._yield_queues[idx], -1) + self._trajectory_queue.append(_result) else: # FIXME: We need to increment the step count here because iterator() won't # see the extra steps # We use lazy-stack because unsqueeze doesn't nest the strings in lists - self._trajectory_queue.append( - lazy_stack([self._yield_queues[idx][-1]]) - ) + _result = lazy_stack([self._yield_queues[idx][-1]]) + self._trajectory_queue.append(_result) + self._result_numel += _result.numel() self._yield_queues[idx].clear() - result = self._trajectory_queue.popleft() + result = [self._trajectory_queue.popleft()] + elt = result[0].numel() + self._result_numel -= result[0].numel() + while elt < self.dialog_turns_per_batch: + result.append(self._trajectory_queue.popleft()) + elt += result[-1].numel() + self._result_numel -= result[-1].numel() + result = torch.cat(result, -1) if self.verbose: torchrl_logger.info( f"LLMCollector: Yielding completed trajectory with shape {result.shape}." 
diff --git a/torchrl/envs/llm/chat.py b/torchrl/envs/llm/chat.py index 5c4961e1c1d..c73f1734f63 100644 --- a/torchrl/envs/llm/chat.py +++ b/torchrl/envs/llm/chat.py @@ -269,6 +269,55 @@ def _make_specs_tokens(self): device=self.device, ) + @classmethod + def from_dataloader( + cls, + dataloader: DataLoader, + *, + repeats: int | None = None, + device: torch.device | None = None, + group_repeats: bool = False, + batch_size: tuple | torch.Size | None = None, + primers: Composite | None = None, + tokenizer: transformers.AutoTokenizer | None = None, + template_kwargs: dict[str, Any] | None = None, + input_mode: Literal["history", "text", "tokens"] = "history", + data_key: str | None = None, + ): + """Create a chat environment from a dataloader. + + Args: + dataloader (DataLoader): The dataloader to use. + + Keyword Args: + repeats (int | None, optional): The number of times to repeat each sample from the dataset (mainly for Monte-Carlo + based value estimation). If `None`, the dataset is not repeated. Defaults to `None`. + device (torch.device | None, optional): The device to use for computations. Defaults to None. + group_repeats (bool, optional): Whether to group repeated samples together. Defaults to `False`. + batch_size (tuple | torch.Size | None, optional): The batch size for data loading. Defaults to `1`. + primers (Composite | None, optional): The primers to use for data loading. Defaults to `None`. + tokenizer (transformers.AutoTokenizer | None, optional): The tokenizer to use for text processing. Defaults to `None`. + template_kwargs (dict[str, Any] | None, optional): Additional keyword arguments for the template. Defaults to `None`. + input_mode (Literal["history", "text", "tokens"], optional): The mode of input to the environment. Defaults to `"history"`. + data_key (str, optional): The spec of the data returned by the dataloader (or better, its collate_fn). + Defaults to `None` (automatically determined based on the input_mode). 
+ + Returns: + DatasetChatEnv: The chat environment. + """ + return DatasetChatEnv.from_dataloader( + dataloader=dataloader, + repeats=repeats, + device=device, + group_repeats=group_repeats, + batch_size=batch_size, + primers=primers, + tokenizer=tokenizer, + template_kwargs=template_kwargs, + input_mode=input_mode, + data_key=data_key, + ) + # def _post_step_mdp_hooks(self, tensordict: TensorDictBase) -> TensorDictBase: # """Allows modification of the tensordict after the step_mdp.""" # if self.input_mode == "history": @@ -506,7 +555,52 @@ def __init__( collate_fn=collate_fn if collate_fn is not None else _default_collate_fn, generator=generator, ) + return self.from_dataloader( + dataloader=dataloader, + repeats=repeats, + device=device, + group_repeats=group_repeats, + batch_size=batch_size, + primers=primers, + tokenizer=tokenizer, + template_kwargs=template_kwargs, + input_mode=input_mode, + data_key=data_key, + ) + @classmethod + def from_dataloader( + cls, + dataloader: DataLoader, + *, + repeats: int | None = None, + device: torch.device | None = None, + group_repeats: bool = False, + batch_size: tuple | torch.Size | None = None, + primers: Composite | None = None, + tokenizer: transformers.AutoTokenizer | None = None, + template_kwargs: dict[str, Any] | None = None, + input_mode: Literal["history", "text", "tokens"] = "history", + data_key: str | None = None, + ): + """Create a chat environment from a dataloader. + + Args: + dataloader (DataLoader): The dataloader to use. + + Keyword Args: + repeats (int | None, optional): The number of times to repeat each sample from the dataset (mainly for Monte-Carlo + based value estimation). If `None`, the dataset is not repeated. Defaults to `None`. + device (torch.device | None, optional): The device to use for computations. Defaults to None. + group_repeats (bool, optional): Whether to group repeated samples together. Defaults to `False`. 
+ batch_size (tuple | torch.Size | None, optional): The batch size for data loading. Defaults to `1`. + primers (Composite | None, optional): The primers to use for data loading. Defaults to `None`. + tokenizer (transformers.AutoTokenizer | None, optional): The tokenizer to use for text processing. Defaults to `None`. + template_kwargs (dict[str, Any] | None, optional): Additional keyword arguments for the template. Defaults to `None`. + + Returns: + ChatEnv: The chat environment. + """ primer = DataLoadingPrimer( dataloader=dataloader, repeats=repeats, @@ -517,14 +611,16 @@ def __init__( ) env_base = ChatEnv( batch_size=batch_size, - system_prompt=self.SYSTEM_PROMPT, + system_prompt=cls.SYSTEM_PROMPT, tokenizer=tokenizer, template_kwargs=template_kwargs, input_mode=input_mode, data_key=data_key, device=device, ) - return super().__init__(env_base, primer) + new_env = cls.__new__(cls) + TransformedEnv.__init__(new_env, env_base, primer) + return new_env def reset_dataloader(self): """Reset the dataloader. diff --git a/torchrl/envs/llm/envs.py b/torchrl/envs/llm/envs.py index 7b560678fd5..fc97f887109 100644 --- a/torchrl/envs/llm/envs.py +++ b/torchrl/envs/llm/envs.py @@ -119,6 +119,7 @@ def __init__( as_llm_data: bool = False, eos_token_id: int | None = None, ) -> None: + self._warn_deprecated() self.as_llm_data = as_llm_data if token_key is None: token_key = self._DEFAULT_TOKEN_KEY @@ -255,6 +256,13 @@ def __init__( terminated=Unbounded(shape=(1,), dtype=torch.bool, device=device), ) + @classmethod + def _warn_deprecated(cls): + warnings.warn( + "LLMEnv is deprecated. Please use ChatEnv instead.", + category=DeprecationWarning, + ) + @classmethod def from_dataloader( cls, @@ -346,6 +354,8 @@ def from_dataloader( Returns: LLMEnv: The created LLMEnv instance. 
""" + cls._warn_deprecated() + from torchrl.envs.llm import DataLoadingPrimer, Tokenizer if str_key is None: diff --git a/torchrl/envs/utils.py b/torchrl/envs/utils.py index 81485ff8e4e..1185f4a44c1 100644 --- a/torchrl/envs/utils.py +++ b/torchrl/envs/utils.py @@ -1453,6 +1453,8 @@ def _update_during_reset( # by contract, a reset signal at one level cannot # be followed by other resets at nested levels, so it's safe to # simply update + print("update", node_reset) + print("node", node) node.update(node_reset, update_batch_size=True) else: # there can be two cases: (1) the key is present in both tds, From ec828ebaeeea634d0485f5140c682b0d90cb73eb Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 11:00:55 +0100 Subject: [PATCH 10/28] amend --- sota-implementations/grpo/grpo-async.py | 1 - sota-implementations/grpo/grpo-sync.py | 1 - test/llm/test_collectors.py | 89 ++++++++++++++++--------- torchrl/envs/llm/chat.py | 17 +++-- torchrl/envs/utils.py | 2 - 5 files changed, 71 insertions(+), 39 deletions(-) diff --git a/sota-implementations/grpo/grpo-async.py b/sota-implementations/grpo/grpo-async.py index db5ac02410d..c5c0c5fdec7 100644 --- a/sota-implementations/grpo/grpo-async.py +++ b/sota-implementations/grpo/grpo-async.py @@ -172,7 +172,6 @@ def train( torchrl_logger.info(f"Total steps: {total_steps}") pbar = tqdm.tqdm(total=total_steps) - metrics = {} # Initialize metrics dict grad_norm = 0.0 # Initialize grad_norm data_read_count = 0 start_time = time.time() diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index df09f876b85..ee53cf848dc 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -163,7 +163,6 @@ def train( # Training loop torchrl_logger.info("Starting training loop.") pbar = tqdm.tqdm(collector) - metrics = {} # Initialize metrics dict grad_norm = 0.0 # Initialize grad_norm data_read_count = 0 diff --git a/test/llm/test_collectors.py 
b/test/llm/test_collectors.py index 856b5219578..01318eecbf5 100644 --- a/test/llm/test_collectors.py +++ b/test/llm/test_collectors.py @@ -12,6 +12,7 @@ import pytest import torch from mocking_classes_llm import DummyStrDataLoader +from tensordict import set_list_to_stack from torchrl import logger as torchrl_logger from torchrl.collectors.llm import LLMCollector from torchrl.collectors.llm.weight_update.vllm import vLLMUpdater @@ -19,12 +20,12 @@ from torchrl.envs import AsyncEnvPool, StepCounter from torchrl.envs.llm.chat import ChatEnv from torchrl.modules.llm import TransformersWrapper, vLLMWrapper -from tensordict import set_list_to_stack from torchrl.modules.llm.backends.vllm import make_vllm_worker _has_transformers = importlib.util.find_spec("transformers") is not None _has_vllm = importlib.util.find_spec("vllm") is not None + @pytest.fixture(scope="module", autouse=True) def set_list_to_stack_fixture(): with set_list_to_stack(True): @@ -148,7 +149,9 @@ def _run_collector_test(self, total_steps, rb, queue, policy, tokenizer): assert sample["text", "response"] is not None for i in range(4): # Check that there are more chars in the next step - assert len(sample["history", "prompt"][i]) < len(sample["next", "history", "prompt"][i]) + assert len(sample["history", "prompt"][i]) < len( + sample["next", "history", "prompt"][i] + ) else: stack = torch.cat(stack) assert not stack._has_exclusive_keys @@ -156,7 +159,9 @@ def _run_collector_test(self, total_steps, rb, queue, policy, tokenizer): stack = stack.view(-1) for i in range(stack.numel()): # Check that there are more chars in the next step - assert len(stack["history", "prompt"][i]) < len(stack["next", "history", "prompt"][i]) + assert len(stack["history", "prompt"][i]) < len( + stack["next", "history", "prompt"][i] + ) assert collector._frames >= total_steps @pytest.mark.slow @@ -195,7 +200,9 @@ def test_llm_collector_start(self, vllm_instance): assert sample.ndim == 1 for i in range(10): # Check that there 
are more chars in the next step - assert len(sample["history", "prompt"][i]) < len(sample["next", "history", "prompt"][i]) + assert len(sample["history", "prompt"][i]) < len( + sample["next", "history", "prompt"][i] + ) assert not sample._has_exclusive_keys, sample j += 1 if rb.write_count >= total_steps: @@ -206,14 +213,22 @@ def test_llm_collector_start(self, vllm_instance): @pytest.mark.slow @pytest.mark.parametrize("rb", [False, True], ids=["rb_false", "rb_true"]) - @pytest.mark.parametrize("yield_only_last_steps", [False, True], ids=["yield_only_last_steps_false", "yield_only_last_steps_true"]) - @pytest.mark.parametrize("dialog_turns_per_batch", [4, None], ids=["dialog_turns_per_batch_4", "dialog_turns_per_batch_none"]) + @pytest.mark.parametrize( + "yield_only_last_steps", + [False, True], + ids=["yield_only_last_steps_false", "yield_only_last_steps_true"], + ) + @pytest.mark.parametrize( + "dialog_turns_per_batch", + [4, None], + ids=["dialog_turns_per_batch_4", "dialog_turns_per_batch_none"], + ) def test_llm_collector_completed( self, vllm_instance_opt, rb, yield_only_last_steps, dialog_turns_per_batch ): torch.manual_seed(0) policy = vLLMWrapper(vllm_instance_opt) - tokenizer = vllm_instance_opt.get_tokenizer() + vllm_instance_opt.get_tokenizer() bsz = 4 total_steps = 20 max_steps = 20 @@ -232,8 +247,12 @@ def test_llm_collector_completed( rb = ReplayBuffer(storage=LazyStackStorage(max_size=total_steps * 2)) else: rb = None - - kwargs = {"dialog_turns_per_batch": dialog_turns_per_batch} if dialog_turns_per_batch is not None else {} + + kwargs = ( + {"dialog_turns_per_batch": dialog_turns_per_batch} + if dialog_turns_per_batch is not None + else {} + ) collector = LLMCollector( env=env, policy_factory=lambda: policy, @@ -259,16 +278,22 @@ def test_llm_collector_completed( if data[i]["next", "step_count"] == max_steps: continue # Check that there are more chars in the next step - assert len(data["history", "prompt"][i]) < len(data["next", "history", 
"prompt"][i]), ( - i, - data[i]["next", "step_count"], - data[i]["next", "done"], - data[i]["text_response"], - ) - - expected_shape = collector.dialog_turns_per_batch if collector.dialog_turns_per_batch else 1 + assert len(data["history", "prompt"][i]) < len( + data["next", "history", "prompt"][i] + ), ( + i, + data[i]["next", "step_count"], + data[i]["next", "done"], + data[i]["text_response"], + ) + + expected_shape = ( + collector.dialog_turns_per_batch + if collector.dialog_turns_per_batch + else 1 + ) # since we want only completed trajs, either we have all the steps (and hence the number of elements is - # bigger than dialog_turns_per_batch) or we have all the last steps in number strictly equal to dialog_turns_per_batch + # bigger than dialog_turns_per_batch) or we have all the last steps in number strictly equal to dialog_turns_per_batch if yield_only_last_steps: assert data.numel() == expected_shape, (data.shape, expected_shape) else: @@ -307,7 +332,7 @@ def test_llm_collector_completed_async( ): torch.manual_seed(0) policy = vLLMWrapper(vllm_instance_opt) - tokenizer = vllm_instance_opt.get_tokenizer() + vllm_instance_opt.get_tokenizer() bsz = 4 total_steps = 20 max_steps = 20 @@ -353,12 +378,14 @@ def env_maker(): if data[i]["next", "step_count"] == max_steps: continue # Check that there are more chars in the next step - assert len(data["history", "prompt"][i]) < len(data["next", "history", "prompt"][i]), ( - i, - data[i]["next", "step_count"], - data[i]["next", "done"], - data[i]["text_response"], - ) + assert len(data["history", "prompt"][i]) < len( + data["next", "history", "prompt"][i] + ), ( + i, + data[i]["next", "step_count"], + data[i]["next", "done"], + data[i]["text_response"], + ) if yield_only_last_steps: assert data.shape == (1,) @@ -373,12 +400,12 @@ def env_maker(): # Check that there are more chars in the next step assert len(sample["history", "prompt"][i]) < len( sample["next", "history", "prompt"][i] - ), ( - i, - sample[i]["next", 
"step_count"], - sample[i]["next", "done"], - sample[i]["text_response"], - ) + ), ( + i, + sample[i]["next", "step_count"], + sample[i]["next", "done"], + sample[i]["text_response"], + ) assert sample.ndim == 1 assert sample.shape == (5,) diff --git a/torchrl/envs/llm/chat.py b/torchrl/envs/llm/chat.py index c73f1734f63..4231e6a5bdc 100644 --- a/torchrl/envs/llm/chat.py +++ b/torchrl/envs/llm/chat.py @@ -165,8 +165,6 @@ def __init__( self.system_prompt = system_prompt - self.system_prompt = system_prompt - if template_kwargs is None: template_kwargs = {} self.template_kwargs = template_kwargs @@ -283,6 +281,7 @@ def from_dataloader( template_kwargs: dict[str, Any] | None = None, input_mode: Literal["history", "text", "tokens"] = "history", data_key: str | None = None, + system_prompt: str | None = None, ): """Create a chat environment from a dataloader. @@ -301,6 +300,7 @@ def from_dataloader( input_mode (Literal["history", "text", "tokens"], optional): The mode of input to the environment. Defaults to `"history"`. data_key (str, optional): The spec of the data returned by the dataloader (or better, its collate_fn). Defaults to `None` (automatically determined based on the input_mode). + system_prompt (str | None, optional): The system prompt to use for the environment. Defaults to `None`. Returns: DatasetChatEnv: The chat environment. @@ -316,6 +316,7 @@ def from_dataloader( template_kwargs=template_kwargs, input_mode=input_mode, data_key=data_key, + system_prompt=system_prompt, ) # def _post_step_mdp_hooks(self, tensordict: TensorDictBase) -> TensorDictBase: @@ -494,6 +495,7 @@ class DatasetChatEnv(TransformedEnv): input_mode (Literal["history", "text", "tokens"], optional): The mode of input to the environment. Defaults to `"history"`. data_key (str, optional): The spec of the data returned by the dataloader (or better, its collate_fn). Defaults to `None` (automatically determined based on the input_mode). 
+ system_prompt (str | None, optional): The system prompt to use for the environment. Defaults to `None`. .. seealso:: `DatasetChatEnv` is a thin wrapper around :class:`~torchrl.envs.llm.ChatEnv` bucketed with a :class:`~torchrl.envs.llm.DataLoadingPrimer` transform. See these two classes for more insight on data format @@ -525,6 +527,7 @@ def __init__( input_mode: Literal["history", "text", "tokens"] = "history", data_key: str | None = None, primers: Composite | None = None, + system_prompt: str | None = None, ): from datasets import load_dataset from tensordict import list_to_stack @@ -566,6 +569,7 @@ def __init__( template_kwargs=template_kwargs, input_mode=input_mode, data_key=data_key, + system_prompt=system_prompt, ) @classmethod @@ -578,10 +582,11 @@ def from_dataloader( group_repeats: bool = False, batch_size: tuple | torch.Size | None = None, primers: Composite | None = None, - tokenizer: transformers.AutoTokenizer | None = None, + tokenizer: transformers.AutoTokenizer | None = None, # noqa: F821 template_kwargs: dict[str, Any] | None = None, input_mode: Literal["history", "text", "tokens"] = "history", data_key: str | None = None, + system_prompt: str | None = None, ): """Create a chat environment from a dataloader. @@ -597,6 +602,10 @@ def from_dataloader( primers (Composite | None, optional): The primers to use for data loading. Defaults to `None`. tokenizer (transformers.AutoTokenizer | None, optional): The tokenizer to use for text processing. Defaults to `None`. template_kwargs (dict[str, Any] | None, optional): Additional keyword arguments for the template. Defaults to `None`. + input_mode (Literal["history", "text", "tokens"], optional): The mode of input to the environment. Defaults to `"history"`. + data_key (str, optional): The spec of the data returned by the dataloader (or better, its collate_fn). + Defaults to `None` (automatically determined based on the input_mode). 
+ system_prompt (str | None, optional): The system prompt to use for the environment. Defaults to `None`. Returns: ChatEnv: The chat environment. @@ -611,7 +620,7 @@ def from_dataloader( ) env_base = ChatEnv( batch_size=batch_size, - system_prompt=cls.SYSTEM_PROMPT, + system_prompt=cls.SYSTEM_PROMPT if system_prompt is None else system_prompt, tokenizer=tokenizer, template_kwargs=template_kwargs, input_mode=input_mode, diff --git a/torchrl/envs/utils.py b/torchrl/envs/utils.py index 1185f4a44c1..81485ff8e4e 100644 --- a/torchrl/envs/utils.py +++ b/torchrl/envs/utils.py @@ -1453,8 +1453,6 @@ def _update_during_reset( # by contract, a reset signal at one level cannot # be followed by other resets at nested levels, so it's safe to # simply update - print("update", node_reset) - print("node", node) node.update(node_reset, update_batch_size=True) else: # there can be two cases: (1) the key is present in both tds, From 11dccc776a416ee3b33c4cb138eb3dc9d0652371 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 11:05:47 +0100 Subject: [PATCH 11/28] amend --- torchrl/envs/llm/chat.py | 41 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/torchrl/envs/llm/chat.py b/torchrl/envs/llm/chat.py index 4231e6a5bdc..a9b091767e2 100644 --- a/torchrl/envs/llm/chat.py +++ b/torchrl/envs/llm/chat.py @@ -558,7 +558,8 @@ def __init__( collate_fn=collate_fn if collate_fn is not None else _default_collate_fn, generator=generator, ) - return self.from_dataloader( + self._from_dataloader( + self, dataloader=dataloader, repeats=repeats, device=device, @@ -610,6 +611,39 @@ def from_dataloader( Returns: ChatEnv: The chat environment. 
""" + self = cls.__new__(cls) + return cls._from_dataloader( + self, + dataloader, + repeats=repeats, + device=device, + group_repeats=group_repeats, + batch_size=batch_size, + primers=primers, + tokenizer=tokenizer, + template_kwargs=template_kwargs, + input_mode=input_mode, + data_key=data_key, + system_prompt=system_prompt, + ) + + @classmethod + def _from_dataloader( + cls, + self, + dataloader, + *, + repeats: int | None = None, + device: torch.device | None = None, + group_repeats: bool = False, + batch_size: tuple | torch.Size | None = None, + primers: Composite | None = None, + tokenizer: transformers.AutoTokenizer | None = None, + template_kwargs: dict[str, Any] | None = None, + input_mode: Literal["history", "text", "tokens"] = "history", + data_key: str | None = None, + system_prompt: str | None = None, + ): primer = DataLoadingPrimer( dataloader=dataloader, repeats=repeats, @@ -627,9 +661,8 @@ def from_dataloader( data_key=data_key, device=device, ) - new_env = cls.__new__(cls) - TransformedEnv.__init__(new_env, env_base, primer) - return new_env + TransformedEnv.__init__(self, env_base, primer) + return self def reset_dataloader(self): """Reset the dataloader. 
From 1d5edf2857fa0cb2f8e7d760edf460df6338d381 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 11:34:16 +0100 Subject: [PATCH 12/28] amend --- sota-implementations/grpo/config/grpo_ifeval.yaml | 4 ++++ sota-implementations/grpo/grpo-sync.py | 9 +++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/sota-implementations/grpo/config/grpo_ifeval.yaml b/sota-implementations/grpo/config/grpo_ifeval.yaml index adf35c4477d..02e376847a2 100644 --- a/sota-implementations/grpo/config/grpo_ifeval.yaml +++ b/sota-implementations/grpo/config/grpo_ifeval.yaml @@ -60,6 +60,10 @@ train: # Fields used only by grpo-async.py / grpo-sync.py logging_frequency: 1 # Log metrics every N steps - here at each optimization step + # Whether to empty the replay buffer at the end of training epochs (sync only). Guarantees that data + # is used only once. + empty_replay_buffer: true + # Training model configuration train_model: gradient_checkpointing: true # Enabled for memory efficiency diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index ee53cf848dc..9f5d1f1effb 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -171,7 +171,7 @@ def train( for data in pbar: # Wait for the replay buffer to be filled - when reasoning, we collect trajectories # so the buffer may not be filled straight away - if replay_buffer.write_count < replay_buffer.batch_size: + if not replay_buffer.write_count: torchrl_logger.info( f"Waiting for replay buffer to be filled, {replay_buffer.write_count=}" ) @@ -296,6 +296,9 @@ def train( wandb_logger.log_scalar(f"timeit/{key}", val) timeit.reset() + if cfg.train.empty_replay_buffer: + replay_buffer.empty() + pbar.close() collector.shutdown() @@ -370,7 +373,9 @@ def main(cfg): rb = RayReplayBuffer( storage=partial( LazyStackStorage, - cfg.train.dialog_turns_per_batch, + # Since we cache the values in the queue until we have "repeats" samples, + # the buffer 
can be bigger than what the dialog_turns_per_batch (at most repeats * num_envs) + cfg.env.repeats * cfg.env.num_envs, ), sampler=SamplerWithoutReplacement, transform_factory=partial(MCAdvantage, grpo_size=cfg.env.repeats), From acbe3e99b791c1c479e64567914bc9ab8289f910 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 11:35:31 +0100 Subject: [PATCH 13/28] amend --- sota-implementations/grpo/config/grpo_gsm8k.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sota-implementations/grpo/config/grpo_gsm8k.yaml b/sota-implementations/grpo/config/grpo_gsm8k.yaml index 3bd2a235ddf..bd8cd628364 100644 --- a/sota-implementations/grpo/config/grpo_gsm8k.yaml +++ b/sota-implementations/grpo/config/grpo_gsm8k.yaml @@ -60,6 +60,10 @@ train: # Fields used only by grpo-async.py / grpo-sync.py logging_frequency: 10 # Log metrics every N steps + # Whether to empty the replay buffer at the end of training epochs (sync only). Guarantees that data + # is used only once. + empty_replay_buffer: true + # Training model configuration train_model: gradient_checkpointing: true # Enabled for memory efficiency From aa12038619be238400f0ec323aac817f3fc05676 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 11:47:29 +0100 Subject: [PATCH 14/28] amend --- sota-implementations/grpo/config/mode/async.yaml | 2 +- sota-implementations/grpo/config/mode/sync.yaml | 2 +- sota-implementations/grpo/grpo-async.py | 2 +- sota-implementations/grpo/grpo-sync.py | 6 ++++-- torchrl/data/replay_buffers/replay_buffers.py | 10 +++++++--- torchrl/data/replay_buffers/writers.py | 15 +++++++++------ 6 files changed, 23 insertions(+), 14 deletions(-) diff --git a/sota-implementations/grpo/config/mode/async.yaml b/sota-implementations/grpo/config/mode/async.yaml index c72a0592849..8dff97800ad 100644 --- a/sota-implementations/grpo/config/mode/async.yaml +++ b/sota-implementations/grpo/config/mode/async.yaml @@ -5,7 +5,7 @@ train: # Number of epochs to train for, every time a batch is 
collected. Per se, not directly used in async - aside from computing the total number of steps. epochs: 1 - # The buffer size can be controlled in async mode + # The buffer size is overwritten in async mode. buffer_size: 128 # Update policy weights every N steps - can be set to any positive integer in async mode weight_update_frequency: 10 diff --git a/sota-implementations/grpo/config/mode/sync.yaml b/sota-implementations/grpo/config/mode/sync.yaml index 743e850fcc2..8773a176728 100644 --- a/sota-implementations/grpo/config/mode/sync.yaml +++ b/sota-implementations/grpo/config/mode/sync.yaml @@ -5,7 +5,7 @@ train: # Number of epochs to train for, every time a batch is collected. epochs: 1 - # Leave buffer_size empty to use dialog_turns_per_batch in sync mode + # Override the buffer size in sync mode. If not set, the buffer size will be the number of repeats * num_envs buffer_size: # Update policy weights every N steps - must be left empty in sync mode weight_update_frequency: diff --git a/sota-implementations/grpo/grpo-async.py b/sota-implementations/grpo/grpo-async.py index c5c0c5fdec7..30726c93eb4 100644 --- a/sota-implementations/grpo/grpo-async.py +++ b/sota-implementations/grpo/grpo-async.py @@ -357,7 +357,7 @@ def main(cfg): LazyStackStorage, cfg.train.buffer_size if cfg.train.buffer_size - else cfg.train.dialog_turns_per_batch, + else cfg.env.repeats * cfg.env.num_envs, ), transform_factory=partial(MCAdvantage, grpo_size=cfg.env.repeats), batch_size=cfg.train.optim_batch_size, diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index 9f5d1f1effb..0fc7d304b98 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -297,7 +297,7 @@ def train( timeit.reset() if cfg.train.empty_replay_buffer: - replay_buffer.empty() + replay_buffer.empty(empty_write_count=False) pbar.close() collector.shutdown() @@ -375,7 +375,9 @@ def main(cfg): LazyStackStorage, # Since we cache the values in 
the queue until we have "repeats" samples, # the buffer can be bigger than what the dialog_turns_per_batch (at most repeats * num_envs) - cfg.env.repeats * cfg.env.num_envs, + cfg.train.buffer_size + if cfg.train.buffer_size + else cfg.env.repeats * cfg.env.num_envs, ), sampler=SamplerWithoutReplacement, transform_factory=partial(MCAdvantage, grpo_size=cfg.env.repeats), diff --git a/torchrl/data/replay_buffers/replay_buffers.py b/torchrl/data/replay_buffers/replay_buffers.py index 6c3756e470c..d8f4c6c3927 100644 --- a/torchrl/data/replay_buffers/replay_buffers.py +++ b/torchrl/data/replay_buffers/replay_buffers.py @@ -794,9 +794,13 @@ def _sample(self, batch_size: int) -> tuple[Any, dict]: return data, info - def empty(self): - """Empties the replay buffer and reset cursor to 0.""" - self._writer._empty() + def empty(self, empty_write_count: bool = True): + """Empties the replay buffer and reset cursor to 0. + + Args: + empty_write_count (bool, optional): Whether to empty the write_count attribute. Defaults to `True`. + """ + self._writer._empty(empty_write_count=empty_write_count) self._sampler._empty() self._storage._empty() diff --git a/torchrl/data/replay_buffers/writers.py b/torchrl/data/replay_buffers/writers.py index 237d17abae0..547534652c2 100644 --- a/torchrl/data/replay_buffers/writers.py +++ b/torchrl/data/replay_buffers/writers.py @@ -58,7 +58,7 @@ def extend(self, data: Sequence) -> torch.Tensor: ... @abstractmethod - def _empty(self): + def _empty(self, empty_write_count: bool = True) -> None: ... 
@abstractmethod @@ -122,7 +122,7 @@ def add(self, data: Any) -> int: def extend(self, data: Sequence) -> torch.Tensor: raise RuntimeError(self.WRITING_ERR) - def _empty(self): + def _empty(self, empty_write_count: bool = True) -> None: raise RuntimeError(self.WRITING_ERR) def dumps(self, path): @@ -215,9 +215,10 @@ def state_dict(self) -> dict[str, Any]: def load_state_dict(self, state_dict: dict[str, Any]) -> None: self._cursor = state_dict["_cursor"] - def _empty(self): + def _empty(self, empty_write_count: bool = True) -> None: self._cursor = 0 - self._write_count = 0 + if empty_write_count: + self._write_count = 0 @property def _cursor(self): @@ -572,9 +573,11 @@ def extend(self, data: TensorDictBase) -> None: ent.mark_update(index) return index - def _empty(self) -> None: + def _empty(self, empty_write_count: bool = True) -> None: self._cursor = 0 self._current_top_values = [] + if empty_write_count: + self._write_count = 0 def __getstate__(self): if get_spawning_popen() is not None: @@ -664,7 +667,7 @@ def _rng(self, value): for writer in self._writers: writer._rng = value - def _empty(self): + def _empty(self, empty_write_count: bool = True) -> None: raise NotImplementedError def dumps(self, path: Path): From 45d900342f99961ff75bd7c23af5a218b947b7b5 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 11:54:07 +0100 Subject: [PATCH 15/28] amend --- sota-implementations/grpo/grpo-sync.py | 12 ++++++++---- torchrl/data/replay_buffers/replay_buffers.py | 2 +- torchrl/envs/llm/chat.py | 4 ++-- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index 0fc7d304b98..864cfa6108d 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -171,11 +171,15 @@ def train( for data in pbar: # Wait for the replay buffer to be filled - when reasoning, we collect trajectories # so the buffer may not be filled straight away - if not 
replay_buffer.write_count: + if not len(replay_buffer): torchrl_logger.info( - f"Waiting for replay buffer to be filled, {replay_buffer.write_count=}" + f"Waiting for replay buffer to be filled" ) continue + else: + torchrl_logger.info( + f"Replay buffer filled: {len(replay_buffer)}" + ) pbar.update(1) @@ -377,10 +381,10 @@ def main(cfg): # the buffer can be bigger than what the dialog_turns_per_batch (at most repeats * num_envs) cfg.train.buffer_size if cfg.train.buffer_size - else cfg.env.repeats * cfg.env.num_envs, + else cfg.env.repeats * cfg.env.num_envs, ), sampler=SamplerWithoutReplacement, - transform_factory=partial(MCAdvantage, grpo_size=cfg.env.repeats), + transform_factory=partial(MCAdvantage, grpo_size=cfg.env.repeats, verbose=True), batch_size=cfg.train.optim_batch_size, remote_config=replay_buffer_config, ) diff --git a/torchrl/data/replay_buffers/replay_buffers.py b/torchrl/data/replay_buffers/replay_buffers.py index d8f4c6c3927..cb097bb0097 100644 --- a/torchrl/data/replay_buffers/replay_buffers.py +++ b/torchrl/data/replay_buffers/replay_buffers.py @@ -796,7 +796,7 @@ def _sample(self, batch_size: int) -> tuple[Any, dict]: def empty(self, empty_write_count: bool = True): """Empties the replay buffer and reset cursor to 0. - + Args: empty_write_count (bool, optional): Whether to empty the write_count attribute. Defaults to `True`. 
""" diff --git a/torchrl/envs/llm/chat.py b/torchrl/envs/llm/chat.py index a9b091767e2..402b754f7da 100644 --- a/torchrl/envs/llm/chat.py +++ b/torchrl/envs/llm/chat.py @@ -277,7 +277,7 @@ def from_dataloader( group_repeats: bool = False, batch_size: tuple | torch.Size | None = None, primers: Composite | None = None, - tokenizer: transformers.AutoTokenizer | None = None, + tokenizer: transformers.AutoTokenizer | None = None, # noqa: F821 template_kwargs: dict[str, Any] | None = None, input_mode: Literal["history", "text", "tokens"] = "history", data_key: str | None = None, @@ -638,7 +638,7 @@ def _from_dataloader( group_repeats: bool = False, batch_size: tuple | torch.Size | None = None, primers: Composite | None = None, - tokenizer: transformers.AutoTokenizer | None = None, + tokenizer: transformers.AutoTokenizer | None = None, # noqa: F821 template_kwargs: dict[str, Any] | None = None, input_mode: Literal["history", "text", "tokens"] = "history", data_key: str | None = None, From 2701c0b06364cad150623df03b787609b1f18137 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 11:59:20 +0100 Subject: [PATCH 16/28] amend --- torchrl/data/replay_buffers/ray_buffer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torchrl/data/replay_buffers/ray_buffer.py b/torchrl/data/replay_buffers/ray_buffer.py index 65dadc1f4f2..e9e7ad23812 100644 --- a/torchrl/data/replay_buffers/ray_buffer.py +++ b/torchrl/data/replay_buffers/ray_buffer.py @@ -200,8 +200,8 @@ def loads(self, path): def load(self, *args, **kwargs): return ray.get(self._rb.load.remote(*args, **kwargs)) - def empty(self): - return ray.get(self._rb.empty.remote()) + def empty(self, empty_write_count: bool = True): + return ray.get(self._rb.empty.remote(empty_write_count=empty_write_count)) def __getitem__(self, index): return ray.get(self._rb.__getitem__.remote(index)) From 0df4f819957c585239ec1c4bf4ddb8862758e3cc Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 12:13:23 +0100 
Subject: [PATCH 17/28] amend --- torchrl/objectives/llm/grpo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torchrl/objectives/llm/grpo.py b/torchrl/objectives/llm/grpo.py index 5195fa6ca3c..042581e4ec0 100644 --- a/torchrl/objectives/llm/grpo.py +++ b/torchrl/objectives/llm/grpo.py @@ -513,7 +513,7 @@ def _inv_call(self, tensordict: TensorDictBase) -> TensorDictBase: elif tensordict.ndim > 2: # keep the time dim at the end tensordict = tensordict.flatten(0, -2) - trajs = tensordict.unbind(-1) + trajs = tensordict.unbind(0) # Iterate over the trajectories result = [] for traj in trajs: @@ -522,5 +522,5 @@ def _inv_call(self, tensordict: TensorDictBase) -> TensorDictBase: continue result.append(td_out) if result: - return torch.cat(result, -1) + return torch.cat(result, 0) return From 02fbae5ced670d74d711854ef059fa9ebfd78805 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 12:14:04 +0100 Subject: [PATCH 18/28] amend --- sota-implementations/grpo/grpo-sync.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index 864cfa6108d..0342d4f3cd3 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -209,6 +209,7 @@ def train( with timeit("forward_pass"): # Forward pass with mixed precision with autocast("cuda", enabled=cfg.train.mixed_precision): + torchrl_logger.info(f"Batch for forward pass: {batch.unbind(0)}") loss = loss_fn(batch) loss_val = ( loss.mean(reduce=True) From 534d570b6156357caa7cf167b2ee3d4e489b07d1 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 12:18:25 +0100 Subject: [PATCH 19/28] amend --- torchrl/objectives/llm/grpo.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/torchrl/objectives/llm/grpo.py b/torchrl/objectives/llm/grpo.py index 042581e4ec0..f6e4b105589 100644 --- a/torchrl/objectives/llm/grpo.py +++ b/torchrl/objectives/llm/grpo.py @@ -476,6 +476,8 @@ def forward(self, 
tensordict: TensorDictBase) -> GRPOLossOutput: return tensordict def _inv_call(self, tensordict: TensorDictBase) -> TensorDictBase: + if self.verbose: + torchrl_logger.info(f"Invoking MCAdvantage.\nData size: {tensordict.shape}.\nCurrent queue size: {len(self.queues)}.\nTotal queue content: {sum(len(q) for q in self.queues.values())}") # Tensordict can be any number of dims, but it must contain entire trajectories if tensordict.ndim == 1: # Check how many done states we have From 9213b830e7595f5bf031f6b890598c58a64d80b9 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 12:55:20 +0100 Subject: [PATCH 20/28] amend --- sota-implementations/grpo/config/grpo_gsm8k.yaml | 4 ++-- sota-implementations/grpo/config/grpo_ifeval.yaml | 2 +- sota-implementations/grpo/grpo-async.py | 6 +++++- sota-implementations/grpo/grpo-sync.py | 7 ++++++- 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/sota-implementations/grpo/config/grpo_gsm8k.yaml b/sota-implementations/grpo/config/grpo_gsm8k.yaml index bd8cd628364..5f76992017c 100644 --- a/sota-implementations/grpo/config/grpo_gsm8k.yaml +++ b/sota-implementations/grpo/config/grpo_gsm8k.yaml @@ -41,13 +41,13 @@ train: # Number of gradient accumulation steps. Higher values will use less GPU memory (comparing with bigger batches and lower gradient_accumulation_steps), # but will make the optimization step slower. - gradient_accumulation_steps: 1 + gradient_accumulation_steps: 4 # Fields used by both scripts but with different semantics checkpoint_frequency: 100 # Save checkpoint every N steps/batches # Batch size for optimization. Higher values will use more GPU memory. - optim_batch_size: 1 + optim_batch_size: 4 # Whether to include the KL coefficient in the loss function. Alternatively, the KL ref-to-train will be added to the reward. 
kl_coef_in_loss: true diff --git a/sota-implementations/grpo/config/grpo_ifeval.yaml b/sota-implementations/grpo/config/grpo_ifeval.yaml index 02e376847a2..bb772373e06 100644 --- a/sota-implementations/grpo/config/grpo_ifeval.yaml +++ b/sota-implementations/grpo/config/grpo_ifeval.yaml @@ -47,7 +47,7 @@ train: checkpoint_frequency: 100 # Save checkpoint every N steps/batches # Batch size for optimization. Higher values will use more GPU memory. - optim_batch_size: 2 + optim_batch_size: 4 # Whether to include the KL coefficient in the loss function. Alternatively, the KL ref-to-train will be added to the reward. kl_coef_in_loss: false diff --git a/sota-implementations/grpo/grpo-async.py b/sota-implementations/grpo/grpo-async.py index 30726c93eb4..a76691838da 100644 --- a/sota-implementations/grpo/grpo-async.py +++ b/sota-implementations/grpo/grpo-async.py @@ -352,6 +352,10 @@ def main(cfg): torchrl_logger.info(f"Inference policy: {inference_policy}") torchrl_logger.info(f"Starting replay buffer with {replay_buffer_config=}") + if cfg.train.optim_batch_size % cfg.train.gradient_accumulation_steps != 0: + raise ValueError( + "optim_batch_size must be divisible by gradient_accumulation_steps" + ) rb = RayReplayBuffer( storage=partial( LazyStackStorage, @@ -360,7 +364,7 @@ def main(cfg): else cfg.env.repeats * cfg.env.num_envs, ), transform_factory=partial(MCAdvantage, grpo_size=cfg.env.repeats), - batch_size=cfg.train.optim_batch_size, + batch_size=cfg.train.optim_batch_size // cfg.train.gradient_accumulation_steps, remote_config=replay_buffer_config, ) torchrl_logger.info(f"Replay buffer: {rb}") diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index 0342d4f3cd3..ef6ca022607 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -375,6 +375,11 @@ def main(cfg): "buffer_size must be equal to dialog_turns_per_batch in sync settings." 
) + if cfg.train.optim_batch_size % cfg.train.gradient_accumulation_steps != 0: + raise ValueError( + "optim_batch_size must be divisible by gradient_accumulation_steps" + ) + rb = RayReplayBuffer( storage=partial( LazyStackStorage, @@ -386,7 +391,7 @@ def main(cfg): ), sampler=SamplerWithoutReplacement, transform_factory=partial(MCAdvantage, grpo_size=cfg.env.repeats, verbose=True), - batch_size=cfg.train.optim_batch_size, + batch_size=cfg.train.optim_batch_size // cfg.train.gradient_accumulation_steps, remote_config=replay_buffer_config, ) torchrl_logger.info(f"Replay buffer: {rb}") From 7647466c33198bec9ecf8a9fd727cd4e6b24ccaa Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 13:08:35 +0100 Subject: [PATCH 21/28] amend --- sota-implementations/grpo/grpo-sync.py | 12 +++---- torchrl/modules/distributions/discrete.py | 1 + torchrl/modules/llm/policies/common.py | 40 ++++++++++++++++++++--- torchrl/objectives/llm/grpo.py | 4 ++- 4 files changed, 44 insertions(+), 13 deletions(-) diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index ef6ca022607..e75971a2e2f 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -172,14 +172,10 @@ def train( # Wait for the replay buffer to be filled - when reasoning, we collect trajectories # so the buffer may not be filled straight away if not len(replay_buffer): - torchrl_logger.info( - f"Waiting for replay buffer to be filled" - ) + torchrl_logger.info(f"Waiting for replay buffer to be filled") continue else: - torchrl_logger.info( - f"Replay buffer filled: {len(replay_buffer)}" - ) + torchrl_logger.info(f"Replay buffer filled: {len(replay_buffer)}") pbar.update(1) @@ -209,7 +205,9 @@ def train( with timeit("forward_pass"): # Forward pass with mixed precision with autocast("cuda", enabled=cfg.train.mixed_precision): - torchrl_logger.info(f"Batch for forward pass: {batch.unbind(0)}") + torchrl_logger.info( + f"Batch for forward pass: 
{batch.unbind(0)}" + ) loss = loss_fn(batch) loss_val = ( loss.mean(reduce=True) diff --git a/torchrl/modules/distributions/discrete.py b/torchrl/modules/distributions/discrete.py index 930b90ddbba..068ca34bb2f 100644 --- a/torchrl/modules/distributions/discrete.py +++ b/torchrl/modules/distributions/discrete.py @@ -823,6 +823,7 @@ def log_prob(self, value: torch.Tensor) -> torch.Tensor: # For token-level masking, we need to check if specific tokens are masked logits = self._original_logits + value = value.masked_fill(~self._mask, self.ignore_index) if value.ndim > 1: # Reshape for cross_entropy: (batch, seq_len, vocab) -> (batch*seq_len, vocab) logits_flat = logits.reshape(-1, logits.size(-1)) diff --git a/torchrl/modules/llm/policies/common.py b/torchrl/modules/llm/policies/common.py index aaee8cd44ed..1f42c889391 100644 --- a/torchrl/modules/llm/policies/common.py +++ b/torchrl/modules/llm/policies/common.py @@ -489,10 +489,16 @@ def get_dist( mask = logits != padding_value if mask is not None: - return LLMMaskedCategorical( + dist = LLMMaskedCategorical( logits=logits, mask=mask, ) + if not dist._position_level_masking: + raise ValueError( + "Mask is not a position-level mask. " + "This is likely because the mask is not a position-level mask." + ) + return dist return Categorical(logits) def _get_dist_with_prompt_mask( @@ -601,10 +607,16 @@ def _get_dist_with_prompt_mask( padding_side=padding_side, ) - return LLMMaskedCategorical( + dist = LLMMaskedCategorical( logits=logits, mask=response_mask.bool(), ) + if not dist._position_level_masking: + raise ValueError( + "Mask is not a position-level mask. " + "This is likely because the mask is not a position-level mask." + ) + return dist def _get_dist_with_assistant_mask( self, @@ -664,10 +676,16 @@ def _get_dist_with_assistant_mask( f"Assistant mask not found in tensordict at key {assistant_mask_key}. 
{post_msg}" ) - return LLMMaskedCategorical( + dist = LLMMaskedCategorical( logits=logits, mask=assistant_mask, ) + if not dist._position_level_masking: + raise ValueError( + "Assistant mask is not a position-level mask. " + "This is likely because the assistant mask is not a position-level mask." + ) + return dist def _get_dist_with_attention_mask( self, @@ -716,10 +734,16 @@ def _get_dist_with_attention_mask( f"Attention mask not found in tensordict at key {attention_mask_key}" ) - return LLMMaskedCategorical( + dist = LLMMaskedCategorical( logits=logits, mask=attention_mask, ) + if not dist._position_level_masking: + raise ValueError( + "Attention mask is not a position-level mask. " + "This is likely because the attention mask is not a position-level mask." + ) + return dist def _get_dist_with_custom_mask( self, @@ -756,10 +780,16 @@ def _get_dist_with_custom_mask( if logits is None: raise ValueError(f"Logits not found in tensordict at key {logits_key}") - return LLMMaskedCategorical( + dist = LLMMaskedCategorical( logits=logits, mask=mask, ) + if not dist._position_level_masking: + raise ValueError( + "Custom mask is not a position-level mask. " + "This is likely because the custom mask is not a position-level mask." 
+ ) + return dist # Convenience methods for common LLM training scenarios def _get_sft_dist(self, tensordict: TensorDictBase, **kwargs) -> D.Distribution: diff --git a/torchrl/objectives/llm/grpo.py b/torchrl/objectives/llm/grpo.py index f6e4b105589..b415c08c6c7 100644 --- a/torchrl/objectives/llm/grpo.py +++ b/torchrl/objectives/llm/grpo.py @@ -477,7 +477,9 @@ def forward(self, tensordict: TensorDictBase) -> GRPOLossOutput: def _inv_call(self, tensordict: TensorDictBase) -> TensorDictBase: if self.verbose: - torchrl_logger.info(f"Invoking MCAdvantage.\nData size: {tensordict.shape}.\nCurrent queue size: {len(self.queues)}.\nTotal queue content: {sum(len(q) for q in self.queues.values())}") + torchrl_logger.info( + f"Invoking MCAdvantage.\nData size: {tensordict.shape}.\nCurrent queue size: {len(self.queues)}.\nTotal queue content: {sum(len(q) for q in self.queues.values())}" + ) # Tensordict can be any number of dims, but it must contain entire trajectories if tensordict.ndim == 1: # Check how many done states we have From eafeeffe787ad2737e012811f8845dd4ecd5afe7 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 13:10:22 +0100 Subject: [PATCH 22/28] amend --- sota-implementations/grpo/grpo-sync.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index e75971a2e2f..342dbec8290 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -205,9 +205,6 @@ def train( with timeit("forward_pass"): # Forward pass with mixed precision with autocast("cuda", enabled=cfg.train.mixed_precision): - torchrl_logger.info( - f"Batch for forward pass: {batch.unbind(0)}" - ) loss = loss_fn(batch) loss_val = ( loss.mean(reduce=True) From 0e4f38b0920f0c6507da4fd53b0fbbb21285c110 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 13:28:29 +0100 Subject: [PATCH 23/28] amend --- torchrl/envs/llm/datasets/ifeval.py | 29 ++++++++++++++++++++++++----- 
1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/torchrl/envs/llm/datasets/ifeval.py b/torchrl/envs/llm/datasets/ifeval.py index 856189644b7..0e23846f4aa 100644 --- a/torchrl/envs/llm/datasets/ifeval.py +++ b/torchrl/envs/llm/datasets/ifeval.py @@ -7,6 +7,7 @@ from typing import Any, Callable, Literal import torch +import transformers from tensordict import NonTensorData, NonTensorStack, TensorClass, TensorDict from torchrl.data import Composite, NonTensor, Unbounded from torchrl.envs import StepCounter @@ -190,11 +191,29 @@ class IFEvalEnv(DatasetChatEnv): """ - SYSTEM_PROMPT = """A conversation between User and Assistant. -You are tasked with responding to user queries in a very specific format. -When given a task or question, first think through the problem and provide your thought process between and tags. -Then, give your final answer or response between and tags. -You will be assessed by the content of the answer block only, so make sure it contains all the required information, and only that.""" + SYSTEM_PROMPT = """You are a helpful AI assistant that follows instructions extremely well. + +IMPORTANT: You must respond in a specific format for every task: + +1. First, think through the problem step by step and write your reasoning between and tags +2. Then, provide your final answer between and tags + +CRITICAL RULES: +- ALWAYS use ... and ... tags exactly as shown +- Do NOT use , , or any other tag variations +- Your section will be evaluated, so make it complete and accurate +- Follow ALL specific requirements in the user's request (formatting, content, etc.) +- If the user asks for placeholders like [restaurant], include them exactly as requested +- Pay attention to capitalization, punctuation, and other formatting requirements + +Example format: + +I need to analyze what the user is asking for... 
+[Your reasoning here] + + +[Your final answer here, following all user requirements] +""" def __init__( self, From 0dce8c8e65738ae111a74fab29212c33a293233d Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 14:29:49 +0100 Subject: [PATCH 24/28] amend --- sota-implementations/grpo/grpo_utils.py | 4 +- torchrl/envs/llm/transforms/reason.py | 59 +++++++++++++++---------- 2 files changed, 37 insertions(+), 26 deletions(-) diff --git a/sota-implementations/grpo/grpo_utils.py b/sota-implementations/grpo/grpo_utils.py index 723c57690e4..c339550871d 100644 --- a/sota-implementations/grpo/grpo_utils.py +++ b/sota-implementations/grpo/grpo_utils.py @@ -553,8 +553,8 @@ def make_env(cfg: DictConfig, devices: list[int] | None = None): <= reward_threshold and td["step_count"] < max_steps, role="assistant", - edit_last_turn=True, - zero_reward=True, + edit_last_turn=False, + zero_reward=False, undo_done=True, random_prompt=True, ), diff --git a/torchrl/envs/llm/transforms/reason.py b/torchrl/envs/llm/transforms/reason.py index 0db8aa1cdbd..eb082295d8e 100644 --- a/torchrl/envs/llm/transforms/reason.py +++ b/torchrl/envs/llm/transforms/reason.py @@ -226,16 +226,18 @@ def _step( return next_tensordict def _replace_answer_with_prompt(self, content: str) -> str: - """Replace the answer section with a thinking prompt. + """Replace the last answer section with a thinking prompt. - This method uses regex to find and replace the ... section + This method uses regex to find and replace the last ... section with the thinking prompt, preserving any content before the answer tag. + Only the last answer block is replaced to avoid interfering with earlier + examples or instructions that might contain answer tags. Args: content: The original content string Returns: - The modified content with the answer replaced by the thinking prompt + The modified content with the last answer replaced by the thinking prompt """ # Pattern to match ... 
with optional EOS token # Use non-greedy matching and be more specific about the end @@ -243,31 +245,40 @@ def _replace_answer_with_prompt(self, content: str) -> str: # Check if there's an answer tag if "" in content: - # Replace the answer section with the thinking prompt - prompt = self.prompt - - # Replace the answer section, but preserve the EOS token if it exists - modified_content = re.sub(answer_pattern, prompt, content, flags=re.DOTALL) - - # Clean up any trailing whitespace - modified_content = modified_content.rstrip() - - # Ensure we end with the EOS token if the original content had it - if content.endswith("<|im_end|>"): - modified_content = modified_content.rstrip() + "<|im_end|>" - - # Ensure proper spacing around the prompt - if not modified_content.endswith(prompt): - # If the prompt wasn't properly inserted, append it - modified_content = content.rstrip() - if modified_content.endswith("<|im_end|>"): - modified_content = modified_content[: -len("<|im_end|>")].rstrip() - modified_content = modified_content + "\n\n" + prompt + "<|im_end|>" + # Find all matches to get the last one + matches = list(re.finditer(answer_pattern, content, flags=re.DOTALL)) + + if matches: + # Get the last match + last_match = matches[-1] + start, end = last_match.span() + + # Replace only the last answer section with the thinking prompt + prompt = self.prompt + modified_content = content[:start] + prompt + content[end:] + + # Clean up any trailing whitespace + modified_content = modified_content.rstrip() + + # Ensure we end with the EOS token if the original content had it + if content.endswith("<|im_end|>"): + modified_content = modified_content.rstrip() + "<|im_end|>" + + # Ensure proper spacing around the prompt + if not modified_content.endswith(prompt): + # If the prompt wasn't properly inserted, append it + modified_content = content.rstrip() + if modified_content.endswith("<|im_end|>"): + modified_content = modified_content[: -len("<|im_end|>")].rstrip() + 
modified_content = modified_content + "\n\n" + prompt + "<|im_end|>" + else: + # No matches found, just append the prompt + prompt = self.prompt + modified_content = content.rstrip() + "\n\n" + prompt else: # No answer tag found, just append the prompt prompt = self.prompt - modified_content = content.rstrip() + "\n\n" + prompt return modified_content From 83afe7f1058382d8f0f8d4474e1269ea1905a9e3 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 17:17:28 +0100 Subject: [PATCH 25/28] amend --- torchrl/envs/llm/reward/ifeval/_scorer.py | 114 ++++++++++++---------- 1 file changed, 61 insertions(+), 53 deletions(-) diff --git a/torchrl/envs/llm/reward/ifeval/_scorer.py b/torchrl/envs/llm/reward/ifeval/_scorer.py index f0a9a3a2467..79499c2f8d8 100644 --- a/torchrl/envs/llm/reward/ifeval/_scorer.py +++ b/torchrl/envs/llm/reward/ifeval/_scorer.py @@ -122,7 +122,7 @@ def _process_results( inp = _InputExample( key=data["key"], instruction_id_list=data["instruction_id_list"], - prompt=prompt, + prompt=prompt if prompt is not None else "", kwargs=data["kwargs"], ) @@ -230,7 +230,7 @@ def default_reward_aggregator( answer_blocks: list[str] | None = None, complete: bool | torch.Tensor | None = None, ) -> torch.Tensor: - r"""Default reward aggregation function that provides a more nuanced scoring system. + r"""Improved reward aggregation function with tiered multiplicative scoring. Args: score (IFEvalScoreData): The score data. @@ -238,79 +238,87 @@ def default_reward_aggregator( answer_blocks (list[str], optional): The list of answer blocks. complete (bool, optional): Whether the response is complete (ends with a eos token). - The reward is composed of three main components: - 1. 
Format score (max 1.0): - - prompt_level_strict_acc: 0.4 (highest weight for strict adherence to all instructions) - - inst_level_strict_acc: 0.3 (high weight for strict adherence to individual instructions) - - prompt_level_loose_acc: 0.2 (medium weight for loose adherence to all instructions) - - inst_level_loose_acc: 0.1 (lowest weight for loose adherence to individual instructions) - All instruction-level metrics are averaged to ensure balanced contribution. + The reward uses a tiered multiplicative system: - 2. Structure score (max 1.0): - - think_block: 0.5 (presence of exactly one think block) - - answer_block: 0.5 (presence of exactly one answer block) + 1. Critical failure check: No answer blocks = 0 reward + 2. Base format score (0-1): Weighted average of format metrics + 3. Structure multiplier (0.1-1.0): Penalties for missing/multiple blocks + 4. Quality bonus (0-0.5): Rewards for high quality and completion + 5. Task complexity scaling: More requirements = higher potential rewards - 3. Completion bonus (max 0.2): - - complete: 0.2 (response ends with eos token) + The final formula is: + reward = (format_score + quality_bonus) * structure_multiplier * complexity_scale - The overall formula for the reward is: + This provides better learning signals by: + - Requiring critical elements (answer tags) for meaningful rewards + - Using multiplicative scaling to reward doing everything well + - Scaling rewards based on task complexity + - Providing clear failure modes and success incentives - .. math:: - - reward = format\_score + structure\_score + completion\_bonus - - Therefore, the maximum value the reward can take is 2.2, with: - - 1.0 from format adherence - - 1.0 from structural elements (think/answer blocks) - - 0.2 from completion bonus + Reward range: 0.0 to ~1.5-2.7 depending on task complexity (more instructions = higher max reward). 
""" default_dtype = torch.get_default_dtype() score = score.to(default_dtype) - # Format score calculation - using mean for instruction-level metrics + # Critical failure check - no answer = no reward + if not answer_blocks: + return torch.zeros(score.batch_size + (1,), device=score.device, dtype=default_dtype) + + # Base format score calculation (0-1) format_components = torch.stack( [ - score.prompt_level_strict_acc.sum(-1, keepdim=True), # Single value - score.inst_level_strict_acc.mean( - -1, keepdim=True - ), # Average across instructions - score.prompt_level_loose_acc.sum(-1, keepdim=True), # Single value - score.inst_level_loose_acc.mean( - -1, keepdim=True - ), # Average across instructions + score.prompt_level_strict_acc.sum(-1, keepdim=True) if score.prompt_level_strict_acc is not None else torch.zeros(score.batch_size + (1,), device=score.device, dtype=default_dtype), # Single value + score.inst_level_strict_acc.mean(-1, keepdim=True) if score.inst_level_strict_acc is not None else torch.zeros(score.batch_size + (1,), device=score.device, dtype=default_dtype), # Average across instructions + score.prompt_level_loose_acc.sum(-1, keepdim=True) if score.prompt_level_loose_acc is not None else torch.zeros(score.batch_size + (1,), device=score.device, dtype=default_dtype), # Single value + score.inst_level_loose_acc.mean(-1, keepdim=True) if score.inst_level_loose_acc is not None else torch.zeros(score.batch_size + (1,), device=score.device, dtype=default_dtype), # Average across instructions ], -1, ) weights = torch.tensor( self.format_weights, device=format_components.device, - dtype=torch.get_default_dtype(), + dtype=default_dtype, ) format_score = (format_components * weights).sum(dim=-1, keepdim=True) - # Structure score calculation - if think_blocks is not None: - think_score = float(len(think_blocks) == 1) * 0.5 - else: - think_score = 0.0 - - if answer_blocks is not None: - answer_score = float(len(answer_blocks) == 1) * 0.5 - else: - answer_score = 
0.0 - - structure_score = think_score + answer_score - + # Structure multiplier (0.1-1.0) + structure_multiplier = 1.0 + + # Heavy penalty for missing think blocks (but not zero) + if not think_blocks: + structure_multiplier *= 0.3 + elif len(think_blocks) > 1: + structure_multiplier *= 0.7 # Penalty for multiple think blocks + + # Penalty for multiple answer blocks + if len(answer_blocks) > 1: + structure_multiplier *= 0.7 + + # Quality bonus (0-0.5) + quality_bonus = torch.zeros_like(format_score) + + # Bonus for high quality responses + if format_score > 0.8: + quality_bonus += 0.3 + # Completion bonus - if complete is None: - completion_bonus = 0.0 - elif isinstance(complete, torch.Tensor): - completion_bonus = complete.to(default_dtype) * 0.2 + if complete is not None: + if isinstance(complete, torch.Tensor): + completion_bonus = complete.to(default_dtype) * 0.2 + else: + completion_bonus = float(complete) * 0.2 + quality_bonus += completion_bonus + + # Task complexity scaling based on number of instructions + # More instructions = higher potential rewards + if score.inst_level_strict_acc is not None and score.inst_level_strict_acc.numel() > 0: + num_instructions = score.inst_level_strict_acc.shape[-1] else: - completion_bonus = float(complete) * 0.2 + num_instructions = 1 + complexity_scale = 1.0 + (num_instructions - 1) * 0.2 # 1.0 for 1 instruction, 1.2 for 2, etc. 
- # Combine all components - final_reward = format_score + structure_score + completion_bonus + # Final reward: (format + quality) * structure_multiplier * complexity_scale + final_reward = (format_score + quality_bonus) * structure_multiplier * complexity_scale final_reward = final_reward.to(default_dtype) return final_reward From c7d7014deb3c1c07c063b20bca532f2d608f7e49 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 21:43:40 +0100 Subject: [PATCH 26/28] amend --- torchrl/collectors/collectors.py | 2 +- torchrl/envs/llm/reward/ifeval/_scorer.py | 49 +++++++++++++++++------ torchrl/envs/llm/transforms/reason.py | 14 ++++--- 3 files changed, 46 insertions(+), 19 deletions(-) diff --git a/torchrl/collectors/collectors.py b/torchrl/collectors/collectors.py index 38cab4d90c3..bf255f40d78 100644 --- a/torchrl/collectors/collectors.py +++ b/torchrl/collectors/collectors.py @@ -198,7 +198,7 @@ def _get_policy_and_device( env = getattr(self, "env", None) policy = _make_compatible_policy( policy, - self.env.observation_spec, + getattr(env, "observation_spec", None), env=env, env_maker=env_maker, env_maker_kwargs=env_maker_kwargs, diff --git a/torchrl/envs/llm/reward/ifeval/_scorer.py b/torchrl/envs/llm/reward/ifeval/_scorer.py index 79499c2f8d8..40830b1dc84 100644 --- a/torchrl/envs/llm/reward/ifeval/_scorer.py +++ b/torchrl/envs/llm/reward/ifeval/_scorer.py @@ -262,15 +262,33 @@ def default_reward_aggregator( # Critical failure check - no answer = no reward if not answer_blocks: - return torch.zeros(score.batch_size + (1,), device=score.device, dtype=default_dtype) + return torch.zeros( + score.batch_size + (1,), device=score.device, dtype=default_dtype + ) # Base format score calculation (0-1) format_components = torch.stack( [ - score.prompt_level_strict_acc.sum(-1, keepdim=True) if score.prompt_level_strict_acc is not None else torch.zeros(score.batch_size + (1,), device=score.device, dtype=default_dtype), # Single value - 
score.inst_level_strict_acc.mean(-1, keepdim=True) if score.inst_level_strict_acc is not None else torch.zeros(score.batch_size + (1,), device=score.device, dtype=default_dtype), # Average across instructions - score.prompt_level_loose_acc.sum(-1, keepdim=True) if score.prompt_level_loose_acc is not None else torch.zeros(score.batch_size + (1,), device=score.device, dtype=default_dtype), # Single value - score.inst_level_loose_acc.mean(-1, keepdim=True) if score.inst_level_loose_acc is not None else torch.zeros(score.batch_size + (1,), device=score.device, dtype=default_dtype), # Average across instructions + score.prompt_level_strict_acc.sum(-1, keepdim=True) + if score.prompt_level_strict_acc is not None + else torch.zeros( + score.batch_size + (1,), device=score.device, dtype=default_dtype + ), # Single value + score.inst_level_strict_acc.mean(-1, keepdim=True) + if score.inst_level_strict_acc is not None + else torch.zeros( + score.batch_size + (1,), device=score.device, dtype=default_dtype + ), # Average across instructions + score.prompt_level_loose_acc.sum(-1, keepdim=True) + if score.prompt_level_loose_acc is not None + else torch.zeros( + score.batch_size + (1,), device=score.device, dtype=default_dtype + ), # Single value + score.inst_level_loose_acc.mean(-1, keepdim=True) + if score.inst_level_loose_acc is not None + else torch.zeros( + score.batch_size + (1,), device=score.device, dtype=default_dtype + ), # Average across instructions ], -1, ) @@ -283,24 +301,24 @@ def default_reward_aggregator( # Structure multiplier (0.1-1.0) structure_multiplier = 1.0 - + # Heavy penalty for missing think blocks (but not zero) if not think_blocks: structure_multiplier *= 0.3 elif len(think_blocks) > 1: structure_multiplier *= 0.7 # Penalty for multiple think blocks - + # Penalty for multiple answer blocks if len(answer_blocks) > 1: structure_multiplier *= 0.7 # Quality bonus (0-0.5) quality_bonus = torch.zeros_like(format_score) - + # Bonus for high quality responses 
if format_score > 0.8: quality_bonus += 0.3 - + # Completion bonus if complete is not None: if isinstance(complete, torch.Tensor): @@ -311,14 +329,21 @@ def default_reward_aggregator( # Task complexity scaling based on number of instructions # More instructions = higher potential rewards - if score.inst_level_strict_acc is not None and score.inst_level_strict_acc.numel() > 0: + if ( + score.inst_level_strict_acc is not None + and score.inst_level_strict_acc.numel() > 0 + ): num_instructions = score.inst_level_strict_acc.shape[-1] else: num_instructions = 1 - complexity_scale = 1.0 + (num_instructions - 1) * 0.2 # 1.0 for 1 instruction, 1.2 for 2, etc. + complexity_scale = ( + 1.0 + (num_instructions - 1) * 0.2 + ) # 1.0 for 1 instruction, 1.2 for 2, etc. # Final reward: (format + quality) * structure_multiplier * complexity_scale - final_reward = (format_score + quality_bonus) * structure_multiplier * complexity_scale + final_reward = ( + (format_score + quality_bonus) * structure_multiplier * complexity_scale + ) final_reward = final_reward.to(default_dtype) return final_reward diff --git a/torchrl/envs/llm/transforms/reason.py b/torchrl/envs/llm/transforms/reason.py index eb082295d8e..fad26cfa689 100644 --- a/torchrl/envs/llm/transforms/reason.py +++ b/torchrl/envs/llm/transforms/reason.py @@ -247,29 +247,31 @@ def _replace_answer_with_prompt(self, content: str) -> str: if "" in content: # Find all matches to get the last one matches = list(re.finditer(answer_pattern, content, flags=re.DOTALL)) - + if matches: # Get the last match last_match = matches[-1] start, end = last_match.span() - + # Replace only the last answer section with the thinking prompt prompt = self.prompt modified_content = content[:start] + prompt + content[end:] - + # Clean up any trailing whitespace modified_content = modified_content.rstrip() - + # Ensure we end with the EOS token if the original content had it if content.endswith("<|im_end|>"): modified_content = modified_content.rstrip() + 
"<|im_end|>" - + # Ensure proper spacing around the prompt if not modified_content.endswith(prompt): # If the prompt wasn't properly inserted, append it modified_content = content.rstrip() if modified_content.endswith("<|im_end|>"): - modified_content = modified_content[: -len("<|im_end|>")].rstrip() + modified_content = modified_content[ + : -len("<|im_end|>") + ].rstrip() modified_content = modified_content + "\n\n" + prompt + "<|im_end|>" else: # No matches found, just append the prompt From 65e0f28b2818b04a584831b6585c146ea8a0578b Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 21:49:37 +0100 Subject: [PATCH 27/28] amend --- torchrl/envs/llm/transforms/kl.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/torchrl/envs/llm/transforms/kl.py b/torchrl/envs/llm/transforms/kl.py index 3123c76f362..03fd7470f50 100644 --- a/torchrl/envs/llm/transforms/kl.py +++ b/torchrl/envs/llm/transforms/kl.py @@ -675,8 +675,7 @@ class RetrieveKL(Compose): Keyword Args: assistant_only (bool): whether to only retrieve the log-probs of the assistant tokens (i.e., steps of history - where the role is `"assistant"`). Defaults to `None` (takes the opposite value from the `gen_model` and `ref_model` if they match, as - selection needs to happen only once, or `False` if not specified within the models). + where the role is `"assistant"`). Defaults to `True`. .. note:: When `assistant_only=True`, both models must have `input_mode='history'` to properly identify assistant tokens. For other input modes (`"text"` or `"tokens"`), set `assistant_only=False`. 
@@ -787,7 +786,7 @@ def __init__( gen_model: LLMWrapperBase | Literal["from_collector"] = "from_collector", ref_model: LLMWrapperBase | None = None, *, - assistant_only: bool | None = None, + assistant_only: bool | None = True, history_key: str = "history", tokenizer_kwargs: dict[str, Any] | None = None, detach: bool = True, From cc647b2c35adcdd9949b7345e31c4a0cba322a96 Mon Sep 17 00:00:00 2001 From: vmoens Date: Tue, 8 Jul 2025 21:58:56 +0100 Subject: [PATCH 28/28] amend --- sota-implementations/grpo/config/grpo_gsm8k.yaml | 4 ++++ sota-implementations/grpo/grpo-sync.py | 2 +- sota-implementations/grpo/grpo_utils.py | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/sota-implementations/grpo/config/grpo_gsm8k.yaml b/sota-implementations/grpo/config/grpo_gsm8k.yaml index 5f76992017c..3178f077f04 100644 --- a/sota-implementations/grpo/config/grpo_gsm8k.yaml +++ b/sota-implementations/grpo/config/grpo_gsm8k.yaml @@ -17,6 +17,10 @@ env: reasoning: false # Maximum number of dialog turns per episode. max_steps: 2 + # Whether to group repeated samples together. Grouping will make all the answers to a single prompt to be written + # together, whereas a value of false will group multiple prompts in the buffer at a given time. + # Batches are usually bigger with group_repeats=false. 
+ group_repeats: false # Base model configuration model: diff --git a/sota-implementations/grpo/grpo-sync.py b/sota-implementations/grpo/grpo-sync.py index 342dbec8290..2b5363f4c91 100644 --- a/sota-implementations/grpo/grpo-sync.py +++ b/sota-implementations/grpo/grpo-sync.py @@ -172,7 +172,7 @@ def train( # Wait for the replay buffer to be filled - when reasoning, we collect trajectories # so the buffer may not be filled straight away if not len(replay_buffer): - torchrl_logger.info(f"Waiting for replay buffer to be filled") + torchrl_logger.info("Waiting for replay buffer to be filled") continue else: torchrl_logger.info(f"Replay buffer filled: {len(replay_buffer)}") diff --git a/sota-implementations/grpo/grpo_utils.py b/sota-implementations/grpo/grpo_utils.py index c339550871d..fe73d9ca8c1 100644 --- a/sota-implementations/grpo/grpo_utils.py +++ b/sota-implementations/grpo/grpo_utils.py @@ -530,6 +530,7 @@ def make_env(cfg: DictConfig, devices: list[int] | None = None): tokenizer=train_tokenizer, num_envs=cfg.env.num_envs, max_steps=max_steps, + group_repeats=cfg.env.group_repeats, device=torch.device("cuda:0") if devices is not None else None, ) elif cfg.env.dataset == "ifeval": # ifeval @@ -540,6 +541,7 @@ def make_env(cfg: DictConfig, devices: list[int] | None = None): tokenizer=train_tokenizer, num_envs=cfg.env.num_envs, max_steps=max_steps, + group_repeats=cfg.env.group_repeats, device=torch.device("cuda:0") if devices is not None else None, ) else: