From 940ffc96f7214bca24aa77479bc7c33900aaef28 Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+Borda@users.noreply.github.com> Date: Sun, 24 Mar 2024 16:17:26 +0100 Subject: [PATCH 01/37] bump: `lightning ==2.3.0.dev20240324` (#1185) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 907887b074..6fa093bd05 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ license = { file = "LICENSE" } dependencies = [ "torch>=2.2.0", - "lightning==2.3.0.dev20240318", + "lightning==2.3.0.dev20240324", "jsonargparse[signatures]>=4.27.6", ] From 9023c452cd754c27b021868a49d312ba79cb35ec Mon Sep 17 00:00:00 2001 From: Andrei-Aksionov <58434077+Andrei-Aksionov@users.noreply.github.com> Date: Sun, 24 Mar 2024 23:39:37 +0300 Subject: [PATCH 02/37] Automated formatting (#1180) --- README.md | 1 - config_hub/finetune/README.md | 2 +- extensions/thunder/README.md | 8 ++++---- extensions/thunder/unsloth/pretrain.py | 3 +-- litgpt/data/json_data.py | 4 ++-- tests/data/fixtures/alpaca.json | 2 +- tests/data/fixtures/longform_train.json | 2 +- tests/data/fixtures/longform_val.json | 2 +- tests/test_thunder_fsdp.py | 3 +-- tests/test_unsloth_executor.py | 6 +++--- tutorials/convert_hf_checkpoint.md | 7 +++---- tutorials/convert_lit_models.md | 4 ++-- tutorials/download_model_weights.md | 4 ++-- tutorials/prepare_dataset.md | 4 ++-- 14 files changed, 24 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index cfea1a2155..57b28b4108 100644 --- a/README.md +++ b/README.md @@ -401,4 +401,3 @@ If you use LitGPT in your research, please cite the following work: ## License LitGPT is released under the [Apache 2.0](https://github.com/Lightning-AI/litgpt/blob/main/LICENSE) license. - diff --git a/config_hub/finetune/README.md b/config_hub/finetune/README.md index 8aed153105..aa160472d5 100644 --- a/config_hub/finetune/README.md +++ b/config_hub/finetune/README.md @@ -1,6 +1,6 @@ ## Config files -The table below lists the performances you can expect from the provided config files. Note that you can achieve lower memory consumption by lowering the micro batch size as needed. In addition, you can lower the rank (`lora_r`) in the LoRA configuration files and disable LoRA for certain layers (for example, setting `lora_projection` and other LoRA layer-specific parameters to `false`). +The table below lists the performances you can expect from the provided config files. Note that you can achieve lower memory consumption by lowering the micro batch size as needed. In addition, you can lower the rank (`lora_r`) in the LoRA configuration files and disable LoRA for certain layers (for example, setting `lora_projection` and other LoRA layer-specific parameters to `false`). For more information, see the [Dealing with out-of-memory (OOM) errors](../../tutorials/oom.md) on lowering the memory requirements.   
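The memory-saving options called out in the config hub README hunk above (the micro batch size, the LoRA rank `lora_r`, and per-layer toggles such as `lora_projection`) can also be overridden on the command line rather than by editing the YAML file. A minimal sketch, assuming `litgpt finetune lora` exposes these config fields as flags (the config path is illustrative, taken from the config hub referenced later in this series):

```bash
# Sketch: lower memory use by overriding config values from the CLI.
# Flag spellings and the config path are assumptions based on the finetuning
# scripts and config files referenced elsewhere in this patch series.
litgpt finetune lora \
  --config config_hub/finetune/llama-2-7b/lora.yaml \
  --train.micro_batch_size 1 \
  --lora_r 4 \
  --lora_projection false
```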
diff --git a/extensions/thunder/README.md b/extensions/thunder/README.md index e1e5f7bdb7..b84da74e5f 100644 --- a/extensions/thunder/README.md +++ b/extensions/thunder/README.md @@ -40,7 +40,7 @@ print(forward_trace) @torch.no_grad() @no_autocast() def augmented_forward_fn(*args): - # args: "Collection" + # args: "Collection" t0, \ t1, \ t2, \ @@ -245,8 +245,8 @@ print(backward_trace) @torch.no_grad() @no_autocast() def backward_fn(saved_for_backward, cotangents): - # saved_for_backward: "Collection" - # cotangents: "Collection" + # saved_for_backward: "Collection" + # cotangents: "Collection" C0, \ C1, \ = saved_for_backward @@ -528,7 +528,7 @@ We provide ready-to-use Fabric strategies that integrate Thunder DDP|FSDP. Under ```python model = thunder.distributed.ddp(model) -# or +# or # model = thunder.distributed.fsdp(model) model = thunder.jit(model) diff --git a/extensions/thunder/unsloth/pretrain.py b/extensions/thunder/unsloth/pretrain.py index 3526dd498b..3bb0166d38 100644 --- a/extensions/thunder/unsloth/pretrain.py +++ b/extensions/thunder/unsloth/pretrain.py @@ -439,11 +439,10 @@ def validate_args(train: TrainArgs, eval: EvalArgs, initial_checkpoint_dir, resu def jit(fn: Callable) -> Any: import thunder + from executor import unsloth_ex from thunder.executors.sdpaex import sdpa_ex from thunder.executors.torch_compile import torch_compile_executor - from executor import unsloth_ex - return thunder.jit( fn, executors=[sdpa_ex, unsloth_ex, torch_compile_executor, thunder.nvfuser_executor, thunder.pytorch_executor] ) diff --git a/litgpt/data/json_data.py b/litgpt/data/json_data.py index 8ce5b63368..541678b93f 100644 --- a/litgpt/data/json_data.py +++ b/litgpt/data/json_data.py @@ -18,8 +18,8 @@ class JSON(DataModule): """Loads JSON or JSONL data for supervised finetuning.""" json_path: Path - """A path to a JSON file or a directory with `train.json` and `val.json` containing the data. - The file(s) should contain a list of samples (dicts). Each dict must have the keys 'instruction' and 'output', + """A path to a JSON file or a directory with `train.json` and `val.json` containing the data. + The file(s) should contain a list of samples (dicts). Each dict must have the keys 'instruction' and 'output', and can optionally have a key 'input' (see Alpaca).""" mask_prompt: bool = False """Whether to mask the prompt section from the label (with ``ignore_index``).""" diff --git a/tests/data/fixtures/alpaca.json b/tests/data/fixtures/alpaca.json index 10f50be6cb..a2f3b56649 100644 --- a/tests/data/fixtures/alpaca.json +++ b/tests/data/fixtures/alpaca.json @@ -114,4 +114,4 @@ "input": "Recording about the history of Waterloo", "output": "The Battle of Waterloo, fought on June 18, 1815, was the last major battle of the Napoleonic Wars. It marked the end of Napoleon Bonaparte's rule. The battle was fought between Napoleon Bonaparte's French forces and an allied force of British, Dutch, Belgian and Prussian troops, led by the Duke of Wellington. It was fought near the town of Waterloo in the former Austrian Netherlands, now present-day Belgium. The battle was a defeat for Napoleon's forces and ended Napoleon's rule over France. Napoleon himself was exiled to St. Helena, where he would die in 1821. The battle marked the end of French domination of Europe and the beginning of a period of peace in the region. It was also the last major battle of the Napoleonic Wars and is remembered as one of the most important battles in history. 
The victory of the allied forces at Waterloo helped to bring an end to French imperial ambitions in Europe. It also established the British as one of the most powerful nations in Europe. The battle was fought with a combination of infantry, cavalry, and artillery tactics, and showed the beginning of the changing nature of warfare. Although the French forces greatly outnumbered the allies, their strategic and tactical mistake led to the loss of the battle. This defeat signaled the end of French imperial power in Europe. The legacy of Waterloo still stands today, and it cemented the British Empire's position for the next one hundred years." } -] \ No newline at end of file +] diff --git a/tests/data/fixtures/longform_train.json b/tests/data/fixtures/longform_train.json index 457fb8173a..7c67fb3972 100644 --- a/tests/data/fixtures/longform_train.json +++ b/tests/data/fixtures/longform_train.json @@ -101,4 +101,4 @@ "source": "Wikipedia", "subset": "chatbot" } -] \ No newline at end of file +] diff --git a/tests/data/fixtures/longform_val.json b/tests/data/fixtures/longform_val.json index dd2f94e1ec..f08c02f3e7 100644 --- a/tests/data/fixtures/longform_val.json +++ b/tests/data/fixtures/longform_val.json @@ -53,4 +53,4 @@ "source": "Natural Instructions", "subset": "task1658 billsum summarization" } -] \ No newline at end of file +] diff --git a/tests/test_thunder_fsdp.py b/tests/test_thunder_fsdp.py index fed938aba6..76dc36bae6 100644 --- a/tests/test_thunder_fsdp.py +++ b/tests/test_thunder_fsdp.py @@ -6,11 +6,10 @@ import pytest import torch +from conftest import RunIf from lightning.fabric import Fabric from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_3 -from conftest import RunIf - # support running without installing as a package wd = Path(__file__).parent.parent.resolve() sys.path.append(str(wd)) diff --git a/tests/test_unsloth_executor.py b/tests/test_unsloth_executor.py index 3ce6907ffd..797d1f6f53 100644 --- a/tests/test_unsloth_executor.py +++ b/tests/test_unsloth_executor.py @@ -3,8 +3,8 @@ from conftest import RunIf from litgpt import GPT, Config -from litgpt.utils import chunked_cross_entropy from litgpt.model import apply_rope, build_rope_cache +from litgpt.utils import chunked_cross_entropy @RunIf(min_cuda_gpus=1, thunder=True) @@ -85,9 +85,9 @@ def test_unsloth_swiglu(): import thunder from thunder.core.transforms import grad - from extensions.thunder.unsloth.executor import unsloth_ex, ThunderLLaMAMLP - from litgpt.model import LLaMAMLP + from extensions.thunder.unsloth.executor import ThunderLLaMAMLP, unsloth_ex from litgpt import Config + from litgpt.model import LLaMAMLP config = Config.from_name("Llama-2-7b-hf") with torch.device("cuda"): diff --git a/tutorials/convert_hf_checkpoint.md b/tutorials/convert_hf_checkpoint.md index 39bbc9823d..7081ae7c46 100644 --- a/tutorials/convert_hf_checkpoint.md +++ b/tutorials/convert_hf_checkpoint.md @@ -26,13 +26,13 @@ checkpoints/ To disable the automatic conversion, which is useful for development and debugging purposes, you can run the `litgpt/scripts/download.py` with the `--convert_checkpoint false` flag. 
This will only download the checkpoint files but do not convert them for use in LitGPT: ```bash -rm -rf checkpoints/EleutherAI/pythia-14m +rm -rf checkpoints/EleutherAI/pythia-14m litgpt download \ --repo_id EleutherAI/pythia-14m \ --convert_checkpoint false - -ls checkpoints/EleutherAI/pythia-14m + +ls checkpoints/EleutherAI/pythia-14m ``` ``` @@ -52,4 +52,3 @@ The required files `model_config.yaml` and `lit_model.pth` files can then be man litgpt convert to_litgpt \ --checkpoint_dir checkpoints/EleutherAI/pythia-14m ``` - diff --git a/tutorials/convert_lit_models.md b/tutorials/convert_lit_models.md index 3525e8a40e..301abfc161 100644 --- a/tutorials/convert_lit_models.md +++ b/tutorials/convert_lit_models.md @@ -66,7 +66,7 @@ For convenience, we first specify an environment variable (optional) to avoid co export repo_id=TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T ``` -Instead of using TinyLlama, you can replace the `repo_id` target with any other model repository +Instead of using TinyLlama, you can replace the `repo_id` target with any other model repository specifier that is currently supported by LitGPT. You can get a list of supported repository specifier by running `litgpt/scripts/download.py` without any additional arguments. @@ -147,4 +147,4 @@ lm_eval --model hf \ --tasks "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge" \ --device "cuda:0" \ --batch_size 4 -``` \ No newline at end of file +``` diff --git a/tutorials/download_model_weights.md b/tutorials/download_model_weights.md index a058619046..ad42c044f6 100644 --- a/tutorials/download_model_weights.md +++ b/tutorials/download_model_weights.md @@ -209,7 +209,7 @@ litgpt chat --checkpoint_dir checkpoints/$repo_id   ## Specific Models -Note that certain models require that you've been granted access to the weights on the Hugging Face Hub. +Note that certain models require that you've been granted access to the weights on the Hugging Face Hub. For example, to get access to the Gemma 2B model, you can do so by following the steps at https://huggingface.co/google/gemma-2b. After access is granted, you can find your HF hub token in https://huggingface.co/settings/tokens. @@ -249,7 +249,7 @@ litgpt download \   ## Converting Checkpoints Manually -For development purposes, for example, when adding or experimenting with new model configurations, it may be beneficial to split the weight download and model conversion into two separate steps. +For development purposes, for example, when adding or experimenting with new model configurations, it may be beneficial to split the weight download and model conversion into two separate steps. You can do this by passing the `--convert_checkpoint false` option to the download script: diff --git a/tutorials/prepare_dataset.md b/tutorials/prepare_dataset.md index 51df8b020c..2cb63ecee6 100644 --- a/tutorials/prepare_dataset.md +++ b/tutorials/prepare_dataset.md @@ -80,7 +80,7 @@ For comparison, the Falcon 7B model requires 23.52 GB of memory for the original ### Alpaca-GPT4 -The Alpaca-GPT4 was built by using the prompts of the original Alpaca dataset and generate the responses via GPT 4. The +The Alpaca-GPT4 was built by using the prompts of the original Alpaca dataset and generate the responses via GPT 4. The dataset consists of 52,000 instructions and responses. 
The original [Alpaca-GPT4](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) dataset can be used as follows: @@ -131,7 +131,7 @@ litgpt finetune lora \ ### Deita -The Deita dataset (short for Data-Efficient Instruction Tuning for Alignment) is a collection of 9500 prompts and responses, as described in the [What Makes Good Data for Alignment? A Comprehensive Study of Automatic Data Selection in Instruction Tuning](https://arxiv.org/abs/2312.15685) paper. +The Deita dataset (short for Data-Efficient Instruction Tuning for Alignment) is a collection of 9500 prompts and responses, as described in the [What Makes Good Data for Alignment? A Comprehensive Study of Automatic Data Selection in Instruction Tuning](https://arxiv.org/abs/2312.15685) paper. Using Falcon 7b as an example, we can use the dataset as follows: ```bash From dc60ac2af7b6f4da1b6e854451eab9b6e8556e8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Mon, 25 Mar 2024 03:35:22 +0100 Subject: [PATCH 03/37] Update merge_lora.py docstring --- litgpt/scripts/merge_lora.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litgpt/scripts/merge_lora.py b/litgpt/scripts/merge_lora.py index 6f2e5ea588..49129bcae1 100644 --- a/litgpt/scripts/merge_lora.py +++ b/litgpt/scripts/merge_lora.py @@ -22,7 +22,7 @@ def merge_lora( Args: checkpoint_dir: Path to the checkpoint directory with trained LoRA weights, which is the output of - ``litgpt finetune --method lora``. + ``litgpt finetune lora``. pretrained_checkpoint_dir: Optional path to the checkpoint directory with the weights of the base model corresponding to the LoRA checkpoint. By default, this will automatically be inferred from the metadata in the given `checkpoint_dir` directory. Only set this if the base model's checkpoint directory From 025a66883ee1e8e118eb6cf857d39ce8fb5c50fa Mon Sep 17 00:00:00 2001 From: Andrei-Aksionov <58434077+Andrei-Aksionov@users.noreply.github.com> Date: Mon, 25 Mar 2024 06:03:09 +0300 Subject: [PATCH 04/37] GemmaMLP: add missing approximation for LoRA and AdapterV2 variants (#1178) --- litgpt/adapter_v2.py | 4 +++- litgpt/lora.py | 4 +++- tests/test_adapter_v2.py | 47 ++++++++++++++++++++++++++++++++++++++-- tests/test_lora.py | 43 ++++++++++++++++++++++++++++++++++++ 4 files changed, 94 insertions(+), 4 deletions(-) diff --git a/litgpt/adapter_v2.py b/litgpt/adapter_v2.py index d7161e3b4f..a77f2c94d5 100644 --- a/litgpt/adapter_v2.py +++ b/litgpt/adapter_v2.py @@ -181,6 +181,8 @@ def __init__(self, config: Config) -> None: self.fc_2 = AdapterV2Linear(config.n_embd, config.intermediate_size, bias=config.bias) self.proj = AdapterV2Linear(config.intermediate_size, config.n_embd, bias=config.bias) + self.config = config + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: """For compatibility with base checkpoints.""" mapping = { @@ -199,7 +201,7 @@ class GemmaMLP(LLaMAMLP): def forward(self, x: torch.Tensor) -> torch.Tensor: x_fc_1 = self.fc_1(x) x_fc_2 = self.fc_2(x) - x = torch.nn.functional.gelu(x_fc_1) * x_fc_2 + x = torch.nn.functional.gelu(x_fc_1, approximate=self.config.gelu_approximate) * x_fc_2 return self.proj(x) diff --git a/litgpt/lora.py b/litgpt/lora.py index fd54d6f771..d706bfb884 100644 --- a/litgpt/lora.py +++ b/litgpt/lora.py @@ -686,6 +686,8 @@ def __init__(self, config: Config) -> None: lora_dropout=config.lora_dropout, ) + self.config = config + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: 
"""For compatibility with base checkpoints.""" mapping = { @@ -704,7 +706,7 @@ class GemmaMLP(LLaMAMLP): def forward(self, x: torch.Tensor) -> torch.Tensor: x_fc_1 = self.fc_1(x) x_fc_2 = self.fc_2(x) - x = torch.nn.functional.gelu(x_fc_1) * x_fc_2 + x = torch.nn.functional.gelu(x_fc_1, approximate=self.config.gelu_approximate) * x_fc_2 return self.proj(x) diff --git a/tests/test_adapter_v2.py b/tests/test_adapter_v2.py index 716428bf24..f1a12cc539 100644 --- a/tests/test_adapter_v2.py +++ b/tests/test_adapter_v2.py @@ -13,12 +13,13 @@ from lightning.fabric.plugins.precision.bitsandbytes import _BITSANDBYTES_AVAILABLE, BitsandbytesPrecision from lightning.fabric.wrappers import _FabricOptimizer from torch._dynamo.backends import debugging +from transformers.models.gemma import GemmaConfig, GemmaForCausalLM from transformers.models.mixtral import MixtralConfig, MixtralForCausalLM import litgpt.config as config_module import litgpt.finetune.adapter_v2 as module -from litgpt.adapter_v2 import GPT, Config, adapter_filter from litgpt.adapter_v2 import GPT as AdapterV2GPT +from litgpt.adapter_v2 import Config, adapter_filter from litgpt.args import EvalArgs, TrainArgs from litgpt.data import Alpaca from litgpt.model import GPT as BaseGPT @@ -195,7 +196,7 @@ def test_against_hf_mixtral(): theirs_state_dict = theirs_model.state_dict() state_dict = {} copy_weights_hf_llama(ours_config, {}, state_dict, theirs_state_dict) - ours_model = GPT(ours_config).to(device) + ours_model = AdapterV2GPT(ours_config).to(device) # strict=False because missing keys due to adapter weights not contained in state dict ours_model.load_state_dict(state_dict, strict=False) @@ -207,6 +208,48 @@ def test_against_hf_mixtral(): torch.testing.assert_close(ours_y, theirs_y) +@torch.inference_mode() +@pytest.mark.xfail(raises=AssertionError, match="Tensor-likes are not close") +@pytest.mark.parametrize("model_name", ["gemma-2b", "gemma-7b"]) +def test_against_hf_gemma(model_name): + device = torch.device("cpu") + dtype = torch.float32 + T = 5 + ours_config = Config.from_name(model_name, n_layer=2, n_head=16, n_embd=32, intermediate_size=86) + theirs_config = GemmaConfig( + vocab_size=ours_config.padded_vocab_size, + hidden_size=ours_config.n_embd, + head_dim=ours_config.head_size, + num_attention_heads=ours_config.n_head, + num_hidden_layers=ours_config.n_layer, + intermediate_size=ours_config.intermediate_size, + max_position_embeddings=T, + rms_norm_eps=ours_config.norm_eps, + num_key_value_heads=ours_config.n_query_groups, + rope_theta=ours_config.rope_base, + attention_bias=ours_config.bias, + tie_word_embeddings=True, + hidden_act="gelu_pytorch_tanh", + ) + assert ours_config.intermediate_size == theirs_config.intermediate_size + + theirs_model = GemmaForCausalLM(theirs_config).to(device) + theirs_state_dict = theirs_model.state_dict() + # Gemma weights are shipped without `lm_head.weight` + theirs_state_dict.pop("lm_head.weight") + state_dict = {} + copy_weights_hf_llama(ours_config, {}, state_dict, theirs_state_dict) + ours_model = AdapterV2GPT(ours_config).to(device) + ours_model.load_state_dict(state_dict, strict=False) + + # test end to end + x = torch.tensor([[9856, 23, 491, 1536, 304]], dtype=torch.int32, device=device) + assert x.size(1) == T + ours_y = ours_model(x) + theirs_y = theirs_model(x)["logits"].to(dtype) # HF converts logits to float + torch.testing.assert_close(ours_y, theirs_y) + + @RunIf(min_cuda_gpus=1) def test_adapter_v2_bitsandbytes(monkeypatch, tmp_path, fake_checkpoint_dir, alpaca_path): if not 
_BITSANDBYTES_AVAILABLE: diff --git a/tests/test_lora.py b/tests/test_lora.py index c45a4f0bf8..fd09daeeeb 100644 --- a/tests/test_lora.py +++ b/tests/test_lora.py @@ -15,6 +15,7 @@ from lightning.fabric.wrappers import _FabricOptimizer from torch._dynamo.backends import debugging from torch.nn import functional as F +from transformers.models.gemma import GemmaConfig, GemmaForCausalLM from transformers.models.mixtral import MixtralConfig, MixtralForCausalLM import litgpt.config as config_module @@ -554,6 +555,48 @@ def test_against_hf_mixtral(): torch.testing.assert_close(ours_y, theirs_y) +@torch.inference_mode() +@pytest.mark.xfail(raises=AssertionError, match="Tensor-likes are not close") +@pytest.mark.parametrize("model_name", ["gemma-2b", "gemma-7b"]) +def test_against_hf_gemma(model_name): + device = torch.device("cpu") + dtype = torch.float32 + T = 5 + ours_config = Config.from_name(model_name, n_layer=2, n_head=16, n_embd=32, intermediate_size=86) + theirs_config = GemmaConfig( + vocab_size=ours_config.padded_vocab_size, + hidden_size=ours_config.n_embd, + head_dim=ours_config.head_size, + num_attention_heads=ours_config.n_head, + num_hidden_layers=ours_config.n_layer, + intermediate_size=ours_config.intermediate_size, + max_position_embeddings=T, + rms_norm_eps=ours_config.norm_eps, + num_key_value_heads=ours_config.n_query_groups, + rope_theta=ours_config.rope_base, + attention_bias=ours_config.bias, + tie_word_embeddings=True, + hidden_act="gelu_pytorch_tanh", + ) + assert ours_config.intermediate_size == theirs_config.intermediate_size + + theirs_model = GemmaForCausalLM(theirs_config).to(device) + theirs_state_dict = theirs_model.state_dict() + # Gemma weights are shipped without `lm_head.weight` + theirs_state_dict.pop("lm_head.weight") + state_dict = {} + copy_weights_hf_llama(ours_config, {}, state_dict, theirs_state_dict) + ours_model = LoRAGPT(ours_config).to(device) + ours_model.load_state_dict(state_dict) + + # test end to end + x = torch.tensor([[9856, 23, 491, 1536, 304]], dtype=torch.int32, device=device) + assert x.size(1) == T + ours_y = ours_model(x) + theirs_y = theirs_model(x)["logits"].to(dtype) # HF converts logits to float + torch.testing.assert_close(ours_y, theirs_y) + + @RunIf(min_cuda_gpus=1) def test_lora_bitsandbytes(monkeypatch, tmp_path, fake_checkpoint_dir, alpaca_path): if not _BITSANDBYTES_AVAILABLE: From 56889db4807f5d7384b30e60dc124aaf0f1580ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Mon, 25 Mar 2024 04:07:31 +0100 Subject: [PATCH 05/37] Check checkpoint dir before accessing it (#1188) --- litgpt/chat/base.py | 1 - litgpt/finetune/adapter.py | 3 ++- litgpt/finetune/adapter_v2.py | 3 ++- litgpt/finetune/full.py | 3 ++- litgpt/finetune/lora.py | 3 ++- litgpt/generate/adapter.py | 1 - litgpt/generate/adapter_v2.py | 1 - litgpt/generate/base.py | 1 - litgpt/generate/full.py | 1 - litgpt/generate/sequentially.py | 1 - litgpt/generate/tp.py | 1 - litgpt/scripts/merge_lora.py | 2 +- litgpt/utils.py | 3 +-- tests/test_config_hub.py | 11 ++++++----- 14 files changed, 16 insertions(+), 19 deletions(-) diff --git a/litgpt/chat/base.py b/litgpt/chat/base.py index eb31205d5d..81229ddd2a 100644 --- a/litgpt/chat/base.py +++ b/litgpt/chat/base.py @@ -132,7 +132,6 @@ def main( fabric = L.Fabric(devices=1, precision=precision, plugins=plugins) check_valid_checkpoint_dir(checkpoint_dir) - config = Config.from_file(checkpoint_dir / "model_config.yaml") checkpoint_path = checkpoint_dir / "lit_model.pth" diff --git 
a/litgpt/finetune/adapter.py b/litgpt/finetune/adapter.py index a88fd8e2c3..88fd4ecc26 100644 --- a/litgpt/finetune/adapter.py +++ b/litgpt/finetune/adapter.py @@ -75,6 +75,8 @@ def setup( pprint(locals()) data = Alpaca() if data is None else data devices = parse_devices(devices) + + check_valid_checkpoint_dir(checkpoint_dir) config = Config.from_file(checkpoint_dir / "model_config.yaml") precision = precision or get_default_supported_precision(training=True) @@ -120,7 +122,6 @@ def main( eval: EvalArgs, ) -> None: validate_args(train, eval) - check_valid_checkpoint_dir(checkpoint_dir) tokenizer = Tokenizer(checkpoint_dir) train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train) diff --git a/litgpt/finetune/adapter_v2.py b/litgpt/finetune/adapter_v2.py index 2b29ecc228..97d0e51f16 100644 --- a/litgpt/finetune/adapter_v2.py +++ b/litgpt/finetune/adapter_v2.py @@ -75,6 +75,8 @@ def setup( pprint(locals()) data = Alpaca() if data is None else data devices = parse_devices(devices) + + check_valid_checkpoint_dir(checkpoint_dir) config = Config.from_file(checkpoint_dir / "model_config.yaml") precision = precision or get_default_supported_precision(training=True) @@ -120,7 +122,6 @@ def main( eval: EvalArgs, ) -> None: validate_args(train, eval) - check_valid_checkpoint_dir(checkpoint_dir) tokenizer = Tokenizer(checkpoint_dir) train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train) diff --git a/litgpt/finetune/full.py b/litgpt/finetune/full.py index 086756e6b5..38aa1ae466 100644 --- a/litgpt/finetune/full.py +++ b/litgpt/finetune/full.py @@ -74,6 +74,8 @@ def setup( pprint(locals()) data = Alpaca() if data is None else data devices = parse_devices(devices) + + check_valid_checkpoint_dir(checkpoint_dir) config = Config.from_file(checkpoint_dir / "model_config.yaml") precision = precision or get_default_supported_precision(training=True) @@ -109,7 +111,6 @@ def main( eval: EvalArgs, ) -> None: validate_args(train, eval) - check_valid_checkpoint_dir(checkpoint_dir) tokenizer = Tokenizer(checkpoint_dir) train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train) diff --git a/litgpt/finetune/lora.py b/litgpt/finetune/lora.py index 22f6b54ef9..ce8b7764bd 100644 --- a/litgpt/finetune/lora.py +++ b/litgpt/finetune/lora.py @@ -94,6 +94,8 @@ def setup( pprint(locals()) data = Alpaca() if data is None else data devices = parse_devices(devices) + + check_valid_checkpoint_dir(checkpoint_dir) config = Config.from_file( checkpoint_dir / "model_config.yaml", lora_r=lora_r, @@ -150,7 +152,6 @@ def main( eval: EvalArgs, ) -> None: validate_args(train, eval) - check_valid_checkpoint_dir(checkpoint_dir) tokenizer = Tokenizer(checkpoint_dir) train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train) diff --git a/litgpt/generate/adapter.py b/litgpt/generate/adapter.py index e6f35be4bc..104b3e20b0 100644 --- a/litgpt/generate/adapter.py +++ b/litgpt/generate/adapter.py @@ -60,7 +60,6 @@ def main( fabric.launch() check_valid_checkpoint_dir(checkpoint_dir) - config = Config.from_file(checkpoint_dir / "model_config.yaml") checkpoint_path = checkpoint_dir / "lit_model.pth" diff --git a/litgpt/generate/adapter_v2.py b/litgpt/generate/adapter_v2.py index b8e6eff0c8..c7aeee8a91 100644 --- a/litgpt/generate/adapter_v2.py +++ b/litgpt/generate/adapter_v2.py @@ -60,7 +60,6 @@ def main( fabric.launch() check_valid_checkpoint_dir(checkpoint_dir) - config = Config.from_file(checkpoint_dir / "model_config.yaml") checkpoint_path = 
checkpoint_dir / "lit_model.pth" diff --git a/litgpt/generate/base.py b/litgpt/generate/base.py index d76a5bbb84..6488717429 100644 --- a/litgpt/generate/base.py +++ b/litgpt/generate/base.py @@ -133,7 +133,6 @@ def main( fabric = L.Fabric(devices=1, precision=precision, plugins=plugins) check_valid_checkpoint_dir(checkpoint_dir) - config = Config.from_file(checkpoint_dir / "model_config.yaml") checkpoint_path = checkpoint_dir / "lit_model.pth" diff --git a/litgpt/generate/full.py b/litgpt/generate/full.py index e602e6ef51..608115a5e1 100644 --- a/litgpt/generate/full.py +++ b/litgpt/generate/full.py @@ -59,7 +59,6 @@ def main( fabric.launch() check_valid_checkpoint_dir(checkpoint_dir) - config = Config.from_file(checkpoint_dir / "model_config.yaml") checkpoint_path = finetuned_path diff --git a/litgpt/generate/sequentially.py b/litgpt/generate/sequentially.py index cce1d8d3c9..f804c4cffc 100644 --- a/litgpt/generate/sequentially.py +++ b/litgpt/generate/sequentially.py @@ -158,7 +158,6 @@ def main( print(f"Using {total_devices} devices", file=sys.stderr) check_valid_checkpoint_dir(checkpoint_dir) - config = Config.from_file(checkpoint_dir / "model_config.yaml") checkpoint_path = checkpoint_dir / "lit_model.pth" diff --git a/litgpt/generate/tp.py b/litgpt/generate/tp.py index 3c6c8daf2c..5c56dd1c09 100644 --- a/litgpt/generate/tp.py +++ b/litgpt/generate/tp.py @@ -137,7 +137,6 @@ def main( fabric.launch() check_valid_checkpoint_dir(checkpoint_dir) - config = Config.from_file(checkpoint_dir / "model_config.yaml") model_file = "lit_model.pth" diff --git a/litgpt/scripts/merge_lora.py b/litgpt/scripts/merge_lora.py index 49129bcae1..ef186bb103 100644 --- a/litgpt/scripts/merge_lora.py +++ b/litgpt/scripts/merge_lora.py @@ -30,7 +30,7 @@ def merge_lora( precision: Optional precision setting to instantiate the model weights in. By default, this will automatically be inferred from the metadata in the given ``checkpoint_dir`` directory. 
""" - check_valid_checkpoint_dir(checkpoint_dir, lora=True) + check_valid_checkpoint_dir(checkpoint_dir, model_filename="lit_model.pth.lora") if pretrained_checkpoint_dir is not None: check_valid_checkpoint_dir(pretrained_checkpoint_dir) if (checkpoint_dir / "lit_model.pth").is_file(): diff --git a/litgpt/utils.py b/litgpt/utils.py index 3f145da105..fb6a86c107 100644 --- a/litgpt/utils.py +++ b/litgpt/utils.py @@ -52,8 +52,7 @@ def reset_parameters(module: nn.Module) -> None: mod.reset_parameters() -def check_valid_checkpoint_dir(checkpoint_dir: Path, lora: bool = False) -> None: - model_filename = "lit_model.pth.lora" if lora else "lit_model.pth" +def check_valid_checkpoint_dir(checkpoint_dir: Path, model_filename: str = "lit_model.pth") -> None: files = { model_filename: (checkpoint_dir / model_filename).is_file(), "model_config.yaml": (checkpoint_dir / "model_config.yaml").is_file(), diff --git a/tests/test_config_hub.py b/tests/test_config_hub.py index 163a6531dc..4ad634ca9b 100644 --- a/tests/test_config_hub.py +++ b/tests/test_config_hub.py @@ -36,7 +36,7 @@ @pytest.mark.parametrize(("script_file", "config_file"), all_pairs) -def test_config_help(script_file, config_file): +def test_config_help(script_file, config_file, monkeypatch): """Test that configs validate against the signature in the scripts.""" script_file = Path(__file__).parent.parent / script_file assert script_file.is_file() @@ -48,10 +48,11 @@ def test_config_help(script_file, config_file): module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) - module.main = Mock() - module.Tokenizer = Mock() - module.BitsandbytesPrecision = Mock(return_value=Precision()) - module.Config = Mock(return_value=Config.from_name("pythia-14m")) + monkeypatch.setattr(module, "main", Mock()) + monkeypatch.setattr(module, "Tokenizer", Mock()) + monkeypatch.setattr(module, "BitsandbytesPrecision", Mock(return_value=Precision()), raising=False) + monkeypatch.setattr(module, "Config", Mock(return_value=Config.from_name("pythia-14m"))) + monkeypatch.setattr(module, "check_valid_checkpoint_dir", Mock(), raising=False) with mock.patch("sys.argv", [script_file.name, "--config", str(config_file), "--devices", "1"]): CLI(module.setup) From ff7dcd17f7b36e2b349f2abe8c58b42ee6a122f6 Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Sun, 24 Mar 2024 22:08:58 -0500 Subject: [PATCH 06/37] Add note about different precision (#1163) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- config_hub/finetune/README.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/config_hub/finetune/README.md b/config_hub/finetune/README.md index aa160472d5..05a1dbeab6 100644 --- a/config_hub/finetune/README.md +++ b/config_hub/finetune/README.md @@ -35,5 +35,26 @@ For more information, see the [Dealing with out-of-memory (OOM) errors](../../tu | tiny-llama/full.yaml | 1.1B | Alpaca 2k | 1 | 1.105 | 14.10 GB | 512 | 4 | bfloat16 | 2.59 min (1xA10G) |   +## Extending the context length If you require a longer sequence length than the one used in a given config file, you can either edit the `max_seq_length` in the config file or pass an additional argument when running the finetuning command, for example, `--max_seq_length 4096` to override the sequence length provided in the config file. 
+ +  +## Training on GPUs without bfloat16 support + +If you are training on GPUs without bfloat-16 support, you need to change the `precision` option to `16-true` (16-bit floating point precision) or `16-mixed` (16/32-bit mixed precision) training: + +```bash +litgpt finetune lora \ + --config config_hub/finetune/phi-2/lora.yaml \ + --precision 16-true +``` +or + +```bash +litgpt finetune lora \ + --config config_hub/finetune/phi-2/lora.yaml \ + --precision 16-mixed +``` + +Note that `16-true` is more compute and memory-efficient, but it can sometimes lead to training convergence issues. In this case, it's recommended to use `16-mixed`. From 7322026005de0ad339eb00176f603f83ad843ec3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Mon, 25 Mar 2024 04:34:39 +0100 Subject: [PATCH 07/37] Update Thunder's GPT traces in the README (#1190) --- extensions/thunder/README.md | 108 +++-------------------------------- 1 file changed, 9 insertions(+), 99 deletions(-) diff --git a/extensions/thunder/README.md b/extensions/thunder/README.md index b84da74e5f..df2d0461a7 100644 --- a/extensions/thunder/README.md +++ b/extensions/thunder/README.md @@ -41,27 +41,8 @@ print(forward_trace) @no_autocast() def augmented_forward_fn(*args): # args: "Collection" - t0, \ - t1, \ - t2, \ - t3, \ - t4, \ - t5, \ - t6, \ - t7, \ - t8, \ - t9, \ - t10, \ - t11, \ - t12, \ - t13, \ - t14, \ - t15, \ - t16, \ - t17, \ - t18, \ - t19, \ - = args + t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17, \ + t18, t19, = args del args t24 = torch.nn.functional.embedding(t0, t19, None, None, 2.0, False, False) # t24: "cuda:0 f32[2, 5, 4096]" t20 = torch_slice_prim_impl(t1, [0, 0], [5, 128], [1, 1]) # t20: "cuda:0 f32[5, 128]" @@ -247,90 +228,19 @@ print(backward_trace) def backward_fn(saved_for_backward, cotangents): # saved_for_backward: "Collection" # cotangents: "Collection" - C0, \ - C1, \ - = saved_for_backward + C0, C1, = saved_for_backward clear_collection(saved_for_backward) del saved_for_backward - t178, \ - = cotangents + t178, = cotangents clear_collection(cotangents) del cotangents - t0, \ - t101, \ - t104, \ - t105, \ - t114, \ - t136, \ - t138, \ - t139, \ - t140, \ - t141, \ - t142, \ - t144, \ - t146, \ - t15, \ - t152, \ - t155, \ - t156, \ - t157, \ - t158, \ - t16, \ - t164, \ - t166, \ - t17, \ - t172, \ - t175, \ - t176, \ - t18, \ - t24, \ - t3, \ - t30, \ - t33, \ - t34, \ - t4, \ - t43, \ - t49, \ - t5, \ - t51, \ - t6, \ - t65, \ - t67, \ - t68, \ - t69, \ - t7, \ - t70, \ - t71, \ - t73, \ - t75, \ - t8, \ - t81, \ - t84, \ - t85, \ - t86, \ - t87, \ - t9, \ - t93, \ - t95, \ - = C0 + t0, t101, t104, t105, t114, t136, t138, t139, t140, t141, t142, t144, t146, \ + t15, t152, t155, t156, t157, t158, t16, t164, t166, t17, t172, t175, t176, t18, \ + t24, t3, t30, t33, t34, t4, t43, t49, t5, t51, t6, t65, t67, t68, t69, t7, t70, \ + t71, t73, t75, t8, t81, t84, t85, t86, t87, t9, t93, t95, = C0 clear_collection(C0) del C0 - b1, \ - b2, \ - b41, \ - b91, \ - f101, \ - f106, \ - f40, \ - f42, \ - f51, \ - f56, \ - f6, \ - f90, \ - f92, \ - i0, \ - i23, \ - i73, \ + b1, b2, b41, b91, f101, f106, f40, f42, f51, f56, f6, f90, f92, i0, i23, i73, \ = C1 clear_collection(C1) del C1 From bfe260991565132d213c5b86bfa98eb2b506071b Mon Sep 17 00:00:00 2001 From: Andrei-Aksionov <58434077+Andrei-Aksionov@users.noreply.github.com> Date: Mon, 25 Mar 2024 17:46:01 +0300 Subject: [PATCH 08/37] Annotations for forward method of the Block class. 
(#1142) --- litgpt/model.py | 38 ++++++++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/litgpt/model.py b/litgpt/model.py index f2626b0e88..fe71c60b80 100644 --- a/litgpt/model.py +++ b/litgpt/model.py @@ -139,6 +139,12 @@ def clear_kv_cache(self) -> None: class Block(nn.Module): def __init__(self, config: Config) -> None: super().__init__() + if not config.parallel_residual and config.shared_attention_norm: + raise NotImplementedError( + "No checkpoint amongst the ones we support uses this configuration" + " (non-parallel residual and shared attention norm)." + ) + self.norm_1 = config.norm_class(config.n_embd, eps=config.norm_eps) self.attn = CausalSelfAttention(config) self.norm_2 = None if config.shared_attention_norm else config.norm_class(config.n_embd, eps=config.norm_eps) @@ -154,18 +160,30 @@ def forward( mask: Optional[torch.Tensor] = None, input_pos: Optional[torch.Tensor] = None, ) -> torch.Tensor: - n_1 = self.norm_1(x) - h = self.attn(n_1, cos, sin, mask, input_pos) + """ + Non-parallel residual Parallel residual + ┌─ x ┌─ x ────────────┐ Note: if `shared_attention_norm` is True, + │ ↓ │ ↓ ↓ the output from `norm_1` is reused + │ norm_1 │ norm_1 ───► norm_2 + │ ↓ │ ↓ ↓ + │ attn │ attn mlp + │ ↓ │ ↓ │ + ┌─ └► + └► + ◄───────────┘ + │ norm_2 + │ ↓ + │ mlp + │ ↓ + └───► + + """ + + x_normed = self.norm_1(x) + attention_output = self.attn(x_normed, cos, sin, mask, input_pos) + if self.config.parallel_residual: - n_2 = n_1 if self.config.shared_attention_norm else self.norm_2(x) - x = self.mlp(n_2) + h + x + x_normed = x_normed if self.config.shared_attention_norm else self.norm_2(x) + x = self.mlp(x_normed) + attention_output + x else: - if self.config.shared_attention_norm: - raise NotImplementedError( - "No checkpoint amongst the ones we support uses this configuration" - " (non-parallel residual and shared attention norm)." 
- ) - x = h + x + x = attention_output + x x = self.mlp(self.norm_2(x)) + x return x From d656ca73940a08467cb57f5f0b9a2d3f23d59d1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Mon, 25 Mar 2024 17:22:36 +0100 Subject: [PATCH 09/37] Meta device and assign in `merge_lora` (#1189) --- litgpt/lora.py | 9 +++++---- litgpt/scripts/merge_lora.py | 10 +++++----- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/litgpt/lora.py b/litgpt/lora.py index d706bfb884..237747078b 100644 --- a/litgpt/lora.py +++ b/litgpt/lora.py @@ -144,11 +144,8 @@ def merge(self) -> None: if self.r > 0 and not self.merged: pretrained_dtype = self.linear.weight.data.dtype lora_data = self.get_lora_AB() - # if the pretrained weights and LoRA weights are of the same dtype - simply sum them - if pretrained_dtype == lora_data.dtype: - self.linear.weight.data += lora_data # if only the pretrained are in quantized form - dequantize, sum with LoRA and quantize the result - elif pretrained_dtype == torch.uint8: + if pretrained_dtype == torch.uint8: import bitsandbytes as bnb weight = self.linear.weight @@ -159,6 +156,10 @@ def merge(self) -> None: # assign updated weights and quantize by moving to CUDA device self.linear.weight = bnb.nn.Params4bit(weight_data, requires_grad=False, **weight.__dict__) self.linear.weight.cuda(weight.device) + # if the pretrained weights and LoRA weights are of compatible dtypes - simply sum them + elif torch.finfo(pretrained_dtype).max >= torch.finfo(lora_data.dtype).max: + # self.linear might be on CPU and lora_data on CUDA + self.linear.weight.data += lora_data.to(device=self.linear.weight.data.device) else: raise NotImplementedError( f"Cannot merge the pretrained weights of type {pretrained_dtype}" diff --git a/litgpt/scripts/merge_lora.py b/litgpt/scripts/merge_lora.py index ef186bb103..2bedfa743e 100644 --- a/litgpt/scripts/merge_lora.py +++ b/litgpt/scripts/merge_lora.py @@ -9,7 +9,7 @@ import yaml from litgpt.lora import GPT, Config, lora_filter, merge_lora_weights -from litgpt.utils import CLI, check_valid_checkpoint_dir, lazy_load +from litgpt.utils import CLI, check_valid_checkpoint_dir def merge_lora( @@ -43,16 +43,16 @@ def merge_lora( fabric = L.Fabric(devices=1, precision=precision, accelerator="cpu") config = Config.from_file(checkpoint_dir / "model_config.yaml", **lora_params) - with fabric.init_module(empty_init=True): + with fabric.init_module(), torch.device("meta"): model = GPT(config) lora_path = checkpoint_dir / "lit_model.pth.lora" - pretrained_checkpoint = lazy_load(pretrained_checkpoint_dir / "lit_model.pth") - lora_checkpoint = lazy_load(lora_path) + pretrained_checkpoint = torch.load(str(pretrained_checkpoint_dir / "lit_model.pth"), mmap=True) + lora_checkpoint = torch.load(str(lora_path), mmap=True) # Merge LoRA weights into the base model pretrained_checkpoint.update(lora_checkpoint.get("model", lora_checkpoint)) - model.load_state_dict(pretrained_checkpoint) + model.load_state_dict(pretrained_checkpoint, assign=True) merge_lora_weights(model) # Remove LoRA parameters and the LoRA linear substring From bbae7af5e3ef20db615b1429529cee3f75abde83 Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Mon, 25 Mar 2024 16:51:04 -0500 Subject: [PATCH 10/37] Fix link in Readme (#1191) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 57b28b4108..d6be0e3625 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ ✅  Optimized and efficient code: Flash Attention v2, multi-GPU support via 
fully-sharded data parallelism, [optional CPU offloading](tutorials/oom.md#do-sharding-across-multiple-gpus), and [TPU and XLA support](extensions/xla). -✅  [Pretraining](tutorials/pretraining.md), [finetuning](tutorials/finetune.md), and [inference](tutorials/inference.md) in various precision settings: FP32, FP16, BF16, and FP16/FP32 mixed. +✅  [Pretraining](tutorials/pretrain_tinyllama.md), [finetuning](tutorials/finetune.md), and [inference](tutorials/inference.md) in various precision settings: FP32, FP16, BF16, and FP16/FP32 mixed. ✅  [Configuration files](config_hub) for great out-of-the-box performance. From 8bb5e7b2fd14e7551455c14ea65101e10331c53b Mon Sep 17 00:00:00 2001 From: Andrei-Aksionov <58434077+Andrei-Aksionov@users.noreply.github.com> Date: Tue, 26 Mar 2024 18:35:10 +0300 Subject: [PATCH 11/37] Chunked CrossEntropyLoss for AdapterV2 (#1194) --- .github/workflows/cpu-tests.yml | 2 +- litgpt/adapter_v2.py | 29 ++++++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cpu-tests.yml b/.github/workflows/cpu-tests.yml index ce7016c672..5f43634679 100644 --- a/.github/workflows/cpu-tests.yml +++ b/.github/workflows/cpu-tests.yml @@ -37,7 +37,7 @@ jobs: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} diff --git a/litgpt/adapter_v2.py b/litgpt/adapter_v2.py index a77f2c94d5..c43120a974 100644 --- a/litgpt/adapter_v2.py +++ b/litgpt/adapter_v2.py @@ -9,7 +9,7 @@ """ from dataclasses import dataclass -from typing import Any, Dict, Optional, Tuple, Type +from typing import Any, Dict, List, Optional, Tuple, Type, Union import torch import torch.nn as nn @@ -80,6 +80,33 @@ def __init__(self, config: Config) -> None: self.max_seq_length = self.config.block_size self.mask_cache: Optional[torch.Tensor] = None + def forward( + self, idx: torch.Tensor, input_pos: Optional[torch.Tensor] = None, lm_head_chunk_size: int = 0 + ) -> Union[torch.Tensor, List[torch.Tensor]]: + T = idx.size(1) + if self.max_seq_length < T: + raise ValueError(f"Cannot forward sequence of length {T}, max seq length is only {self.max_seq_length}.") + + if input_pos is not None: # use the kv cache + cos = self.cos.index_select(0, input_pos) + sin = self.sin.index_select(0, input_pos) + if self.mask_cache is None: + raise TypeError("You need to call `gpt.set_kv_cache()`") + mask = self.mask_cache.index_select(2, input_pos) + else: + cos = self.cos[:T] + sin = self.sin[:T] + mask = None + + x = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) + for block in self.transformer.h: + x = block(x, cos, sin, mask, input_pos) + x = self.transformer.ln_f(x) + if lm_head_chunk_size > 0: + # chunk the lm head logits to reduce the peak memory used by autograd + return [self.lm_head(x_i) for x_i in x.split(lm_head_chunk_size, dim=1)] + return self.lm_head(x) # (b, t, vocab_size) + @classmethod def from_name(cls, name: str, **kwargs: Any) -> Self: return cls(Config.from_name(name, **kwargs)) From c7ae8669f8bfe57bc39bb012d473dacc18cbd6aa Mon Sep 17 00:00:00 2001 From: Andrei-Aksionov <58434077+Andrei-Aksionov@users.noreply.github.com> Date: Tue, 26 Mar 2024 18:36:00 +0300 Subject: [PATCH 12/37] Gemma: WTE scaling for Adapter and LoRA (#1193) --- litgpt/adapter.py | 2 ++ litgpt/lora.py | 2 ++ tests/test_adapter.py | 43 ++++++++++++++++++++++++++++++++++++++++ tests/test_adapter_v2.py | 1 - tests/test_lora.py | 1 - 5 
files changed, 47 insertions(+), 2 deletions(-) diff --git a/litgpt/adapter.py b/litgpt/adapter.py index 3bed9100c6..3fcceda7b6 100644 --- a/litgpt/adapter.py +++ b/litgpt/adapter.py @@ -66,6 +66,8 @@ def forward( mask = None x = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) + if self.config.scale_embeddings: + x = x * (self.config.n_embd**0.5) for block in self.transformer.h: x = block(x, cos, sin, mask, input_pos) x = self.transformer.ln_f(x) diff --git a/litgpt/lora.py b/litgpt/lora.py index 237747078b..cae5ca7fe9 100644 --- a/litgpt/lora.py +++ b/litgpt/lora.py @@ -542,6 +542,8 @@ def forward( mask = None x = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) + if self.config.scale_embeddings: + x = x * (self.config.n_embd**0.5) for block in self.transformer.h: x = block(x, cos, sin, mask, input_pos) x = self.transformer.ln_f(x) diff --git a/tests/test_adapter.py b/tests/test_adapter.py index ab1a918ec9..cb9ac7b019 100644 --- a/tests/test_adapter.py +++ b/tests/test_adapter.py @@ -14,6 +14,7 @@ from lightning.fabric.plugins.precision.bitsandbytes import _BITSANDBYTES_AVAILABLE, BitsandbytesPrecision from lightning.fabric.wrappers import _FabricOptimizer from torch._dynamo.backends import debugging +from transformers.models.gemma import GemmaConfig, GemmaForCausalLM import litgpt.adapter as gpt_adapter import litgpt.finetune.adapter as module @@ -21,6 +22,7 @@ from litgpt.adapter import GPT, Config, adapter_filter from litgpt.args import EvalArgs, TrainArgs from litgpt.data import Alpaca +from litgpt.scripts.convert_hf_checkpoint import copy_weights_hf_llama def test_config_identical(): @@ -232,3 +234,44 @@ def test_adapter_bitsandbytes(monkeypatch, tmp_path, fake_checkpoint_dir, alpaca logs = stdout.getvalue() assert "of trainable parameters: 168" in logs assert "of non-trainable parameters: 1,888" in logs + + +@torch.inference_mode() +@pytest.mark.parametrize("model_name", ["gemma-2b", "gemma-7b"]) +def test_against_hf_gemma(model_name): + device = torch.device("cpu") + dtype = torch.float32 + T = 5 + ours_config = Config.from_name(model_name, n_layer=2, n_head=16, n_embd=32, intermediate_size=86) + theirs_config = GemmaConfig( + vocab_size=ours_config.padded_vocab_size, + hidden_size=ours_config.n_embd, + head_dim=ours_config.head_size, + num_attention_heads=ours_config.n_head, + num_hidden_layers=ours_config.n_layer, + intermediate_size=ours_config.intermediate_size, + max_position_embeddings=T, + rms_norm_eps=ours_config.norm_eps, + num_key_value_heads=ours_config.n_query_groups, + rope_theta=ours_config.rope_base, + attention_bias=ours_config.bias, + tie_word_embeddings=True, + hidden_act="gelu_pytorch_tanh", + ) + assert ours_config.intermediate_size == theirs_config.intermediate_size + + theirs_model = GemmaForCausalLM(theirs_config).to(device) + theirs_state_dict = theirs_model.state_dict() + # Gemma weights are shipped without `lm_head.weight` + theirs_state_dict.pop("lm_head.weight") + state_dict = {} + copy_weights_hf_llama(ours_config, {}, state_dict, theirs_state_dict) + ours_model = GPT(ours_config).to(device) + ours_model.load_state_dict(state_dict) + + # test end to end + x = torch.tensor([[9856, 23, 491, 1536, 304]], dtype=torch.int32, device=device) + assert x.size(1) == T + ours_y = ours_model(x) + theirs_y = theirs_model(x)["logits"].to(dtype) # HF converts logits to float + torch.testing.assert_close(ours_y, theirs_y) diff --git a/tests/test_adapter_v2.py b/tests/test_adapter_v2.py index f1a12cc539..67f0689c05 100644 --- 
a/tests/test_adapter_v2.py +++ b/tests/test_adapter_v2.py @@ -209,7 +209,6 @@ def test_against_hf_mixtral(): @torch.inference_mode() -@pytest.mark.xfail(raises=AssertionError, match="Tensor-likes are not close") @pytest.mark.parametrize("model_name", ["gemma-2b", "gemma-7b"]) def test_against_hf_gemma(model_name): device = torch.device("cpu") diff --git a/tests/test_lora.py b/tests/test_lora.py index fd09daeeeb..1b2039f2d4 100644 --- a/tests/test_lora.py +++ b/tests/test_lora.py @@ -556,7 +556,6 @@ def test_against_hf_mixtral(): @torch.inference_mode() -@pytest.mark.xfail(raises=AssertionError, match="Tensor-likes are not close") @pytest.mark.parametrize("model_name", ["gemma-2b", "gemma-7b"]) def test_against_hf_gemma(model_name): device = torch.device("cpu") From 73adb38a7bd6c52a1c8533cc9ac1ad7e8741ca0e Mon Sep 17 00:00:00 2001 From: Andrei-Aksionov <58434077+Andrei-Aksionov@users.noreply.github.com> Date: Tue, 26 Mar 2024 21:17:27 +0300 Subject: [PATCH 13/37] Gemma 7B x LoRA fix (#1197) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- litgpt/adapter_v2.py | 2 ++ litgpt/lora.py | 12 ++++++++---- tests/test_lora.py | 38 +++++++++++++++++++++++++++++++------- 3 files changed, 41 insertions(+), 11 deletions(-) diff --git a/litgpt/adapter_v2.py b/litgpt/adapter_v2.py index c43120a974..665527f053 100644 --- a/litgpt/adapter_v2.py +++ b/litgpt/adapter_v2.py @@ -99,6 +99,8 @@ def forward( mask = None x = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) + if self.config.scale_embeddings: + x = x * (self.config.n_embd**0.5) for block in self.transformer.h: x = block(x, cos, sin, mask, input_pos) x = self.transformer.ln_f(x) diff --git a/litgpt/lora.py b/litgpt/lora.py index cae5ca7fe9..6082f765a1 100644 --- a/litgpt/lora.py +++ b/litgpt/lora.py @@ -186,6 +186,7 @@ def __init__( in_features: int, out_features: int, # ↓ the remaining part is for LoRA + head_size: int, n_head: int, n_query_groups: int, r: int = 0, @@ -205,6 +206,7 @@ def __init__( Args: in_features: number of input features of the pretrained weights out_features: number of output features of the pretrained weights + head_size: size of a single attention head n_head: number of attention heads n_query_groups: number of query groups (see diagram in `litgpt/config.py`) r: rank of the weight update matrices. 
To make sense of using LoRA the rank should be smaller than the rank of @@ -235,12 +237,13 @@ def __init__( if r > 0 and any(enable_lora): self.lora_A = nn.Parameter(torch.zeros((r * sum(enable_lora), in_features))) # (4, 128) enable_q, enable_k, enable_v = enable_lora - self.kv_embd_size = self.linear.in_features // (n_head // n_query_groups) # qkv_shapes will be used to split a tensor with weights correctly qkv_shapes = ( - self.linear.in_features * enable_q, - self.kv_embd_size * enable_k, - self.kv_embd_size * enable_v, + # if `head_size` is explicitly specified in the config, `n_embd` (or `in_features`) + # might not be equal to `head_size * n_head`, thus we use it directly here + head_size * n_head * enable_q, + head_size * n_query_groups * enable_k, + head_size * n_query_groups * enable_v, ) self.qkv_shapes = [s for s in qkv_shapes if s] self.lora_B = nn.Parameter(torch.zeros(sum(self.qkv_shapes), r)) # (256, 2)) @@ -597,6 +600,7 @@ def __init__(self, config: Config) -> None: enable_lora=(config.lora_query, config.lora_key, config.lora_value), bias=config.bias, # for MQA/GQA support + head_size=config.head_size, n_head=config.n_head, n_query_groups=config.n_query_groups, ) diff --git a/tests/test_lora.py b/tests/test_lora.py index 1b2039f2d4..3a6eeb8de3 100644 --- a/tests/test_lora.py +++ b/tests/test_lora.py @@ -234,7 +234,7 @@ def __init__(self, *args, **kwargs): original_linear = torch.nn.Linear # Our bnb does this sort of monkey patching torch.nn.Linear = MyLinear - layer = LoRAQKVLinear(1, 1, 1, 1) + layer = LoRAQKVLinear(1, 1, 1, 1, 1) assert isinstance(layer.linear, original_linear) torch.nn.Linear = original_linear @@ -324,6 +324,7 @@ def test_lora_gpt_query_groups_merge_and_forward_no_exception(n_query_groups, ap @torch.inference_mode() +@pytest.mark.parametrize("head_size", (1, 2, 4)) @pytest.mark.parametrize("n_head", (1, 2, 3, 6, 12)) @pytest.mark.parametrize( "enable_lora", @@ -337,9 +338,11 @@ def test_lora_gpt_query_groups_merge_and_forward_no_exception(n_query_groups, ap (True, True, True), ], ) -def test_lora_qkv_linear_compare_conv1d(n_head, enable_lora): +def test_lora_qkv_linear_compare_conv1d(head_size, n_head, enable_lora): C = 12 - layer = LoRAQKVLinear(C, 3 * C, n_head=n_head, n_query_groups=n_head, r=2, enable_lora=enable_lora) + layer = LoRAQKVLinear( + C, 3 * C, head_size=head_size, n_head=n_head, n_query_groups=n_head, r=2, enable_lora=enable_lora + ) x = torch.randn((1, 1, C)) a = F.linear(x, layer.lora_A).transpose(-2, -1) # after_A b = layer.lora_B.data.unsqueeze(-1) @@ -371,7 +374,8 @@ def test_lora_linear_weights_merged_status(rank, expected_merged): ((0, True, False), (1, True, True), (0, False, False), (1, False, False)), ) def test_lora_qkv_linear_weights_merged_status(rank, enable_lora, expected_merged): - layer = LoRAQKVLinear(10, 3 * 10, n_head=2, n_query_groups=2, r=rank, enable_lora=enable_lora) + C = 10 + layer = LoRAQKVLinear(C, 3 * C, head_size=5, n_head=2, n_query_groups=2, r=rank, enable_lora=enable_lora) assert not layer.merged layer.merge() assert layer.merged == expected_merged @@ -524,6 +528,9 @@ def test_against_hf_mixtral(): n_query_groups=2, intermediate_size=86, n_expert=4, + lora_r=1, + lora_key=True, + lora_value=True, ) T = 5 theirs_config = MixtralConfig( @@ -545,7 +552,10 @@ def test_against_hf_mixtral(): state_dict = {} copy_weights_hf_llama(ours_config, {}, state_dict, theirs_state_dict) ours_model = LoRAGPT(ours_config).to(device) - ours_model.load_state_dict(state_dict) + keys = ours_model.load_state_dict(state_dict, 
strict=False) + assert not keys.unexpected_keys + for k in keys.missing_keys: + assert lora_filter(k, None) # test end to end x = torch.tensor([[9856, 23, 491, 1536, 304], [23, 345, 65, 123, 321]], dtype=torch.int32, device=device) @@ -561,7 +571,18 @@ def test_against_hf_gemma(model_name): device = torch.device("cpu") dtype = torch.float32 T = 5 - ours_config = Config.from_name(model_name, n_layer=2, n_head=16, n_embd=32, intermediate_size=86) + ours_config = Config.from_name( + model_name, + n_layer=2, + n_head=16, + n_embd=32, + head_size=4, + intermediate_size=86, + lora_r=1, + lora_query=True, + lora_key=True, + lora_value=True, + ) theirs_config = GemmaConfig( vocab_size=ours_config.padded_vocab_size, hidden_size=ours_config.n_embd, @@ -586,7 +607,10 @@ def test_against_hf_gemma(model_name): state_dict = {} copy_weights_hf_llama(ours_config, {}, state_dict, theirs_state_dict) ours_model = LoRAGPT(ours_config).to(device) - ours_model.load_state_dict(state_dict) + keys = ours_model.load_state_dict(state_dict, strict=False) + assert not keys.unexpected_keys + for k in keys.missing_keys: + assert lora_filter(k, None) # test end to end x = torch.tensor([[9856, 23, 491, 1536, 304]], dtype=torch.int32, device=device) From d296c984ad583d840662b7f40b247fcc431e91b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 26 Mar 2024 20:33:50 +0100 Subject: [PATCH 14/37] `reset_parameters` fixes (#1199) --- litgpt/adapter.py | 3 ++- litgpt/lora.py | 8 ++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/litgpt/adapter.py b/litgpt/adapter.py index 3fcceda7b6..295470c932 100644 --- a/litgpt/adapter.py +++ b/litgpt/adapter.py @@ -151,7 +151,8 @@ def scaled_dot_product_attention( return y + self.gating_factor * ay def reset_parameters(self) -> None: - torch.nn.init.zeros_(self.gating_factor) + if hasattr(self, "gating_factor"): + torch.nn.init.zeros_(self.gating_factor) def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: """For compatibility with older checkpoints.""" diff --git a/litgpt/lora.py b/litgpt/lora.py index 6082f765a1..69e83fcbc6 100644 --- a/litgpt/lora.py +++ b/litgpt/lora.py @@ -122,8 +122,8 @@ def __init__( # Actual trainable parameters if r > 0: - self.lora_A = nn.Parameter(torch.zeros((r, in_features))) - self.lora_B = nn.Parameter(torch.zeros((out_features, r))) + self.lora_A = nn.Parameter(torch.empty((r, in_features))) + self.lora_B = nn.Parameter(torch.empty((out_features, r))) self.scaling = self.lora_alpha / self.r self.reset_parameters() @@ -235,7 +235,7 @@ def __init__( # ⚬ r: 2 # ⚬ enable_lora: [True, False, True] if r > 0 and any(enable_lora): - self.lora_A = nn.Parameter(torch.zeros((r * sum(enable_lora), in_features))) # (4, 128) + self.lora_A = nn.Parameter(torch.empty((r * sum(enable_lora), in_features))) # (4, 128) enable_q, enable_k, enable_v = enable_lora # qkv_shapes will be used to split a tensor with weights correctly qkv_shapes = ( @@ -246,7 +246,7 @@ def __init__( head_size * n_query_groups * enable_v, ) self.qkv_shapes = [s for s in qkv_shapes if s] - self.lora_B = nn.Parameter(torch.zeros(sum(self.qkv_shapes), r)) # (256, 2)) + self.lora_B = nn.Parameter(torch.empty(sum(self.qkv_shapes), r)) # (256, 2)) # Notes about shapes above # - self.lora_A has shape (4, 128): 4 because rank is 2 and LoRA is applied only to two matrices; # 128 is the input size of the x (embedding size). 
(4, 128) and not (128, 4) because later on in From e4d634ddbc018fbbd014f94436f89c7ad406db1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 26 Mar 2024 21:29:21 +0100 Subject: [PATCH 15/37] Use `replaces=` for swiglu (#1200) --- extensions/thunder/unsloth/executor.py | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/extensions/thunder/unsloth/executor.py b/extensions/thunder/unsloth/executor.py index a638af079f..5b13c4dee2 100644 --- a/extensions/thunder/unsloth/executor.py +++ b/extensions/thunder/unsloth/executor.py @@ -48,8 +48,7 @@ def unsloth_cross_entropy_meta(logits: TensorProxy, labels: TensorProxy) -> Tupl def unsloth_cross_entropy_backward_impl(dlosses: Tensor, logits: Tensor, labels: Tensor, logsumexp: Tensor) -> Tensor: - # clone() because the kernel writes the grads in the logits. - # If it works, we can remove this it, but it's not a thing we generally anticipate and support right now. + # clone() because the kernel writes the grads in the logits return kernels.cross_entropy_loss._cross_entropy_backward_impl(dlosses, logits.clone(), logsumexp, labels) @@ -152,17 +151,10 @@ def unsloth_cross_entropy_grad( """ -def swiglu_forward_meta(e: TensorProxy, g: TensorProxy) -> TensorProxy: - return TensorProxy(like=e) - - -def swiglu_forward(e: torch.Tensor, g: torch.Tensor) -> torch.Tensor: +def swiglu(e: torch.Tensor, g: torch.Tensor) -> torch.Tensor: return torch.nn.functional.silu(e) * g -swiglu = unsloth_ex.register_operator("swiglu", meta=swiglu_forward_meta, fn=swiglu_forward) - - from litgpt.model import LLaMAMLP as OriginalLLaMAMLP @@ -170,16 +162,20 @@ class ThunderLLaMAMLP(OriginalLLaMAMLP): def forward(self, x: torch.Tensor) -> torch.Tensor: x_fc_1 = self.fc_1(x) x_fc_2 = self.fc_2(x) - # There's no `register_operator` for Modules and `swiglu_forward` is not a torch symbol that we can register to - # For now, some duplication and monkey patching is required - fn = swiglu if thunder.core.interpreter.is_jitting() else swiglu_forward - x = fn(x_fc_1, x_fc_2) + x = swiglu(x_fc_1, x_fc_2) return self.proj(x) litgpt.model.LLaMAMLP = ThunderLLaMAMLP +def swiglu_forward_meta(e: TensorProxy, g: TensorProxy) -> TensorProxy: + return TensorProxy(like=e) + + +litgpt_swiglu = unsloth_ex.register_operator("litgpt_swiglu", meta=swiglu_forward_meta, fn=swiglu, replaces=swiglu) + + unsloth_swiglu_forward = unsloth_ex.register_operator( "unsloth_swiglu_forward", meta=swiglu_forward_meta, fn=lambda *args: kernels.swiglu_fg_kernel(*args) ) @@ -217,7 +213,7 @@ def unsloth_swiglu_grad(e: TensorProxy, g: TensorProxy) -> TensorProxy: unsloth_ex.register_implementation( - swiglu, + litgpt_swiglu, checker=swiglu_to_unsloth_checker, execution_transform=unsloth_swiglu_forward, grad_transform=unsloth_swiglu_grad, From 4c56f1c425ffe11c6678f5b15fa8ff5bdd2047bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 26 Mar 2024 21:35:28 +0100 Subject: [PATCH 16/37] Fix config link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d6be0e3625..239179af99 100644 --- a/README.md +++ b/README.md @@ -134,7 +134,7 @@ LitGPT also allows users to use configuration files in YAML format instead of sp ```bash litgpt finetune lora \ - --config https://github.com/Lightning-AI/litgpt/blob/wip/config_hub/finetune/llama-2-7b/lora.yaml + --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml ``` For added convenience, you 
can also manually override config file setting via the CLI: From d653607474dbed255a0a5bed0968fcdbab649fbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Wed, 27 Mar 2024 02:56:52 +0100 Subject: [PATCH 17/37] litdata backed tinystories (#1186) --- litgpt/data/tinystories.py | 213 +++++++++++++-------------------- tests/data/test_tinystories.py | 86 ++++++------- 2 files changed, 132 insertions(+), 167 deletions(-) diff --git a/litgpt/data/tinystories.py b/litgpt/data/tinystories.py index 40ab0a40ff..b494f3e9ef 100644 --- a/litgpt/data/tinystories.py +++ b/litgpt/data/tinystories.py @@ -1,23 +1,18 @@ -"""https://github.com/karpathy/llama2.c/blob/b3c4b6/tinystories.py""" - +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. import glob import json import os -import random -from concurrent.futures import ProcessPoolExecutor from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Optional -import numpy as np -import torch -from torch.utils.data import ConcatDataset, DataLoader +from torch.utils.data import DataLoader from tqdm import tqdm +from litgpt import Tokenizer +from litgpt.data import DataModule from litgpt.data.alpaca import download_if_missing -from litgpt.data.base import DataModule -from litgpt.tokenizer import Tokenizer @dataclass @@ -27,55 +22,97 @@ class TinyStories(DataModule): Provides training and validation dataloaders that return batches of tokens. Every sample is set to a fixed length. """ - path: Path = Path("data/") - """Path to the data directory where data will be downloaded and preprocessed""" - num_workers: int = 0 - """How many DataLoader processes to use for loading.""" + data_path: Path = Path("data/tinystories") + """The path to the data directory, containing two folders 'train' and 'val' + which are the output of the preprocessing step.""" seed: int = 42 - """The random seed for creating the train/val splits and shuffling the dataset.""" + """The seed to use for shuffling the dataset.""" + num_workers: int = 8 + """The number of workers to use for the dataloaders.""" tokenizer: Optional[Tokenizer] = field(default=None, init=False, repr=False) batch_size: int = field(default=1, init=False, repr=False) max_seq_length: int = field(default=-1, init=False, repr=False) - train_dataset: Optional[torch.utils.data.Dataset] = field(default=None, init=False, repr=False) - test_dataset: Optional[torch.utils.data.Dataset] = field(default=None, init=False, repr=False) + + def __post_init__(self) -> None: + self.data_path_train = self.data_path / "train" + self.data_path_val = self.data_path / "val" def connect(self, tokenizer: Optional[Tokenizer] = None, batch_size: int = 1, max_seq_length: int = -1) -> None: self.tokenizer = tokenizer self.batch_size = batch_size - self.max_seq_length = max_seq_length + self.max_seq_length = max_seq_length + 1 # Increase by one because we need the next token as well def prepare_data(self) -> None: - download(self.path) - assert self.tokenizer is not None - pretokenize(self.path, self.tokenizer) - - def setup(self, stage: str = "") -> None: - # the .bin files are right along the .json files - bin_dir = self.path / "TinyStories_all_data" - shard_filenames = sorted(glob.glob(str(bin_dir / "*.bin"))) - assert len(shard_filenames) > 0, f"No bin files found in {bin_dir}" - assert len(shard_filenames) > 1, f"Expected at least two bins in {bin_dir}" + from litdata import optimize + + download(self.data_path) + + files = 
sorted(glob.glob(str(self.data_path / "TinyStories_all_data" / "*.json"))) + assert len(files) > 0, f"No json files found in {files}" + assert len(files) > 1, f"Expected at least two json files in {files}" # train/test split. let's use only shard 0 for test split, rest train - va_files, *train_files = shard_filenames - # shuffle the training files - random.Random(self.seed).shuffle(train_files) - self.train_dataset = ConcatDataset([PretokDataset(f, self.max_seq_length) for f in train_files]) - self.val_dataset = PretokDataset(shard_filenames[0], self.max_seq_length) + val_files, *train_files = files + num_workers = os.cpu_count() - 1 + + if not Path(self.data_path_train).is_dir(): + optimize( + fn=partial(tokenize, tokenizer=self.tokenizer), + inputs=train_files, + output_dir=str(self.data_path_train), + num_workers=num_workers, + chunk_bytes="200MB", + ) + if not Path(self.data_path_val).is_dir(): + optimize( + fn=partial(tokenize, tokenizer=self.tokenizer), + inputs=val_files, + output_dir=str(self.data_path_val), + num_workers=num_workers, + chunk_bytes="200MB", + ) def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, batch_size=self.batch_size, pin_memory=True, shuffle=True, num_workers=self.num_workers + from litdata.streaming import StreamingDataLoader, StreamingDataset, TokensLoader + + train_dataset = StreamingDataset( + input_dir=str(self.data_path_train), + item_loader=TokensLoader(block_size=self.max_seq_length), + shuffle=True, + drop_last=True, + ) + train_dataloader = StreamingDataLoader( + train_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True ) + return train_dataloader def val_dataloader(self) -> DataLoader: - return DataLoader( - self.val_dataset, - batch_size=self.batch_size, - pin_memory=True, - shuffle=True, # llama2.c shuffles validation too - num_workers=self.num_workers, + from litdata.streaming import StreamingDataset, TokensLoader + + val_dataset = StreamingDataset( + input_dir=str(self.data_path_val), + item_loader=TokensLoader(block_size=self.max_seq_length), + shuffle=True, + # Consider setting to False, but we would lose some samples due to truncation when world size > 1 + drop_last=True, ) + val_dataloader = DataLoader( + val_dataset, batch_size=self.batch_size, pin_memory=True, num_workers=self.num_workers, drop_last=True + ) + return val_dataloader + + +def tokenize(filename: str, tokenizer: Tokenizer): + with open(filename, "r") as f: + data = json.load(f) + global_rank = int(os.environ["DATA_OPTIMIZER_GLOBAL_RANK"]) + num_workers = int(os.environ["DATA_OPTIMIZER_NUM_WORKERS"]) + local_rank = global_rank % num_workers + for example in tqdm(data, position=local_rank): + text = example["story"] + text = text.strip() # get rid of leading/trailing whitespace + tokens = tokenizer.encode(text, bos=True, eos=False) # encode the text, use BOS + yield tokens _URL = "https://huggingface.co/datasets/roneneldan/TinyStories/resolve/main/TinyStories_all_data.tar.gz" @@ -84,98 +121,20 @@ def val_dataloader(self) -> DataLoader: def download(data_dir: Path): data_dir.mkdir(exist_ok=True) + data_dir = data_dir / "TinyStories_all_data" + shard_filenames = sorted(glob.glob(str(data_dir / "*.json"))) + if shard_filenames: + print(f"{data_dir} already exists, skipping unpacking...") + return + # download the TinyStories dataset, unless it's already downloaded data_filename = data_dir / "TinyStories_all_data.tar.gz" download_if_missing(data_filename, _URL, stream=True, mode="wb") print("Download 
done.") # unpack the tar.gz file into all the data shards (json files) - data_dir = data_dir / "TinyStories_all_data" + data_dir.mkdir(exist_ok=True) + print(f"Unpacking {data_filename}...") + os.system(f"tar -xzf {data_filename} -C {data_dir}") shard_filenames = sorted(glob.glob(str(data_dir / "*.json"))) - if shard_filenames: - print(f"{data_dir} already exists, skipping unpacking...") - else: - data_dir.mkdir(exist_ok=True) - print(f"Unpacking {data_filename}...") - os.system(f"tar -xzf {data_filename} -C {data_dir}") - shard_filenames = sorted(glob.glob(str(data_dir / "*.json"))) - print(f"Number of shards: {len(shard_filenames)}") - # print a single example just for debugging and such - # with open(shard_filenames[0], "r") as f: - # data = json.load(f) - # print(f"Example story:\n{data[0]}") - - -def process_shard(args, tokenizer): - shard_id, shard = args - with open(shard, "r") as f: - data = json.load(f) - all_tokens = [] - for example in tqdm(data, position=shard_id): - text = example["story"] - text = text.strip() # get rid of leading/trailing whitespace - tokens = tokenizer.encode(text, bos=True, eos=False) # encode the text, use BOS - all_tokens.extend(tokens) - # convert to uint16 nparray - all_tokens = np.array(all_tokens, dtype=np.uint16) - # just save the tokenized file in the same dir - tokenized_filename = shard.replace(".json", ".bin") - # write the bytes - with open(tokenized_filename, "wb") as f: - f.write(all_tokens.tobytes()) - # calculate the average sequence length (they are separated by BOS=1) - bos_id = tokenizer.bos_id - assert bos_id >= 0 # uint16 is unsigned - bos_tokens = (all_tokens == tokenizer.bos_id).sum() - assert bos_tokens > 0 - avg_seq_len = all_tokens.size / bos_tokens - print( - f"Saved {tokenized_filename}, tokens: {all_tokens.size}, bos: {bos_tokens}, average seqlen: {avg_seq_len:.2f}" - ) - - -def pretokenize(data_dir: Path, tokenizer: Tokenizer): - bins_path = str(data_dir / "TinyStories_all_data" / "*.bin") - shard_filenames = sorted(glob.glob(bins_path)) - if shard_filenames: - print("Already pretokenized.") - return - # iterate the shards and tokenize all of them one by one - jsons_path = str(data_dir / "TinyStories_all_data" / "*.json") - shard_filenames = sorted(glob.glob(jsons_path)) - if not shard_filenames: - raise ValueError(f"No json files found in {jsons_path!r}. 
Did you run `python tinystories.py download`?") - # process all the shards in a process pool - fun = partial(process_shard, tokenizer=tokenizer) - with ProcessPoolExecutor() as executor: - executor.map(fun, enumerate(shard_filenames)) - print("Done.") - - -class PretokDataset(torch.utils.data.Dataset): - """Loads a pre-tokenized array from disk and returns chunks of `max_seq_length` length.""" - - def __init__(self, filepath: str, max_seq_len: int): - super().__init__() - self.filepath = filepath - # open the dataset for reading but keep it on disk with memmap - self.shard = np.memmap(filepath, dtype=np.uint16, mode="r") - self.shard_length = len(self.shard) - self.length = self.shard_length // max_seq_len - assert max_seq_len > 1 - self.max_seq_len = max_seq_len - - def __len__(self) -> int: - return self.length - - def __getitem__(self, ix: int) -> torch.Tensor: - if ix < 0: - raise NotImplementedError - start = ix * self.max_seq_len - end = start + self.max_seq_len + 1 - if end > self.shard_length: - raise IndexError - # calling .astype will copy the data into a new numpy array, now in RAM - chunk = torch.from_numpy((self.shard[start:end]).astype(np.int64)) - return chunk diff --git a/tests/data/test_tinystories.py b/tests/data/test_tinystories.py index 8e9a94dd0a..c67cc8e081 100644 --- a/tests/data/test_tinystories.py +++ b/tests/data/test_tinystories.py @@ -1,29 +1,26 @@ import json -from contextlib import redirect_stdout -from io import StringIO -import numpy as np import pytest import torch +from litdata import optimize +from litdata.streaming import StreamingDataset, TokensLoader from torch.utils._pytree import tree_map -from torch.utils.data import ConcatDataset -from litgpt.data.tinystories import PretokDataset, TinyStories, process_shard +def fake_chunk(path, data): + def fn(_): + for story in data: + yield torch.tensor(story) -def fake_bin(tmp_path, data, name): - all_tokens = np.array(data, dtype=np.uint16) - data_path = tmp_path / f"{name}.bin" - with open(data_path, "wb") as f: - f.write(all_tokens.tobytes()) - return data_path + optimize(fn=fn, inputs=[None] * len(data), output_dir=str(path), num_workers=1, chunk_bytes="200MB") +@pytest.mark.xfail(raises=IndexError, strict=True) # requires https://github.com/Lightning-AI/litdata/pull/77 @pytest.mark.parametrize( ("max_seq_len", "expected"), [ - (2, [[0, 23, 15], [15, 63, 0], [0, 73, 5], [5, 0, 1], [1, 1999, 0]]), - (5, [[0, 23, 15, 63, 0, 73], [73, 5, 0, 1, 1999, 0]]), + (2, [[0, 23, 15], [63, 0, 73], [5, 0, 1], [1999, 0, 13]]), + (5, [[0, 23, 15, 63, 0, 73], [5, 0, 1, 1999, 0, 13]]), (6, [[0, 23, 15, 63, 0, 73, 5]]), (7, [[0, 23, 15, 63, 0, 73, 5, 0]]), ], @@ -31,14 +28,18 @@ def fake_bin(tmp_path, data, name): def test_pretok_dataset(tmp_path, max_seq_len, expected): fake_data = [0, 23, 15, 63, 0, 73, 5, 0, 1, 1999, 0, 13] assert len(fake_data) == 12 - bin_path = fake_bin(tmp_path, fake_data, "data") + fake_chunk(tmp_path, [fake_data]) - dataset = PretokDataset(str(bin_path), max_seq_len) + dataset = StreamingDataset( + input_dir=str(tmp_path), item_loader=TokensLoader(block_size=max_seq_len + 1), shuffle=False, drop_last=False + ) actual = tree_map(torch.Tensor.tolist, list(dataset)) assert actual == expected -def test_process_shard(tmp_path): +def test_tokenize(tmp_path, monkeypatch): + from litgpt.data.tinystories import tokenize + story1, story2 = "foo bar", " fun " data = [{"story": story1}, {"story": story2}] shard_path = tmp_path / "data.json" @@ -53,38 +54,43 @@ def encode(self, text, bos, eos): assert not eos 
return [self.bos_id] + [ord(c) for c in text] - out = StringIO() - with redirect_stdout(out): - process_shard((0, str(shard_path)), Tokenizer()) - - text = out.getvalue() - assert text.endswith("data.bin, tokens: 12, bos: 2, average seqlen: 6.00\n") - assert shard_path.with_suffix(".bin").exists() + monkeypatch.setenv("DATA_OPTIMIZER_GLOBAL_RANK", "0") + monkeypatch.setenv("DATA_OPTIMIZER_NUM_WORKERS", "1") + data = tokenize(str(shard_path), Tokenizer()) + assert list(data) == [[0, 102, 111, 111, 32, 98, 97, 114], [0, 102, 117, 110]] def test_tinystories_datamodule(tmp_path): - datamodule = TinyStories(tmp_path, seed=42) - datamodule.connect(max_seq_length=2) + from litgpt.data.tinystories import TinyStories - data_dir = tmp_path / "TinyStories_all_data" - data_dir.mkdir() - fake_bin(data_dir, [12], "0") - fake_bin(data_dir, [0, 23, 15, 63, 0], "1") - fake_bin(data_dir, [73, 5, 0, 1, 1999, 0, 13], "2") + data_dir = tmp_path / "tinystories" - datamodule.setup() + datamodule = TinyStories(data_dir, seed=42) + datamodule.connect(max_seq_length=2) - assert isinstance(datamodule.train_dataset, ConcatDataset) - assert len(datamodule.train_dataset.datasets) == 2 - assert isinstance(datamodule.train_dataset.datasets[0], PretokDataset) - # unordered because it shuffled - assert datamodule.train_dataset.datasets[0].filepath == str(data_dir / "2.bin") - assert datamodule.train_dataset.datasets[1].filepath == str(data_dir / "1.bin") + # simulate `datamodule.prepare_data` + train_data_dir = data_dir / "train" + train_data_dir.mkdir(parents=True) + fake_chunk(train_data_dir, [[12], [0, 23, 15, 63, 0], [73, 5, 0, 1, 1999, 0, 13]]) - assert isinstance(datamodule.val_dataset, PretokDataset) - assert datamodule.val_dataset.filepath == str(data_dir / "0.bin") + datamodule.setup() tr_dataloader = datamodule.train_dataloader() torch.manual_seed(0) actual = tree_map(torch.Tensor.tolist, list(tr_dataloader)) - assert actual == [[[0, 1, 1999]], [[15, 63, 0]], [[1999, 0, 13]], [[0, 23, 15]], [[73, 5, 0]]] + # there is 1 sample per index in the data (13) + assert actual == [ + [[1999, 0, 13]], + [[0, 13, 12]], + [[1, 1999, 0]], + [[63, 0, 73]], + [[5, 0, 1]], + [[0, 73, 5]], + [[0, 23, 15]], + [[0, 1, 1999]], + [[15, 63, 0]], + [[73, 5, 0]], + [[12, 0, 23]], + [[23, 15, 63]], + [[13, 12, 0]], + ] From af8a39dc7b39847700b08b32bfb922efba5c6adc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Wed, 27 Mar 2024 03:10:38 +0100 Subject: [PATCH 18/37] Fix tests in MacOS/Windows --- tests/data/test_tinystories.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/data/test_tinystories.py b/tests/data/test_tinystories.py index c67cc8e081..d0318995bf 100644 --- a/tests/data/test_tinystories.py +++ b/tests/data/test_tinystories.py @@ -7,12 +7,13 @@ from torch.utils._pytree import tree_map -def fake_chunk(path, data): - def fn(_): - for story in data: - yield torch.tensor(story) +def tokenize(data): + for story in data: + yield torch.tensor(story) + - optimize(fn=fn, inputs=[None] * len(data), output_dir=str(path), num_workers=1, chunk_bytes="200MB") +def fake_chunk(path, data): + optimize(fn=tokenize, inputs=[data] * len(data), output_dir=str(path), num_workers=1, chunk_bytes="200MB") @pytest.mark.xfail(raises=IndexError, strict=True) # requires https://github.com/Lightning-AI/litdata/pull/77 From a7acf996ef163756b96d792fbd24229b4bed4578 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Wed, 27 Mar 2024 19:57:08 +0100 Subject: [PATCH 19/37] Use 
tinystories in `pretrain/debug.yaml` (#1203) --- config_hub/pretrain/debug.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config_hub/pretrain/debug.yaml b/config_hub/pretrain/debug.yaml index 346f4111b8..77ad6b13ad 100644 --- a/config_hub/pretrain/debug.yaml +++ b/config_hub/pretrain/debug.yaml @@ -20,7 +20,7 @@ initial_checkpoint_dir: resume: false # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``. -data: OpenWebText +data: TinyStories # Training-related arguments. See ``litgpt.args.TrainArgs`` for details train: From 84a73fd031892b9c442e645310602ccfce9c4d28 Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Wed, 27 Mar 2024 15:17:05 -0500 Subject: [PATCH 20/37] Updated gemma configs (#1205) --- config_hub/finetune/README.md | 9 +- config_hub/finetune/gemma-2b/full.yaml | 8 +- config_hub/finetune/gemma-2b/lora.yaml | 4 +- config_hub/finetune/gemma-2b/qlora.yaml | 2 +- config_hub/finetune/gemma-7b/lora.yaml | 122 ++++++++++++++++++++++++ config_hub/finetune/gemma-7b/qlora.yaml | 122 ++++++++++++++++++++++++ 6 files changed, 257 insertions(+), 10 deletions(-) create mode 100644 config_hub/finetune/gemma-7b/lora.yaml create mode 100644 config_hub/finetune/gemma-7b/qlora.yaml diff --git a/config_hub/finetune/README.md b/config_hub/finetune/README.md index 05a1dbeab6..c31a862380 100644 --- a/config_hub/finetune/README.md +++ b/config_hub/finetune/README.md @@ -11,9 +11,12 @@ For more information, see the [Dealing with out-of-memory (OOM) errors](../../tu | falcon-7b/lora.yaml | 7B | Alpaca 2k | 4 | 0.945 | 16.69 GB | 512 | 2 | bfloat16 | 24.88 min (1xA10G) | | falcon-7b/qlora.yaml | 7B | Alpaca 2k | 4 | 0.993 | 9.44 GB | 512 | 2 | bfloat16 | 50.76 min (1xA10G) | | | | | | | | | | | | -| gemma-2b/lora.yaml | 2B | Alpaca 2k | 3 | 1.476 | 12.62 GB | 512 | 2 | bfloat16 | 18.31 min (1xA10G) | -| gemma-2b/qlora.yaml | 2B | Alpaca 2k | 3 | 1.626 | 11.51 GB | 512 | 2 | bfloat16 | 25.29 min (1xA10G) | -| gemma-2b/full.yaml | 2B | Alpaca 2k | 0.35 | 1.046 | 18.47 GB | 512 | 2 | bfloat16 | 16.79 min (2xA10G) | +| gemma-2b/lora.yaml | 2B | Alpaca 2k | 2 | 1.476 | 12.62 GB | 512 | 2 | bfloat16 | 9.29 min (1xA10G) | +| gemma-2b/qlora.yaml | 2B | Alpaca 2k | 2 | 0.981 | 11.59 GB | 512 | 2 | bfloat16 | 12.90 min (1xA10G) | +| gemma-2b/full.yaml | 2B | Alpaca 2k | 0.35 | 0.990 | 17.43 GB | 512 | 1 | bfloat16 | 13.61 min (4xA10G) | +| | | | | | | | | | | +| gemma-7b/lora.yaml | 7B | Alpaca 2k | 2 | 0.903 | 25.30 GB | 512 | 1 | bfloat16 | 11.47 min (1xA100) | +| gemma-7b/qlora.yaml | 7B | Alpaca 2k | 2 | 0.951 | 17.31 GB | 512 | 1 | bfloat16 | 23.46 min (1xA100) | | | | | | | | | | | | | llama-2-7b/lora.yaml | 7B | Alpaca 2k | 4 | 0.802 | 19.77 GB | 512 | 2 | bfloat16 | 32.75 min (A10G) | | llama-2-7b/qlora.yaml | 7B | Alpaca 2k | 4 | 0.814 | 13.68 GB | 512 | 2 | bfloat16 | 45.68 min (A10G) | diff --git a/config_hub/finetune/gemma-2b/full.yaml b/config_hub/finetune/gemma-2b/full.yaml index 509a2675e4..77f20658ca 100644 --- a/config_hub/finetune/gemma-2b/full.yaml +++ b/config_hub/finetune/gemma-2b/full.yaml @@ -9,7 +9,7 @@ out_dir: out/finetune/full-gemma-2b precision: bf16-true # How many devices/GPUs to use. (type: Union[int, str], default: 1) -devices: 1 +devices: 4 # Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``. 
data: @@ -32,7 +32,7 @@ train: log_interval: 1 # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 128) - global_batch_size: 6 + global_batch_size: 16 # Number of samples per data-parallel rank (type: int, default: 4) micro_batch_size: 1 @@ -41,13 +41,13 @@ train: lr_warmup_steps: 100 # Number of epochs to train on (type: Optional[int], default: 5) - epochs: 3 + epochs: 1 # Total number of tokens to train on (type: Optional[int], default: null) max_tokens: # Limits the number of optimizer steps to run. (type: Optional[int], default: null) - max_steps: + max_steps: 50 # Limits the length of samples. Off by default (type: Optional[int], default: null) max_seq_length: 512 diff --git a/config_hub/finetune/gemma-2b/lora.yaml b/config_hub/finetune/gemma-2b/lora.yaml index 72d56fc22b..c9f912a47c 100644 --- a/config_hub/finetune/gemma-2b/lora.yaml +++ b/config_hub/finetune/gemma-2b/lora.yaml @@ -15,7 +15,7 @@ quantize: devices: 1 # The LoRA rank. (type: int, default: 8) -lora_r: 16 +lora_r: 8 # The LoRA alpha. (type: int, default: 16) lora_alpha: 16 @@ -71,7 +71,7 @@ train: lr_warmup_steps: 200 # Number of epochs to train on (type: Optional[int], default: 5) - epochs: 4 + epochs: 2 # Total number of tokens to train on (type: Optional[int], default: null) max_tokens: diff --git a/config_hub/finetune/gemma-2b/qlora.yaml b/config_hub/finetune/gemma-2b/qlora.yaml index 4c26c9cee8..dc15fe90d3 100644 --- a/config_hub/finetune/gemma-2b/qlora.yaml +++ b/config_hub/finetune/gemma-2b/qlora.yaml @@ -71,7 +71,7 @@ train: lr_warmup_steps: 200 # Number of epochs to train on (type: Optional[int], default: 5) - epochs: 4 + epochs: 2 # Total number of tokens to train on (type: Optional[int], default: null) max_tokens: diff --git a/config_hub/finetune/gemma-7b/lora.yaml b/config_hub/finetune/gemma-7b/lora.yaml new file mode 100644 index 0000000000..d7d56f5b5c --- /dev/null +++ b/config_hub/finetune/gemma-7b/lora.yaml @@ -0,0 +1,122 @@ + +# The path to the base model's checkpoint directory to load for finetuning. (type: , default: checkpoints/stabilityai/stablelm-base-alpha-3b) +checkpoint_dir: checkpoints/google/gemma-7b + +# Directory in which to save checkpoints and logs. (type: , default: out/lora) +out_dir: out/finetune/qlora-gemma-7b + +# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null) +precision: bf16-true + +# If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information. (type: Optional[Literal['nf4', 'nf4-dq', 'fp4', 'fp4-dq', 'int8-training']], default: null) +quantize: + +# How many devices/GPUs to use. (type: Union[int, str], default: 1) +devices: 1 + +# The LoRA rank. (type: int, default: 8) +lora_r: 16 + +# The LoRA alpha. (type: int, default: 16) +lora_alpha: 16 + +# The LoRA dropout value. (type: float, default: 0.05) +lora_dropout: 0.1 + +# Whether to apply LoRA to the query weights in attention. (type: bool, default: True) +lora_query: true + +# Whether to apply LoRA to the key weights in attention. (type: bool, default: False) +lora_key: true + +# Whether to apply LoRA to the value weights in attention. (type: bool, default: True) +lora_value: true + +# Whether to apply LoRA to the output projection in the attention block. (type: bool, default: False) +lora_projection: true + +# Whether to apply LoRA to the weights of the MLP in the attention block. 
(type: bool, default: False) +lora_mlp: true + +# Whether to apply LoRA to output head in GPT. (type: bool, default: False) +lora_head: true + +# Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``. +data: + class_path: litgpt.data.Alpaca2k + init_args: + mask_prompt: false + val_split_fraction: 0.03847 + prompt_style: alpaca + ignore_index: -100 + seed: 42 + num_workers: 4 + +# Training-related arguments. See ``litgpt.args.TrainArgs`` for details +train: + + # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000) + save_interval: 800 + + # Number of iterations between logging calls (type: int, default: 1) + log_interval: 1 + + # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 128) + global_batch_size: 6 + + # Number of samples per data-parallel rank (type: int, default: 4) + micro_batch_size: 1 + + # Number of iterations with learning rate warmup active (type: int, default: 100) + lr_warmup_steps: 200 + + # Number of epochs to train on (type: Optional[int], default: 5) + epochs: 2 + + # Total number of tokens to train on (type: Optional[int], default: null) + max_tokens: + + # Limits the number of optimizer steps to run. (type: Optional[int], default: null) + max_steps: + + # Limits the length of samples. Off by default (type: Optional[int], default: null) + max_seq_length: 512 + + # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: null) + tie_embeddings: + + # (type: float, default: 0.0003) + learning_rate: 0.0002 + + # (type: float, default: 0.02) + weight_decay: 0.0 + + # (type: float, default: 0.9) + beta1: 0.9 + + # (type: float, default: 0.95) + beta2: 0.95 + + # (type: Optional[float], default: null) + max_norm: + + # (type: float, default: 6e-05) + min_lr: 6.0e-05 + +# Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details +eval: + + # Number of optimizer steps between evaluation calls (type: int, default: 100) + interval: 25 + + # Number of tokens to generate (type: Optional[int], default: 100) + max_new_tokens: 100 + + # Number of iterations (type: int, default: 100) + max_iters: 100 + +# The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: csv) +logger_name: csv + +# The random seed to use for reproducibility. (type: int, default: 1337) +seed: 1337 diff --git a/config_hub/finetune/gemma-7b/qlora.yaml b/config_hub/finetune/gemma-7b/qlora.yaml new file mode 100644 index 0000000000..7d4a2c634c --- /dev/null +++ b/config_hub/finetune/gemma-7b/qlora.yaml @@ -0,0 +1,122 @@ + +# The path to the base model's checkpoint directory to load for finetuning. (type: , default: checkpoints/stabilityai/stablelm-base-alpha-3b) +checkpoint_dir: checkpoints/google/gemma-7b + +# Directory in which to save checkpoints and logs. (type: , default: out/lora) +out_dir: out/finetune/qlora-gemma-7b + +# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null) +precision: bf16-true + +# If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information. (type: Optional[Literal['nf4', 'nf4-dq', 'fp4', 'fp4-dq', 'int8-training']], default: null) +quantize: bnb.nf4 + +# How many devices/GPUs to use. (type: Union[int, str], default: 1) +devices: 1 + +# The LoRA rank. (type: int, default: 8) +lora_r: 16 + +# The LoRA alpha. 
(type: int, default: 16) +lora_alpha: 16 + +# The LoRA dropout value. (type: float, default: 0.05) +lora_dropout: 0.1 + +# Whether to apply LoRA to the query weights in attention. (type: bool, default: True) +lora_query: true + +# Whether to apply LoRA to the key weights in attention. (type: bool, default: False) +lora_key: true + +# Whether to apply LoRA to the value weights in attention. (type: bool, default: True) +lora_value: true + +# Whether to apply LoRA to the output projection in the attention block. (type: bool, default: False) +lora_projection: true + +# Whether to apply LoRA to the weights of the MLP in the attention block. (type: bool, default: False) +lora_mlp: true + +# Whether to apply LoRA to output head in GPT. (type: bool, default: False) +lora_head: true + +# Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``. +data: + class_path: litgpt.data.Alpaca2k + init_args: + mask_prompt: false + val_split_fraction: 0.03847 + prompt_style: alpaca + ignore_index: -100 + seed: 42 + num_workers: 4 + +# Training-related arguments. See ``litgpt.args.TrainArgs`` for details +train: + + # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000) + save_interval: 800 + + # Number of iterations between logging calls (type: int, default: 1) + log_interval: 1 + + # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 128) + global_batch_size: 6 + + # Number of samples per data-parallel rank (type: int, default: 4) + micro_batch_size: 1 + + # Number of iterations with learning rate warmup active (type: int, default: 100) + lr_warmup_steps: 200 + + # Number of epochs to train on (type: Optional[int], default: 5) + epochs: 2 + + # Total number of tokens to train on (type: Optional[int], default: null) + max_tokens: + + # Limits the number of optimizer steps to run. (type: Optional[int], default: null) + max_steps: + + # Limits the length of samples. Off by default (type: Optional[int], default: null) + max_seq_length: 512 + + # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: null) + tie_embeddings: + + # (type: float, default: 0.0003) + learning_rate: 0.0002 + + # (type: float, default: 0.02) + weight_decay: 0.0 + + # (type: float, default: 0.9) + beta1: 0.9 + + # (type: float, default: 0.95) + beta2: 0.95 + + # (type: Optional[float], default: null) + max_norm: + + # (type: float, default: 6e-05) + min_lr: 6.0e-05 + +# Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details +eval: + + # Number of optimizer steps between evaluation calls (type: int, default: 100) + interval: 25 + + # Number of tokens to generate (type: Optional[int], default: 100) + max_new_tokens: 100 + + # Number of iterations (type: int, default: 100) + max_iters: 100 + +# The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: csv) +logger_name: csv + +# The random seed to use for reproducibility. 
(type: int, default: 1337) +seed: 1337 From a67dd5c743aa273babf5daeff890f99955d871af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Thu, 28 Mar 2024 00:42:19 +0100 Subject: [PATCH 21/37] Smarter `thunder.jit` decisions (#1204) --- extensions/thunder/strategies/thunder_ddp.py | 39 +++++++- extensions/thunder/strategies/thunder_fsdp.py | 88 ++++++++++++++++--- tests/test_thunder_ddp.py | 44 +++++++++- tests/test_thunder_fsdp.py | 37 ++++++++ 4 files changed, 193 insertions(+), 15 deletions(-) diff --git a/extensions/thunder/strategies/thunder_ddp.py b/extensions/thunder/strategies/thunder_ddp.py index 2afa7290e1..4efbe27c60 100644 --- a/extensions/thunder/strategies/thunder_ddp.py +++ b/extensions/thunder/strategies/thunder_ddp.py @@ -45,17 +45,35 @@ def __init__( cluster_environment: Optional[ClusterEnvironment] = None, checkpoint_io: Optional[CheckpointIO] = None, precision: Optional[Precision] = None, + jit: bool = True, executors: Optional[Tuple[Union["Executor", str], ...]] = None, process_group_backend: Optional[str] = None, timeout: Optional[timedelta] = default_pg_timeout, **kwargs: Any, ): + r"""Strategy for Replicated Data Parallel provided by Lightning Thunder. + + .. warning:: This is an :ref:`experimental ` feature. + + Arguments: + jit: Whether to automatically call ``thunder.jit(model)`` if necessary. Disable this if you are manually + jitting a function that includes the model. + + executors: The list of Thunder executors to enable. They can be either string aliases for the executors + or the actual executor instances. + + \**kwargs: See available parameters in :func:`thunder.distributed.ddp`. + + """ if not _THUNDER_AVAILABLE: raise ModuleNotFoundError(str(_THUNDER_AVAILABLE)) super().__init__(accelerator=accelerator, checkpoint_io=checkpoint_io, precision=precision) self.parallel_devices = parallel_devices self.cluster_environment: Optional[ClusterEnvironment] = cluster_environment + if not jit and executors is not None: + raise ValueError(f"Passing executors={executors} doesn't have an effect with `jit={jit}`") + self.jit = jit self.executors = _validate_executors(executors) self._num_nodes = 1 self._process_group_backend: Optional[str] = process_group_backend @@ -111,8 +129,25 @@ def setup_environment(self) -> None: def setup_module(self, module: Module) -> Module: import thunder - module = thunder.distributed.ddp(module, **self._ddp_kwargs) - + if (cd := thunder.compile_data(module)) is not None: + # the module was already jitted + if thunder.compile_stats(module).last_traces is not None: + raise RuntimeError( + "You already called `thunder.jit()` and generated an execution trace. It's too late to apply the" + " DDP transform. 
Remove the `forward` call before `fabric.setup()`" + ) + assert cd.is_module # sanity check + ddp_module = thunder.distributed.ddp(cd.fn, **self._ddp_kwargs) + # update the compile data state + cd.fn = ddp_module + assert hasattr(cd, "_processed_function") # sanity check + cd._processed_function = ddp_module + cd.process_group_for_ddp = ddp_module.process_group_for_ddp + return module + else: + module = thunder.distributed.ddp(module, **self._ddp_kwargs) + if not self.jit: + return module return thunder.jit(module, executors=self.executors) @override diff --git a/extensions/thunder/strategies/thunder_fsdp.py b/extensions/thunder/strategies/thunder_fsdp.py index 6fd2200d70..d4e60c0085 100644 --- a/extensions/thunder/strategies/thunder_fsdp.py +++ b/extensions/thunder/strategies/thunder_fsdp.py @@ -54,12 +54,54 @@ def __init__( cluster_environment: Optional[ClusterEnvironment] = None, checkpoint_io: Optional[CheckpointIO] = None, precision: Optional[Precision] = None, + jit: bool = True, + executors: Optional[Tuple[Union["Executor", str], ...]] = None, sharding_strategy: "_FSDP_TYPE" = "ZERO3", bucketing_strategy: "_BUCKETING_STRATEGY" = "NONE", - executors: Optional[Tuple[Union["Executor", str], ...]] = None, state_dict_type: Literal["full", "sharded"] = "sharded", **kwargs: Any, ): + r"""Strategy for Fully Sharded Data Parallel provided by Lightning Thunder. + + .. warning:: This is an :ref:`experimental ` feature. + + Fully Sharded Training shards the entire model across all available GPUs, allowing you to scale model + size, whilst using efficient communication to reduce overhead. In practice, this means we can remain + at parity with PyTorch DDP, whilst scaling our model sizes dramatically. + + Arguments: + jit: Whether to automatically call ``thunder.jit(model)`` if necessary. Disable this if you are manually + jitting a function that includes the model. + + executors: The list of Thunder executors to enable. They can be either string aliases for the executors + or the actual executor instances. + + sharding_strategy: Select whether to shard model parameters, gradients, optimizer states, or a combination + of them: + + - ``"ZERO3"``: Shards model parameters, gradients, and optimizer states (default). + - ``"ZERO2"``: Shards gradients and optimizer states only. Model parameters get replicated. + + Also accepts a :class:`thunder.distributed.FSDPType` enum value. + + bucketing_strategy: Enables combining the collective operations for sets of layers. + + - ``"NONE"``: No bucketing (default). + - ``"LAYER"``: Create buckets per layer class. + - ``"BLOCK"``: Create buckets per layer block. + + Also accepts a :class:`thunder.distributed.FSDPBucketingStrategy` enum value. + + state_dict_type: The format in which the state of the model and optimizers gets saved into the checkpoint. + + - ``"full"``: The full weights and optimizer states get assembled on rank 0 and saved to a single file + (default). + - ``"sharded"``: Each rank saves its shard of weights and optimizer states to a file. The checkpoint is + a folder with as many files as the world size. + + \**kwargs: See available parameters in :func:`thunder.distributed.fsdp`. 
+ + """ if not _TORCH_GREATER_EQUAL_2_2: raise ImportError("Thunder's FSDP strategy requires PyTorch 2.2 or higher.") if not _THUNDER_AVAILABLE: @@ -77,6 +119,9 @@ def __init__( if isinstance(bucketing_strategy, str) else bucketing_strategy ) + if not jit and executors is not None: + raise ValueError(f"Passing executors={executors} doesn't have an effect with `jit={jit}`") + self.jit = jit self.executors = _validate_executors(executors) self._state_dict_type = state_dict_type self._fsdp_kwargs = kwargs @@ -115,16 +160,37 @@ def setup_environment(self) -> None: def setup_module(self, module: Module) -> Module: import thunder - module = thunder.distributed.fsdp( - module, - device=self.root_device, - sharding_strategy=self.sharding_strategy, - bucketing_strategy=self.bucketing_strategy, - **self._fsdp_kwargs, - ) - - # NOTE @IvanYaschuck says that `fsdp(jit(model))` could be supported in the future so that the user owns the `jit` call. - # we would still `jit(fsdp(undo_jit(jit(model))))` internally + if (cd := thunder.compile_data(module)) is not None: + # the module was already jitted + if thunder.compile_stats(module).last_traces is not None: + raise RuntimeError( + "You already called `thunder.jit()` and generated an execution trace. It's too late to apply the" + " FSDP transform. Remove the `forward` call before `fabric.setup()`" + ) + assert cd.is_module # sanity check + fsdp_module = thunder.distributed.fsdp( + cd.fn, + device=self.root_device, + sharding_strategy=self.sharding_strategy, + bucketing_strategy=self.bucketing_strategy, + **self._fsdp_kwargs, + ) + # update the compile data state + cd.fn = fsdp_module + assert hasattr(cd, "_processed_function") # sanity check + cd._processed_function = fsdp_module + cd.process_group_for_ddp = fsdp_module.process_group_for_ddp + return module + else: + module = thunder.distributed.fsdp( + module, + device=self.root_device, + sharding_strategy=self.sharding_strategy, + bucketing_strategy=self.bucketing_strategy, + **self._fsdp_kwargs, + ) + if not self.jit: + return module return thunder.jit(module, executors=self.executors) @override diff --git a/tests/test_thunder_ddp.py b/tests/test_thunder_ddp.py index 5ccc853eea..566e883ac3 100644 --- a/tests/test_thunder_ddp.py +++ b/tests/test_thunder_ddp.py @@ -10,13 +10,19 @@ wd = Path(__file__).parent.parent.resolve() sys.path.append(str(wd)) +from extensions.thunder.strategies.thunder_ddp import ThunderDDPStrategy + + +@RunIf(thunder=True) +def test_thunder_strategy_input_parsing(): + with pytest.raises(ValueError, match="doesn't have an effect with `jit=False"): + ThunderDDPStrategy(jit=False, executors=("python",)) + @RunIf(min_cuda_gpus=2, thunder=True, standalone=True) @pytest.mark.parametrize("strategy", ["ddp", "thunder_ddp"]) def test_no_backward_sync(strategy): if strategy == "thunder_ddp": - from extensions.thunder.strategies.thunder_ddp import ThunderDDPStrategy - strategy = ThunderDDPStrategy() fabric = Fabric(devices=2, accelerator="cuda", strategy=strategy) @@ -47,3 +53,37 @@ def test_no_backward_sync(strategy): # rank0 rank1 allreduce1 rank0 rank1 allreduce2 assert model.weight.grad.item() == (9.0 if i == 3 else 22.5) model.weight.grad = None + + +@RunIf(min_cuda_gpus=2, thunder=True, standalone=True) +@pytest.mark.parametrize("jit", (False, True)) +def test_jit_before_setup(jit): + import thunder + + fabric = Fabric(devices=2, accelerator="cuda", strategy=ThunderDDPStrategy(jit=jit)) + fabric.launch() + + x = torch.randn(1, 1, device=fabric.device) + model = torch.nn.Linear(1, 2, 
bias=False, device=fabric.device) + + tmodel = thunder.jit(model) + fmodel = fabric.setup(tmodel) + fmodel(x) + + assert "all_reduce" in thunder.last_backward_traces(tmodel)[-1].python() + + +@RunIf(min_cuda_gpus=1, thunder=True) +def test_setup_already_traced(): + import thunder + + device = torch.device("cuda") + x = torch.randn(1, 1, device=device) + model = torch.nn.Linear(1, 2, bias=False, device=device) + + strategy = ThunderDDPStrategy() + + tmodel = thunder.jit(model) + tmodel(x) + with pytest.raises(RuntimeError, match="already called"): + strategy.setup_module(tmodel) diff --git a/tests/test_thunder_fsdp.py b/tests/test_thunder_fsdp.py index 76dc36bae6..8b9c0f4340 100644 --- a/tests/test_thunder_fsdp.py +++ b/tests/test_thunder_fsdp.py @@ -28,6 +28,9 @@ def test_thunder_strategy_input_parsing(): assert strategy.executors == (pythonex,) assert strategy.sharding_strategy is FSDPType.ZERO3 + with pytest.raises(ValueError, match="doesn't have an effect with `jit=False"): + ThunderFSDPStrategy(jit=False, executors=("python",)) + @RunIf(thunder=True) def test_validate_executors(): @@ -309,3 +312,37 @@ def test_save_load_sharded_checkpoint(tmp_path): actual["buf"] = actual["buf"].to(device="cpu") torch.testing.assert_close(actual, expected) assert state["primitive"] == 123 + + +@RunIf(min_cuda_gpus=2, thunder=True, standalone=True) +@pytest.mark.parametrize("jit", (False, True)) +def test_jit_before_setup(jit): + import thunder + + fabric = Fabric(devices=2, accelerator="cuda", strategy=ThunderFSDPStrategy(jit=jit)) + fabric.launch() + + x = torch.randn(1, 1, device=fabric.device) + model = torch.nn.Linear(1, 2, bias=False, device=fabric.device) + + tmodel = thunder.jit(model) + fmodel = fabric.setup(tmodel) + fmodel(x) + + assert "all_gather" in thunder.last_traces(tmodel)[-1].python() + + +@RunIf(min_cuda_gpus=1, thunder=True) +def test_setup_already_traced(): + import thunder + + device = torch.device("cuda") + x = torch.randn(1, 1, device=device) + model = torch.nn.Linear(1, 2, bias=False, device=device) + + strategy = ThunderFSDPStrategy() + + tmodel = thunder.jit(model) + tmodel(x) + with pytest.raises(RuntimeError, match="already called"): + strategy.setup_module(tmodel) From 605d9491eb0394cd8d90b7ba906e6e859802c455 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Thu, 28 Mar 2024 03:51:09 +0100 Subject: [PATCH 22/37] Download fixes for tinystories --- litgpt/data/tinystories.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/litgpt/data/tinystories.py b/litgpt/data/tinystories.py index b494f3e9ef..90fce42341 100644 --- a/litgpt/data/tinystories.py +++ b/litgpt/data/tinystories.py @@ -52,7 +52,7 @@ def prepare_data(self) -> None: assert len(files) > 0, f"No json files found in {files}" assert len(files) > 1, f"Expected at least two json files in {files}" # train/test split. 
let's use only shard 0 for test split, rest train - val_files, *train_files = files + val_file, *train_files = files num_workers = os.cpu_count() - 1 if not Path(self.data_path_train).is_dir(): @@ -66,9 +66,9 @@ def prepare_data(self) -> None: if not Path(self.data_path_val).is_dir(): optimize( fn=partial(tokenize, tokenizer=self.tokenizer), - inputs=val_files, + inputs=[val_file], output_dir=str(self.data_path_val), - num_workers=num_workers, + num_workers=1, # there's only 1 file chunk_bytes="200MB", ) @@ -119,8 +119,9 @@ def tokenize(filename: str, tokenizer: Tokenizer): def download(data_dir: Path): - data_dir.mkdir(exist_ok=True) + data_dir.mkdir(exist_ok=True, parents=True) + data_tar = data_dir / "TinyStories_all_data.tar.gz" data_dir = data_dir / "TinyStories_all_data" shard_filenames = sorted(glob.glob(str(data_dir / "*.json"))) if shard_filenames: @@ -128,13 +129,12 @@ def download(data_dir: Path): return # download the TinyStories dataset, unless it's already downloaded - data_filename = data_dir / "TinyStories_all_data.tar.gz" - download_if_missing(data_filename, _URL, stream=True, mode="wb") - print("Download done.") + download_if_missing(data_tar, _URL, stream=True, mode="wb") # unpack the tar.gz file into all the data shards (json files) - data_dir.mkdir(exist_ok=True) - print(f"Unpacking {data_filename}...") - os.system(f"tar -xzf {data_filename} -C {data_dir}") + data_dir.mkdir(exist_ok=False) + tar_command = f"tar -xzf {data_tar} -C {data_dir}" + print(tar_command) + os.system(tar_command) shard_filenames = sorted(glob.glob(str(data_dir / "*.json"))) print(f"Number of shards: {len(shard_filenames)}") From 660d936ece40d9240f2428e35643ab9599c5617d Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Thu, 28 Mar 2024 11:08:12 -0500 Subject: [PATCH 23/37] List supported pretraining model names (#1136) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- litgpt/pretrain.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/litgpt/pretrain.py b/litgpt/pretrain.py index b6f8560861..3119e5d9d6 100644 --- a/litgpt/pretrain.py +++ b/litgpt/pretrain.py @@ -91,7 +91,9 @@ def setup( if model_config is not None and model_name is not None: raise ValueError("Only one of `model_name` or `model_config` can be set.") elif model_config is None and model_name is None: - model_name = "tiny-llama-1.1b" + from litgpt.config import name_to_config + available_models = "\n".join(sorted(name_to_config)) + raise ValueError(f"Please specify --model_name . 
Available values:\n{available_models}") config = Config.from_name(model_name) if model_config is None else model_config devices = parse_devices(devices) out_dir = init_out_dir(out_dir) From 3cf6493cdc7c90c72c5a5bf12d07490cf798327f Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Thu, 28 Mar 2024 12:02:06 -0500 Subject: [PATCH 24/37] Zero to LitGPT (#1165) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Carlos Mocholí --- README.md | 19 +- tutorials/0_to_litgpt.md | 549 ++++++++++++++++++ tutorials/images/0_to_litgpt/4-commands.webp | Bin 0 -> 33466 bytes tutorials/images/0_to_litgpt/finetune.webp | Bin 0 -> 25058 bytes .../images/0_to_litgpt/instruction-1.webp | Bin 0 -> 84740 bytes .../images/0_to_litgpt/instruction-2.webp | Bin 0 -> 47454 bytes tutorials/images/0_to_litgpt/pretrain.webp | Bin 0 -> 22906 bytes tutorials/images/0_to_litgpt/usage.webp | Bin 0 -> 81938 bytes 8 files changed, 564 insertions(+), 4 deletions(-) create mode 100644 tutorials/0_to_litgpt.md create mode 100644 tutorials/images/0_to_litgpt/4-commands.webp create mode 100644 tutorials/images/0_to_litgpt/finetune.webp create mode 100644 tutorials/images/0_to_litgpt/instruction-1.webp create mode 100644 tutorials/images/0_to_litgpt/instruction-2.webp create mode 100644 tutorials/images/0_to_litgpt/pretrain.webp create mode 100644 tutorials/images/0_to_litgpt/usage.webp diff --git a/README.md b/README.md index 239179af99..a87655d355 100644 --- a/README.md +++ b/README.md @@ -35,13 +35,12 @@ ✅  [Quantization](tutorials/quantize.md): 4-bit floats, 8-bit integers, and double quantization. -✅  [Exporting](https://github.com/Lightning-AI/litgpt/blob/wip/tutorials/convert_lit_models.md) to other popular model weight formats. +✅  [Exporting](tutorials/convert_lit_models.md) to other popular model weight formats. ✅  Many popular datasets for [pretraining](tutorials/pretrain_tinyllama.md) and [finetuning](tutorials/prepare_dataset.md), and [support for custom datasets](tutorials/prepare_dataset.md#preparing-custom-datasets-for-instruction-finetuning). ✅  Readable and easy-to-modify code to experiment with the latest research ideas. -  
  @@ -59,8 +58,6 @@ The following [Lightning Studio](https://lightning.ai/lightning-ai/studios) temp - -  
  @@ -107,6 +104,14 @@ For more information, refer to the [download](tutorials/download_model_weights.m   + +> [!NOTE] +> We recommend starting with the **[Zero to Pretraining, Finetuning, and Using LLMs with LitGPT](tutorials/0_to_litgpt.md)** if you are looking to get started with using LitGPT. + + + +  + + ## Finetuning and pretraining LitGPT supports [pretraining](tutorials/pretrain_tinyllama.md) and [finetuning](tutorials/finetune.md) to optimize models on existing or custom datasets. Below is an example showing how to finetune a model with LoRA: @@ -324,6 +329,12 @@ If you have general questions about building with LitGPT, please [join our Disco ## Tutorials, how-to guides, and docs + +> [!NOTE] +> We recommend starting with the **[Zero to Pretraining, Finetuning, and Using LLMs with LitGPT](tutorials/0_to_litgpt.md)** if you are looking to get started with using LitGPT. + +Tutorials and in-depth feature documentation can be found below: + - Finetuning, incl. LoRA, QLoRA, and Adapters ([tutorials/finetune.md](tutorials/finetune.md)) - Pretraining ([tutorials/pretrain_tinyllama.md](tutorials/pretrain_tinyllama.md)) - Model evaluation ([tutorials/evaluation.md](tutorials/evaluation.md)) diff --git a/tutorials/0_to_litgpt.md b/tutorials/0_to_litgpt.md new file mode 100644 index 0000000000..415190fec6 --- /dev/null +++ b/tutorials/0_to_litgpt.md @@ -0,0 +1,549 @@ +# Zero to LitGPT: Getting Started with Pretraining, Finetuning, and Using LLMs + + + +This tutorial walks you through the main features and usage patterns for ⚡️LitGPT, a library for pretraining, finetuning, and using LLMs that focuses on an efficient user experience while being developer-friendly. + +The topics, following the installation of LitGPT, are in chronological order, reflecting the steps in an LLM lifecycle: Pretraining → Finetuning → Inference. + +  + + + +  + + + +  + +However, it is also possible, and even common, to use and deploy models with LitGPT without pretraining and finetuning. So, if you are not interested in pretraining and finetuning, please feel free to skip these sections. + + + + + +  +## Install LitGPT + +LitGPT is available as a Python library from the PyPI package repository, and we recommend installing it using Python's `pip` installer module, including all required package dependencies: + +```bash +pip install 'litgpt[all]' +``` + +Alternatively, if you are a researcher or developer planning to make changes to LitGPT, you can clone the GitHub repository and install it from a local folder as follows: + +``` +git clone https://github.com/Lightning-AI/litgpt.git +cd litgpt +pip install -e '.[all]' +``` + + +  +## Pretrain LLMs + +Pretraining LLMs requires substantial compute resources and time commitment. For that reason, most researchers and practitioners prefer to skip this step and continue with the Download pretrained model weights section instead. + +However, if you feel adventurous and want to pretrain your own LLM, here's how. + +First, we have to decide which type of model architecture we want to use. We list the available architectures by using the `pretrain` command without any additional arguments: + +```bash +litgpt pretrain +``` + +This prints a list of all available model architectures in alphabetical order: + +``` +Camel-Platypus2-13B +Camel-Platypus2-70B +CodeLlama-13b-Python-hf +... 
+tiny-llama-1.1b +vicuna-13b-v1.3 +vicuna-13b-v1.5 +vicuna-13b-v1.5-16k +vicuna-33b-v1.3 +vicuna-7b-v1.3 +vicuna-7b-v1.5 +vicuna-7b-v1.5-16k +``` + +Suppose we want to pretrain the small 1.1B parameter `tiny-llama-1.1b` model. Before starting pretraining, we must also choose and download a tokenizer. + +We can download a tokenizer via the `download` command. Note that running `litgpt download` without any additional arguments will also print a list of all available models and tokenizers to download. + +To filter for specific models, e.g., TinyLlama, we can use the `grep` command in our terminal: + +```bash +litgpt download | grep TinyLlama +``` + +This prints + +``` +TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T +TinyLlama/TinyLlama-1.1B-Chat-v1.0 +``` + +Let's now download the tokenizer corresponding to `TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T` that we can then use to pretrain the TinyLlama model, which saves the downloaded tokenizer to a `checkpoints/` folder by default: + +``` +litgpt download \ + --repo_id TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T \ + --tokenizer_only true +``` + +  + + + +  + +Next, we can pretrain the model on the OpenWebText dataset with the default setting as follows: + +```bash +litgpt pretrain \ + --model_name tiny-llama-1.1b \ + --data OpenWebText \ + --tokenizer_dir checkpoints/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T +``` + +If you are interested in additional settings, you can use the help command as follows: + +``` +litgpt pretrain --help +``` + +  + +> [!TIP] +> Above, we only covered the most basic commands for pretraining a model using LitGPT. We highly recommend checking the resources below if you are interested in pretraining a model. + +  + +**More information and additional resources** + +- [tutorials/pretrain_tinyllama](./pretrain_tinyllama.md): A tutorial for pretraining a 1.1B TinyLlama model on 3 trillion tokens +- [config_hub/pretrain](../config_hub/pretrain): Pre-made config files for pretraining that work well out of the box +- Project templates in reproducible environments with multi-GPU and multi-node support: + - [Prepare the TinyLlama 1T token dataset](https://lightning.ai/lightning-ai/studios/prepare-the-tinyllama-1t-token-dataset) + - [Pretrain LLMs - TinyLlama 1.1B](https://lightning.ai/lightning-ai/studios/pretrain-llms-tinyllama-1-1b) + - [Continued Pretraining with TinyLlama 1.1B](https://lightning.ai/lightning-ai/studios/continued-pretraining-with-tinyllama-1-1b) + + +  +## Download pretrained model weights + +Most practical use cases, like LLM inference (/chat) or finetuning, involve using pretrained model weights. LitGPT supports a large number of model weights, which can be listed by executing the `download` command without any additional arguments: + +```bash +litgpt download +``` + +This will print a (long) list of all supported pretrained models (abbreviated for readability below): + +``` +.. +google/gemma-2b +... +meta-llama/Llama-2-7b-hf +... +microsoft/phi-2 +... +mistralai/Mixtral-8x7B-Instruct-v0.1 +... 
+``` + +To download the model weights, provide one of the model strings above as a `--repo_id` argument: + +```bash +litgpt download --repo_id microsoft/phi-2 +``` + +``` +model-00001-of-00002.safetensors: 100%|████████████████████████████████| 5.00G/5.00G [00:40<00:00, 124MB/s] +model-00002-of-00002.safetensors: 100%|████████████████████████████████| 564M/564M [00:01<00:00, 330MB/s] +tokenizer.json: 100%|██████████████████████████████████████████████████| 2.11M/2.11M [00:00<00:00, 54.0MB/s] +... +Converting checkpoint files to LitGPT format. +Processing checkpoints/microsoft/phi-2/model-00001-of-00002.bin +... +Saving converted checkpoint to checkpoints/microsoft/phi-2 +``` + + +  + +> [!TIP] +> Note that some models, such as Llama 2, require that you accept Meta AI's terms of service for this model, and you need to use a special access token via the `litgpt download ... --access_token ...` option. For more information, visit the respective Model Hub website, e.g., [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf). The access token can be created under your Model Hub in the `Profile > Access Tokens` menu. + +  + + +By default, the weights are going to be stored in a `./checkpoints` subdirectory: + +```bash +ls -lh checkpoints/microsoft/phi-2/ +``` + +``` +total 11G +-rw-r--r-- 1 sebastian sebastian 863 Mar 19 21:14 config.json +-rw-r--r-- 1 sebastian sebastian 124 Mar 19 21:14 generation_config.json +-rw-r--r-- 1 sebastian sebastian 5.2G Mar 19 21:15 lit_model.pth +-rw-r--r-- 1 sebastian sebastian 4.7G Mar 19 21:15 model-00001-of-00002.bin +-rw-r--r-- 1 sebastian sebastian 538M Mar 19 21:15 model-00002-of-00002.bin +-rw-r--r-- 1 sebastian sebastian 528 Mar 19 21:15 model_config.yaml +-rw-r--r-- 1 sebastian sebastian 2.1M Mar 19 21:14 tokenizer.json +-rw-r--r-- 1 sebastian sebastian 7.2K Mar 19 21:14 tokenizer_config.json +``` + +The model is now ready for inference and chat, for example, using the `chat` command on the checkpoint directory: + +```bash +litgpt chat --checkpoint_dir checkpoints/microsoft/phi-2 +``` + +``` +Now chatting with phi-2. +To exit, press 'Enter' on an empty prompt. + +Seed set to 1234 +>> Prompt: Why are LLMs so useful? +>> Reply: When building applications or operating systems, you can use LLMs to know how a computer should respond to your commands. This can make your programs run faster and more efficiently. + +Time for inference: 1.26 sec total, 27.81 tokens/sec, 35 tokens + +>> Prompt: +``` + + +  +**More information and additional resources** + +- [tutorials/download_model_weights](download_model_weights.md): A more comprehensive download tutorial, tips for GPU memory limitations, and more + + +  +## Finetune LLMs + +LitGPT supports several methods of supervised instruction finetuning, which allows you to finetune models to follow instructions. + +Datasets for Instruction-finetuning are usually formatted in the following way: + +  + + + +  + +Alternatively, datasets for instruction finetuning can also contain an `'input'` field: + +In an instruction-finetuning context, "full" finetuning means updating all model parameters as opposed to only a subset. Adapter and LoRA (short for low-rank adaptation) are methods for parameter-efficient finetuning that only require updating a small fraction of the model weights. + +  + + + +  + +Parameter-efficient finetuning is much more resource-efficient and cheaper than full finetuning, and it often results in the same good performance on downstream tasks. 
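+
+To get a rough sense of where these savings come from, the sketch below wraps a single frozen linear layer with a low-rank update in plain PyTorch. This is only an illustration of the idea, not LitGPT's actual LoRA implementation; the hidden dimension of 2560 is chosen to roughly match phi-2, and the `r=8` and `alpha=16` values mirror the `lora_r` and `lora_alpha` settings that appear in the finetuning configuration further below:
+
+```python
+import torch
+import torch.nn as nn
+
+
+class LoRALinear(nn.Module):
+    """Minimal LoRA sketch: y = W x + (alpha / r) * B A x, with W kept frozen."""
+
+    def __init__(self, in_features: int, out_features: int, r: int = 8, alpha: int = 16):
+        super().__init__()
+        self.base = nn.Linear(in_features, out_features, bias=False)
+        self.base.weight.requires_grad = False                 # pretrained weight stays frozen
+        self.lora_a = nn.Linear(in_features, r, bias=False)    # A: project down to rank r
+        self.lora_b = nn.Linear(r, out_features, bias=False)   # B: project back up
+        nn.init.zeros_(self.lora_b.weight)                     # the update starts as a no-op
+        self.scaling = alpha / r
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.base(x) + self.scaling * self.lora_b(self.lora_a(x))
+
+
+layer = LoRALinear(2560, 2560)  # roughly phi-2-sized, for illustration only
+trainable = sum(p.numel() for p in layer.parameters() if p.requires_grad)
+total = sum(p.numel() for p in layer.parameters())
+print(f"trainable: {trainable:,} of {total:,} parameters")  # ~41k of ~6.6M for this one layer
+```
+
+Only the two small matrices `A` and `B` receive gradients, which is why the LoRA finetuning run shown below reports roughly twelve million trainable parameters for a 2.7B parameter model.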
+ +In the following example, we will use LoRA for finetuning, which is one of the most popular LLM finetuning methods. (For more information on how LoRA works, please see [Code LoRA from Scratch](https://lightning.ai/lightning-ai/studios/code-lora-from-scratch).) + +Before we start, we have to download a model as explained in the previous "Download pretrained model" section above: + +```bash +litgpt download --repo_id microsoft/phi-2 +``` + +The LitGPT interface can be used via command line arguments and configuration files. We recommend starting with the configuration files from the [config_hub](../config_hub) and either modifying them directly or overriding specific settings via the command line. For example, we can use the following setting to train the downloaded 2.7B parameter `microsoft/phi-2` model, where we set `--max_steps 5` for a quick test run. + +If you have downloaded or cloned the LitGPT repository, you can provide the `config` file via a relative path: + +```bash +litgpt finetune lora \ + --config config_hub/finetune/phi-2/lora.yaml \ + --train.max_steps 5 +``` + +Alternatively, you can provide a URL: + +```bash +litgpt finetune lora \ + --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/phi-2/lora.yaml \ + --train.max_steps 5 +``` + + +  + + +> [!TIP] +> Note that the config file above will finetune the model on the `Alpaca2k` dataset on 1 GPU and save the resulting files in an `out/finetune/lora-phi-2` directory. All of these settings can be changed via a respective command line argument or by changing the config file. +> To see more options, execute `litgpt finetune lora --help`. + +  + +Running the previous finetuning command will initiate the finetuning process, which should only take about a minute on a GPU due to the `--train.max_steps 5` setting. + +``` +{'checkpoint_dir': PosixPath('checkpoints/microsoft/phi-2'), + 'data': Alpaca2k(mask_prompt=False, + val_split_fraction=0.03847, + prompt_style=, + ignore_index=-100, + seed=42, + num_workers=4, + download_dir=PosixPath('data/alpaca2k')), + 'devices': 1, + 'eval': EvalArgs(interval=100, max_new_tokens=100, max_iters=100), + 'logger_name': 'csv', + 'lora_alpha': 16, + 'lora_dropout': 0.05, + 'lora_head': True, + 'lora_key': True, + 'lora_mlp': True, + 'lora_projection': True, + 'lora_query': True, + 'lora_r': 8, + 'lora_value': True, + 'out_dir': PosixPath('out/finetune/lora-phi-2'), + 'precision': 'bf16-true', + 'quantize': None, + 'seed': 1337, + 'train': TrainArgs(save_interval=800, + log_interval=1, + global_batch_size=8, + micro_batch_size=4, + lr_warmup_steps=10, + epochs=1, + max_tokens=None, + max_steps=5, + max_seq_length=512, + tie_embeddings=None, + learning_rate=0.0002, + weight_decay=0.0, + beta1=0.9, + beta2=0.95, + max_norm=None, + min_lr=6e-05)} +Seed set to 1337 +Number of trainable parameters: 12,226,560 +Number of non-trainable parameters: 2,779,683,840 +The longest sequence length in the train data is 512, the model's maximum sequence length is 512 and context length is 2048 +Validating ... +Recommend a movie for me to watch during the weekend and explain the reason. +Below is an instruction that describes a task. Write a response that appropriately completes the request. + +### Instruction: +Recommend a movie for me to watch during the weekend and explain the reason. + +### Response: +I recommend you watch "Parasite" because it's a critically acclaimed movie that won multiple awards, including the Academy Award for Best Picture. 
It's a thought-provoking and suspenseful film that will keep you on the edge of your seat. The movie also tackles social and economic inequalities, making it a must-watch for anyone interested in meaningful storytelling. + +/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/torchmetrics/utilities/prints.py:43: UserWarning: The ``compute`` method of metric MeanMetric was called before the ``update`` method which may lead to errors, as metric states have not yet been updated. + warnings.warn(*args, **kwargs) # noqa: B028 +Missing logger folder: out/finetune/lora-phi-2/logs/csv +Epoch 1 | iter 1 step 0 | loss train: 1.646, val: n/a | iter time: 820.31 ms +Epoch 1 | iter 2 step 1 | loss train: 1.660, val: n/a | iter time: 548.72 ms (step) +Epoch 1 | iter 3 step 1 | loss train: 1.687, val: n/a | iter time: 300.07 ms +Epoch 1 | iter 4 step 2 | loss train: 1.597, val: n/a | iter time: 595.27 ms (step) +Epoch 1 | iter 5 step 2 | loss train: 1.640, val: n/a | iter time: 260.75 ms +Epoch 1 | iter 6 step 3 | loss train: 1.703, val: n/a | iter time: 568.22 ms (step) +Epoch 1 | iter 7 step 3 | loss train: 1.678, val: n/a | iter time: 511.70 ms +Epoch 1 | iter 8 step 4 | loss train: 1.741, val: n/a | iter time: 514.14 ms (step) +Epoch 1 | iter 9 step 4 | loss train: 1.689, val: n/a | iter time: 423.59 ms +Epoch 1 | iter 10 step 5 | loss train: 1.524, val: n/a | iter time: 603.03 ms (step) +Training time: 11.20s +Memory used: 13.90 GB +Saving LoRA weights to 'out/finetune/lora-phi-2/final/lit_model.pth.lora' +Saved merged weights to 'out/finetune/lora-phi-2/final/lit_model.pth' +``` + +Notice that the LoRA script saves both the LoRA weights (`'out/finetune/lora-phi-2/final/lit_model.pth.lora'`) and the LoRA weight merged back into the original model (`'out/finetune/lora-phi-2/final/lit_model.pth'`) for convenience. This allows us to use the finetuned model via the `chat` function directly: + +```bash +litgpt chat --checkpoint_dir out/finetune/lora-phi-2/final/ +``` + +``` +Now chatting with phi-2. +To exit, press 'Enter' on an empty prompt. + +Seed set to 1234 +>> Prompt: Why are LLMs so useful? +>> Reply: LLMs are useful because they can be trained to perform various natural language tasks, such as language translation, text generation, and question-answering. They are also able to understand the context of the input data, which makes them particularly useful for tasks such as sentiment analysis and text summarization. Additionally, because LLMs can learn from large amounts of data, they are able to generalize well and perform well on new data. 
+ +Time for inference: 2.15 sec total, 39.57 tokens/sec, 85 tokens + +>> Prompt: +``` + + + +  + +**More information and additional resources** + +- [tutorials/prepare_dataset](prepare_dataset): A summary of all out-of-the-box supported datasets in LitGPT and utilities for preparing custom datasets +- [tutorials/finetune](finetune.md): An overview of the different finetuning methods supported in LitGPT +- [tutorials/finetune_full](finetune_full.md): A tutorial on full-parameter finetuning +- [tutorials/finetune_lora](finetune_lora.md): Options for parameter-efficient finetuning with LoRA and QLoRA +- [tutorials/finetune_adapter](finetune_adapter.md): A description of the parameter-efficient Llama-Adapter methods supported in LitGPT +- [tutorials/oom](oom.md): Tips for dealing with out-of-memory (OOM) errors +- [config_hub/finetune](../config_hub/finetune): Pre-made config files for finetuning that work well out of the box + +  +## LLM inference + +To use a downloaded or finetuned model for chat, you only need to provide the corresponding checkpoint directory containing the model and tokenizer files. For example, to chat with the phi-2 model from Microsoft, download it as follows, as described in the "Download pretrained model" section: + +```bash +litgpt download --repo_id microsoft/phi-2 +``` + +``` +model-00001-of-00002.safetensors: 100%|████████████████████████████████| 5.00G/5.00G [00:40<00:00, 124MB/s] +model-00002-of-00002.safetensors: 100%|████████████████████████████████| 564M/564M [00:01<00:00, 330MB/s] +tokenizer.json: 100%|██████████████████████████████████████████████████| 2.11M/2.11M [00:00<00:00, 54.0MB/s] +... +Converting checkpoint files to LitGPT format. +Processing checkpoints/microsoft/phi-2/model-00001-of-00002.bin +... +Saving converted checkpoint to checkpoints/microsoft/phi-2 +``` + + + + + +Then, chat with the model using the following command: + +```bash +litgpt chat --checkpoint_dir checkpoints/microsoft/phi-2 +``` + +``` +Now chatting with phi-2. +To exit, press 'Enter' on an empty prompt. + +Seed set to 1234 +>> Prompt: What is the main difference between a large language model and a traditional search engine? +>> Reply: A large language model uses deep learning algorithms to analyze and generate natural language, while a traditional search engine uses algorithms to retrieve information from web pages. + +Time for inference: 1.14 sec total, 26.26 tokens/sec, 30 tokens +``` + +> [!TIP] +> Most model weights are already represented in an efficient bfloat16 format. However, if the model currently exceeds your GPU memory, you can try to pass the `--precision bf16-true` option. In addition, you can check the quantization documentation for further optimization, which is linked below. + + +  +**More information and additional resources** + +- [tutorials/inference](inference.md): Chat and inference tutorial +- [tutorials/quantize](quantize.md): Quantizing models to reduce GPU memory requirements + + + + +  +## Converting LitGPT model weights to `safetensors` format + +Sometimes, it can be useful to convert LitGPT model weights for third-party and external tools. For example, we can convert a LitGPT model to the Hugging Face format and save it via `.safetensors` files. 
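+
+The end result is a regular Hugging Face `transformers` checkpoint directory that can be loaded like any other local model. As an optional sanity check, once the steps below have been completed (including copying the tokenizer files into the `out/converted_model/` directory used throughout this section), you could load the converted model back and generate a few tokens. The following is only a sketch of that check, not part of the required workflow:
+
+```python
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+# Assumes the conversion steps below have already been run and the tokenizer
+# files have been copied into out/converted_model/ as shown later in this section.
+checkpoint_dir = "out/converted_model"
+
+tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
+model = AutoModelForCausalLM.from_pretrained(checkpoint_dir)
+model.eval()
+
+inputs = tokenizer("What do llamas eat?", return_tensors="pt")
+with torch.no_grad():
+    output_ids = model.generate(**inputs, max_new_tokens=30)
+
+print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
+```
+
+If this produces a sensible completion, the exported `.safetensors` files are ready to be used with other tools in the Hugging Face ecosystem.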
+ +The `--checkpoint_dir` argument provided below points to a directory corresponding to a downloaded or finetuned model (see the *Download pretrained model* or *Finetune LLMs* sections above for more information): + + +```bash +litgpt convert from_litgpt \ + --checkpoint_dir checkpoints/microsoft/phi-2 \ + --output_dir out/converted_model/ +``` + +Certain tools like the `.from_pretrained` method in Hugging Face `transformers` also require the original `config.json` file that originally came with the downloaded model: + +```bash +cp checkpoints/microsoft/phi-2/config.json out/converted_model/config.json +``` + +You can now load the model into a Hugging Face transformers model and safe it in a `.safetensors` format as follows: + +```bash +import torch +from transformers import AutoModel + +# Load model +state_dict = torch.load('out/converted_model/model.pth') +model = AutoModel.from_pretrained( + "microsoft/phi-2", state_dict=state_dict +) + +# Save .safetensors files +model.save_pretrained("out/converted_model/") +``` + +``` +⚡ ~/litgpt ls -lh out/converted_model +total 16G +-rwxr--r-- 1 sebastian sebastian 891 Mar 20 17:08 config.json +-rw-r--r-- 1 sebastian sebastian 4.7G Mar 20 17:08 model-00001-of-00003.safetensors +-rw-r--r-- 1 sebastian sebastian 4.7G Mar 20 17:09 model-00002-of-00003.safetensors +-rw-r--r-- 1 sebastian sebastian 601M Mar 20 17:09 model-00003-of-00003.safetensors +-rw-r--r-- 1 sebastian sebastian 5.2G Mar 20 16:30 model.pth +-rw-r--r-- 1 sebastian sebastian 33K Mar 20 17:09 model.safetensors.index.json +``` + +You can then use the model with external tools, for example, Eleuther AI's [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) (see the `lm_eval` installation instructions [here](https://github.com/EleutherAI/lm-evaluation-harness?tab=readme-ov-file#install)). + +The LM Evaluation Harness requires a tokenizer to be present in the model checkpoint folder, which we can copy from the original download checkpoint: + +```bash +# Copy the tokenizer needed by the Eval Harness +cp checkpoints/microsoft/phi-2/tokenizer* +out/converted_model +``` + +Then, we can run the Evaluation Harness as follows: + +```bash +lm_eval --model hf \ + --model_args pretrained="out/converted_model" \ + --tasks "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge" \ + --device "cuda:0" \ + --batch_size 4 +``` + +  + +> [!TIP] +> The Evaluation Harness tasks above are those used in Open LLM Leaderboard. You can find a list all supported tasks [here](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/docs/task_table.md). + + + +  +**More information and additional resources** + +- [tutorials/convert_lit_models](tutorials/convert_lit_models.md): Tutorial on converting LitGPT weights + + + +  + +## Get involved! + +We appreciate your feedback and contributions. If you have feature requests, questions, or want to contribute code or config files, please don't hesitate to use the [GitHub Issue](https://github.com/Lightning-AI/litgpt/issues) tracker. + +We welcome all individual contributors, regardless of their level of experience or hardware. Your contributions are valuable, and we are excited to see what you can accomplish in this collaborative and supportive environment. + +  + +> [!TIP] +> Unsure about contributing? Check out our [How to Contribute to LitGPT](https://lightning.ai/pages/community/tutorial/how-to-contribute-to-litgpt/) guide. 
+ +  + +If you have general questions about building with LitGPT, please [join our Discord](https://discord.gg/VptPCZkGNa). diff --git a/tutorials/images/0_to_litgpt/4-commands.webp b/tutorials/images/0_to_litgpt/4-commands.webp new file mode 100644 index 0000000000000000000000000000000000000000..aac24a13b31521a80bee1413bfbfac80f551c454 GIT binary patch literal 33466 zcmce+Lzpf+5H8rZZQHg^o2PBtwolu(ZQHhO+cv-XFJ{(vF*C`#$SO|?Z>o|iB`I<7 z#y}t-bunQDH3crB(*M>-%D_3mG(@0*AV5Gsgr%{z?Fu z-=I_B0cAB1IE0ge`EoWzpVS9 z?`e;9d4e5)D}s@}`rUs2slK|O%kNr1D8T%e^!4ctaG&)`_|x_d*pAx+^yi)Q&H3H> z`vZ>sfV}>jelK5T&*~rU2Y@TYH$nb}Q(uKZ-}?uGBQ_)+*B0&>1U zzB%9JAN6ngDg-wHGyYL865kDvICFwi{-)nC@8HizhkJ{F7J$p|!cWmJ=7)SmoaJxK z`_~We{@hGowZF=@$#2Um@VChO-ZSB~V2$9m;GQq?Z`4m;tojmQ$UhQ5`9=9FaSi$2 z_851yx8Xkl@cTUiX#eORu3hPG33mC%{3p!Z?_Q7ngkUWo3;_G9{ayD)_$_$R_eU@s zfd5=6|#IE;#qI_<8x0{)+j-{Z;t`0C?r*PaMe=8vh?5ic+>JgqOo9o9Tkm zauNKwI`n8n%iR!o={y28cEdj)BRl*nC@lDEEkGK*(Icg?I+`8JAvvN&5=Pm3=6NB{M`uPk1wW(6k*Dgk9`L_GCHZb6(jKJ zC#9urz+ojg&G`rUA3F@g5V)6@`w}ietJ;d9Yl(xwK(#5bP#4TqPEywVfe0|l^PNfREnOJgK1BvP`lU@(Rx1i)jp>L*VN%Ge#%%HL;hwVZMoqN9z zlPY?M^?6?+N?UL{Mb4Pb^6@{yF_dm91^+BpV{m8jtbFi673~=xkoW$?M%YJoC!MW2$IT(#Sxi=}X_o-ykC4n#vcoURv zeLjkU|GtchMJqw(vH~>_{Z=;rTh`~6x2;ZX3idBdX&hwKg>F>j(R+ zwo0~pOBC}#sO#Z~i245wjKo%F2F!MLp^Fg}@qRYfzSAdlJZ8Pk#_OBQSPRkV#e%KT zKL0ssD*UcOSrHF>4O@1g*mEmQK@3YiyaY^bKZqPdV@x3Ivkzfc~#0=exHSUN33SSO0 z||+NwLi41Rf{0VG*ns+0Wll_o?dgCz5nR1Q@YI^ zH!ESIrkJAENs`!Uq|HpXWaz#AhXWTH%i0G{Ose|3PgER?;TWNe(i_QIHvfN&`gSwU zFAlHuLcwt>BBe|W3;9gsL{W!5%B#&hSZCvg);@!HNJ=)l7z;${TpvifKVlvE)C_dz zT~kbl!Q#MM%}Vn{_g5HRpi_EzEFMd?X~tg3F5Jh9cd}0*ebG;2;dQ8#;P6~E@3TK{ zjVu#?Y)(U#I^9CvbkpBHlC8I&8|OeYrMIhRcANq~X~r#Mdd4%&%~lTkmLw*Nc9jSj ziAw2EAu+ae5UlX*M5*}&uEI*RnlQf>=>MmIY#y#325##x*oral3BY#+&|aegkd?dz zFCTq{R1c#B)ylSmjdC;H?b$k%!h(X$E;ejiRjdqlCvSWb0a0m^f9(d|*4Ts1SKY)1 zy9*ekJ#b4@;YDYtr+T*P8vF+qq1%-h1ETZ{1zwDML@597OAm@b&oAs6>f-A9_J5J? z|0q2EA4O3uLY{l`;T&1C>_#&9NQE5*gY|nyH8o=_!!R?Bf2xoUY);>)mQVlBQC8Vd-f6lRUd$La+PXc!Uo5P z&h#r*#v{GH6v;vi30?Y-+CJ`8#Ojud|Glxt9c7th%A}5ZRNgz z_zV2tTBR?BzU}{Z^Z%i(oTWMQ(^1bMv;wg`>`}P&p`iaqr2iLUOLRTZFW~p}>>m)2 zM8^vHs7@9Rclp*qYq^f!rIQUxt;jN%+DFl@Z}7wmjeNsCogPk4n|wr2;6Rw+xQDDk zhE<1D|IpL-TLx5eAE}iy2`PU!dXWQ{wxD#k)U!DCoRi+Xk;DknpJZleT5*#wwy(2B znnl49;j?_AOfWp3^-j0JP%{lDaa)1fC#=RELnR5DGbSOfdt}_Ocr~N?d{*&b?U%`Z zc)+6(ROxsWkH#TCh?)pN1ar7^C#W65LYCuiKKeH!fgej3(Y$|lS$BQW&p}*=(F2=P z*~uJG+Pt+sKh3|y`$4#gbbz6u&HzRC6k`ZNzn^b!C|4i9b4$!mK6y9ugs{QZH@&Y| z6D=MpeL^W{8WLT<6c(&#W(UxeLNbLd_nuI`AEtd!FroB{#lhGws}9tg_JCb+hN0nx zh-NYtAGz=bQwD1cbguXe2$u9e^dQ~izX1<{fW~d2)Q>I^>)kM~Vr|VG`O`0|A)XBm zDEVKE;Ib0+XszKty^AHN&ZJKe6m@LG6Lt*Jxgw|E4rAb}X+961Gee$7H42wV-?$Pr zWQCh8k9*tXxOhNC#ZFb3WyM;NfAhuLm7HBHGr~_!s0j5zsso&XpJXlZ)HqhDC7=$% z;}oqN>i3Ff`~*w@%`C0y@KR~t(uejnh5II%*gdYIy)>1~o%wchJwc-?DjG=-b`J7( z9z0-o6h5V>{}`yXp7X^cSNI0x3AxV(hfrNQ_{wn)7YS4PI9gAC%pimuk@5g9x)H{! 
z_}=UeUtrUW%y~ZUA+21P550;4Q0%bm=DHoP3is^z#)UFg9$8IyqE`sxh8K^#sC}*V zWv~Cf*y7@|K6&AKLuw<7{c&3XgGsYbOe>SO9jRi%>%Mx;;F+y1<#m1VH=$Eea=Pdh zDSUc_$imgkths+>NyJlV>X}t*?T1j`%}jWuwxX3#T8!ez>+oftyTp~PElbRl>IgK5 z*b1;FJBqODZq-R6=0`@RJ{}sQ_c=s#$vuT^F41a^P0XK#cRI^?FA8ciuWLM-4e!ao z3gKG}$K9^=c;=utLB# zq5dM@yyW~Q+1BQ>j~m4Xld=$1IeNu%+oKjQmdb>J~%FpC;kzjhlzFeZZa^gl_6fYNj5M}6 z)6k*Q&ig`vw1FLlyZ*g^psNUQxg=Qak4$$NufmjcO$>q49_3lkUt8nna5-WaQo}Vn z06J=ES0j5*Eh^tU7|XX8wzQC98AD<`jzSc@#^zKeUbNC9P%r!yn&X{Bs<++G|ftF<#g*@*&_SrWG^^+j{h5{SMeLVmaa_F%1o2lu{(DG_T= zTMht`LaaIRK46>+oe@k5tkKMSvuRoi9 z4s?bNt0a$xE(O|)@k$7#+;ohxm@XFAs}G8%)22$sZXZs@u%r=g^FkzJq{nvqDKQ9n z?K@hTT)f*BFmsn-@`rs+Imu2|4y?lER!SXt6}+%Fl_`E9Qh2t!Izz3#G@y>1mVflP zRF&@0;9dqR;~s|Jk!crYHQf+^mX@6Dr5N1p!|Qrp&aoNJQQ>mEvD1+=pA45}ib14HZLMr7iLFK!f7LI@;atJN&DU@sv3M0m%r{RUU607$ymtg2 zWYY>dqUN(q_?!#cO778p)1kTo)Zgp&L(C~cO&uJLUvnC`1QO#j~GF;d~R z7paukGC>+7Fv^aO0H-4Tt_M0m2{M_%zu6}a{kWQ1A^U>234~MM=!C}@640e%EY6`s@*CISg8!k-Jn7}sn?rTBIv z=9!VrR$p}C75W+UEfTO;m~ctD-Z&8^+Z4MZs#c>26ERTqXy#H`sjMcySK=^zXO@c{ z=^}zJx(>vVVMr#Le>Q2z^Fd>>LtRedp%k)$fX0*=3iHq@qzi$dP2X3FQ}+j;Cf_U@ zHiVf^>DAT;9EP+0#f)8|Rl|Y_s@V}uL%C$b%#Jk+qEj@ix54c>hu)3%kFR1thdz?& z*n(+rw`SgCTnCk($z13=i^+E2j(YDf0M@c6zx>X6?pJ1^Ft_f0Lk3qETJtOu5lBue z&ogGFGsA0^GrRP2>4iR~1J*a0PzOXoWCD{Mn9_URJQ#4XBRnn3fQI56{V{@E9!s~9*jStt{PpU63E8bjEjA0l2E1Y6hHqCj=>hAHK$r2|b4F&YJ4L8_<$pxQ1!3FvN;SS4;k7S!QH+i0 zOMYp6Us4J*)XjZF$p<69*ayDr7+XL8S?-8PI@AK|CD9LdW9AWJCn8Q0wU*{OTV|uZ z=gjh5&RhG}B4dW7tA{gWIS7Z$S(lbCt+DH~ksI%9X*i&_>M?yf_b$N`NW$5uG~V5d zaq}bacY+djt!}p9+T|B;J87b=#VyRc;e@LWV)`(|agCHSeP%1-dnK|I>AUvaoOp^o zDEhj2OkJ-WYKw>S1R+);7l$|2qbII+eCa`JER|?3n-zyS@MUON6<&QWS{VgxOiZk6 zSeT&;svUj))x++=F^1e4t>q{jOcsfEa?RzDReouR>Z(eEFP?FhT^2xW9$GpNgwhc%8n}X?|q(O8)}7WDHu*^^wNF>BL)6D%l%Kmn9>Xy zts`uNR3E~hrEr}6KRUKl-W3FyG20j94IJPKduZR=fdeH5AhMW{!komln}sIuZRhO= z%hvf)>M-wbCg(n%llur~2!za2H5Z%aGY|d?^k#BHThg}CDx0LAC0J6VcGc}p>EgRP zTH{&Bj@2nt7wsjJG-#iNyON>%nw13U%e_?0p=wp$Op6~i3Ti|FeqIW&!;fX2-NfB> zuFfyn=4jDj%mkl=&hL<=(s_hWEIw-z$ad*?0&Q+d+lRNXz~7**uYmbLYR&p(E{F?z zdv&mvVR#?ssJX6*YV<@OyG-xNeYz>W?N#tpUu?V=vrHI;&U}aI< zV$gYC4p4%P+4Js3+RrfMMUIr(O;k}Rh9*S{l))nLu{H=uqvn10)2Xs_X9?9`jR=K6 zxUeXlU{%HrizH7@3_;oG-czQV)uz^R-YVN}ciafS`SvtCmL}K3EA?s3ZQ)(Wa!rCF z`%b5={~qXy;)){Cx$lxnzN8^Km3juADoh!aBzGebG`i7@&OwDOT?#a6L5&MiP-|%) z-XX^C+>SLMkQ2D@3FJ$FjqSz*%FMSdhuhOB5o&xGVt~p zT7?0mhyRZ0QDMBT^JkjIx#MiL$iZ8Og6Y(x1g zxg_|RlR?Xp5a?8@IH()<^ocvAg>!yt^j+}gk|ZJ(t8Lu2z3ydKo!YvNb>A)w0pubm z#vh}4WImPxqN)#b4}$?Rp53?wco4os2RnU=4k3TZY2x!fzJH5Aoqx_DBP^?`cEk^z zYO^#b6G-t_Ygrc>m5&u%*l@#|`XctJxy-heJRHTds3H1++z)EjB2eTD#r|?IIrrvXy61%5F)! 
zQd)gbEtQphut#xGeYbl@;)+X%XZ4QZE*{?t@*HM50Ef{EHt8nm3l@mZZroP&U_-xu zgy12*nE0M7`tY0Z-5-DMxsY;Ok%t<@p6jHD2he!$w7pZ2u)$(kEC*s&7?e`qpzgPp zSRJenVloG5v+XZ|_l>qeGx$;QeCO;x0g*|x##_QCK3OA!Jr$5wYWvgan5T#%GX8X} zJK^G&gmP=R%Nu(3g3y`v%I>F$tBJMhZz(!r1`g7fZeI5k(2)O%sC zdH3%uBRXDd&Dl4CCf$7)%GtJocgyz8=8K?ByuB_Z9lxO|BC{mkIGFJnRyVRPzpQ{x z_>v^@%ra62;C(*WO0c%jy7kAf@MiXIx**QqB1r_fHpR7RUr?y_#Y&n(mM^I%d0=dr z_s&JjEi76Z2lP*}s3B@EnUyX>1tX8blDMylPD)@~NK=eB8 z0^~E|l@CJc#nHw3B!_MwgPnOw3c3*nT8N@P>x^B5@ty4a{?w$T1*_SWqbn7-)vS(L za4?}62#S*%n4t@~k=MR_de|}d?1TUqP{t%^-4;l&vd%VRE~o=yh7{sZ7gB$}zKC%i z(Xnv}VXSiKCt3Fps!%S>z%%eB`N)Po9tg!epYXfq*0^96`e;NRXCs?9&Cpv$KEABY zSa}XPx0xaGbYhee-6S=b|>NY+Kb84Ys?1oj&2VZL5T+K}CX?3I(B#YB%LNFkjh$SG1#}_;@@T zcd|r>612g|iG-#aP?x;F^U@s`=R3z6R)!DC@Iw-+MmNTrRHQL#s6hUO!fv%`0y~-^ z#WfKaDB+(E5DqC!dAwB~Izm~?mI~%S1gPF#OM1l_1-}^fH)0?OA^u}!?Xn0y;8`Xb zVFf=?AOZ}MS+n`7KWX8pA1d|Tn&e7%LvTGfy)lLR`WbVwebVm)#*XqlLW|6+y*6`W@MM0W#;~_R`Iv7F z3?=7bV=Shknb_iPDx862b$Z^if-N-Ru5Y(G_I!&-w~RXpdQ4iH?E%En>~G&2YhIqv z`0kj`aR(zPdc=4Zbo?NcPg*(gq-e9MDS$9+*!okDn#2cmxxFQ{I123VGR@hL;Rh0l z;qb+23Kh_FwGJW4KpMXVmYirwB4#E7$$GSSl}s1m7TI>!zJ#Phzc_JVNfr`zW*vsZ z>vaP!n!GY+=!Rfh&1VB?m|g?hDMXp{xX9q7`>+RiIQ!oY;&_fHS;#F6#BOb}q_KFU zMP^&Y_`2R|%Eu~UyQZqHF5OiDoqXMzw=J4N#Ni`LV!#>GGGJQ|@ztI3fWidq{@#pRx`h{@L@9RyGu8b}~}%oPFV?Z<;1(PlZ{&SotJu)*Sq= zKZ-5C3=TcRw=yBa)2g0&7^vK9Fzw?qz8}^puY`Ea7kK;*b)>Y=C06ui@Us2P$^#}4 zdB8G>gcU96mU}!q!VO3mW8!lxKd7@!_lfr!1ix;!)hhu;bR7zr(<@fuYT?&{iw`HS zOuX+jh6;uc!Oy?W@5dav@zvATNhTVznZ^ps3<=cp#7_MgX$@h#G%(u3pKbAhDpfV8 z`A*m5UsrM;Hs%Sgmfct3ec8Ntuc;{Q^AW(}GAgz(1|=?TNv-ea0o@Yc{TzkRgSXid zcYhmO#rw4*oL?}~VyC?rr_U%_XH&B5DDIEzUjT+RBPlW&pNO02^4+{_KNE@G3OGyGFfTd6i2R`q( z5-+8#5H460NjMO^uZLn5E5BdctJdI#v84lf=Yn7wRKXr0*5g>o(Y#BJNOfcS#5HJ> zf+U#-_iY&(gkMp`tM*yN_qF{_2DCl`Wme@nJ;8O*OT_h z74mt?r4_1cEq4$n^6LT9uIrN1i{^+eV{h8As-}AY1f11ZGFxb>!3`6HY&7L_p3bo9p+s~@CCM4TV;HQiE0ExbRfgbtYF#jG%PNitr-3*_HDc(3oRmk6O?UAtrA*}^Y#_#SkH#;aL-8t z9L_1HQdqfA+J~#%MlgKbO39fJ3yL$|Xk{hfhOvUps@2$4P^Hrp+uI7$AslYxBH=Ek zZ=VRmkd9eO`5`N=B5tq5@=#kbx>RhK zo0BGk+{UvEE0xZhds&m}So+zmreQR<8lU!lDT9vki3HwCeI<_I7wLgLap-lR&;`}Y zB(~f*nmrHo{@&w>d5|@?hLw=#V_qLfcMFB^F}2=wFx$^2746--mTYP%9NvqodW^7? 
zOiquj-RME=WQk{F{F|0&Gu?_XR;%4GU<74K-wK*a3Wp*p&Oz!7qPAI@je=KCX)!k0^5rxP1y)8>UPACb*mrjW z7}9rZ@NlHK+%y|$ur}QT-9NT|g4i$`w-#$ebSZQ5?S_k`KeE|P*AGX0+xz1WzJ?~+ zG*aeGr&R+>DR9mTR*Y0VaW=?h+8Y_|s5r(nyw6ZDSL_-E=t{Q>2?|SJv}m>jVx{M{ zMr;qb|5@Qw+YG5R+Hn?<7~jT9N;%#Ye-is>k)DX^vNN`*C@tK_hJr=9bflT<6P32h zH%tU2$e~(Vw5ttwH1Xm{M@Dji|BfpP&K5hdCO7shkYJMR91^A(XQ!dZF(+uT&^I zs1KLMY9^j%vRHdAp*j3O>uwXj;$_J|yF4BCARH+5hf50NZAYPZlC5^%TLTc?0ksqj zeXa62Q-&DpD|1lv4~G;E@>oKet=|W=6YUM3g*GM5W|K6_rr^}esj2SFB$g)XaD-YX zTQ;7xYl${+E8PGNtHlRb8+Er@BPeQnC8|6M; z`sM&b%>N1fgl9Fh#x+u> z8uI?NvzW}*1G1Q&PZz`miFl+aH7{+R-h=I9=bJ)5#0(gCYdMCxxQLn9*=RhVOHdy~ zZQYazv-iD$D={fy{vRLfLd*1j1mCjFdaWN}0GELr_qkR0nft?Cx@*sLp0bz6{o&8U zHSX84Mnq&DPx~hp6_*8mn~D;S^0PZ|Sy2w&dYgT&remZ>-Q%tif1uO*I#`F6tHRt$ zna=N5+rMQ7`cnSq`(GUe_yF@?jk!#}*Ww})WoRP)Xq>P~KAF43BNzV)m6!$=dlW7L z+@J|F7P$U`(4wU~SNuYnf}6|8(Q|o-2Nw$YZDMzsu*PmQ5+C;rlpzf;xvo6)x+9Zu z2cC7^RPIED370hsGIFJs^dx)t&BwS}sE}}|s#5Zr4}OJcNZAc|vP#uF*og3a$vo9_ zi>0ArSmz5VOTG$GL7u`eYzxIBmHQ*~l*L+y?%@>I^*o%f6aVy^48<^j+5+AFVsmC; z$d|!}({Sl>mbshOl>hjdk3=;h-Q_vdn_%jP^&^F|5nU3*L$Us-im{T_u80;1=o>uf zO1_`=5a#!<7AzeUugkKg2 zKH5#RQyhJB)7>$v-<8rCYa=}Xmu%UZZk2CP{McT%GTT7kxh=4wc zzdsgccg_byR#F$tfHzc(NMoXo1&j9(Gk^J^UYohZO+Eec9-P2tZnscqMS1p55Dvou zaw=xGN|0NJ}MRqctd!4KTmhdKp#a)qW{`c|HlA>q_j43_*Hjq_X^g+i%u!h zo1O$&1{@L71}+z4t84@^6JvpbR|RLW+dg>(M;a<1?1hI-k?3Uls~S$7W= ztC`%W_73Aj%Ym>|VQTY*u#8$$F^31(8k%p`<^rkjJCR$tkhQY#0;243<8&?G+nk(q zNTHObSx+=ExBN5gax&rGc3OMi0@mQqGd}%qO?{_T=${st*$&4JYt@e3Y zfb@ute{{eHh}y5D>Tjiyd`cO8!Xl>o#6*npSP z4D&W@vU|YrkBMLHHES1y)?J+KbxT^2db3p3m>u0(S(vFP8Kz&j#VmR5A#uirfBv@3 zw1)E14%Fz)D=!dvWc0dXAb3I~@)H=H=yeg)$VEe-2)3Cx7rVGI^I2M$0j0|1me#}t zKfIikLg@~IcypQdeJWKVIovV!9%3_(q+2MQUt2_L4U?YMNS!VNe9!YA6WPtC;+Hj! z-~ehoRzAD2Q>Uh)4pV2C4tj1# z?xmKAw9ljB2r|9rxV^ZT$(4^la=Sv`csrgXWz+!45ir;nTFVeuo5%4GVgvSy!kPqf zWyw$HEK6?;MqjTCeqX3q8$*j-WH}4xgn%i`G0)G$(S+48p(RmhPKU|{r74vr#cfp1 zkD1YP#iIkt`x!NDP{k*LrO*cnyMFY*vW`QIJ`9u?N*0E2a!O!#1nJhIESgeRWO>qp zoUgKdo`9BNxLR|}S0?-L8jYsZ6NRxva2i#3jNajS?Y#Aj({>l6T8JVA;d+a-&MgvE zsNn+XvC>?@eNFc7aV_qEs-5fSVSURvGEUihnn=QlAxc;0*Q2EOWG_Y~)Xzzzw11a> z80X4ia4IiW7m~ogIvgP>B3?>iRM8WzRQvS^cn2CPY(R&Z4w7#Bqa=#+Foz(`yexxT z)KEMGzfQ9B@-_#H&3N8|r zO40?nF?d7A(^pu^Y~|6qDG>Vt_U*#&TcobQ>$QWr%lC$pGG>FM<}DM}J`s)XGh?Sn z9Ur1iIpVXHQ5vp;XGVKg89WLTDUe08-XeM8D0mCso!;PtUe2@U9U@k&0u=x!uermE z!3jlMMs89YGzc6O{G^^}fZo0IF1mS>6uNAaO1}9+Rtf~%dTDK-WwGAB`)3?dB7VPk28}j7T*?#Z;bpP)MgzMMCmao%(NVns0OL>u40u%p`?dcFz~+ojKKyuL>J9FPl3$fi*QO3MQBM9(EL zLPOu+?g~J?1ift7B$NTZ)++j3CmCE{{B%t!7ua2r)Qi)s+gxD>tW#laW9C_E(GnHE zuYAh2apbYs^aEaHPYn+Hwr-YB&-j-Y#ndhEKy^z4S`TV~nJF>D(Q2wMH=4p6+t`#o zRt4kmyzfXd>=iX9zj!Cf->R{xF=wn~MoC;Ue?Rl#cLmB{O%~D*8C>N?pcLbdW5omC z7Np;8?7`x>TJMwLwZ}Mlu2FH}0yX8g4%n?A@r(NUF4*%VNOh$y(kEe=Z&@~?g4?Fa zZZ~Nymq$(i5ozLd@+i~G@=m9dp=(xgA=)N`-Vo)?>N23b^(?JSE8!27@$fr}&J|W1 zcE_RR0b?wELR~x(B{ps#16#5@IPN zZFki&7qa~P81r5(q1MJaC}qyo)4q@}8(WA+z*0%o2Nx?QE`92-g1!S>TJiZxkujwF za7mJ!$=6ZaD!blq>M*}Xui5dtAfYE!u7n+{s0gH2o6i@=4bh-TyvM(pUPtUt9WMP> z$@bF5^}v7c&9CP*D^Db&&u1mJ#oTS7b43c7F+bYSrxxxd_e>&?#f*gHgJ6q`1UA;z z3ufkqdQDFh8HdIy2RI-7bbmP1#r7@!YbXeUGK{r3MFuw9V@t**2@Kx z`GIieH~QD9L4k%aJ%@EEb!2ED8*~wKGKrffE|KtBT+wt-nx6!8XzucM-0)F@gm5sX z-4sf=q`dpbVTQ1;Tyk^{!k!&FD1q;#<#_NS-#VVIhpdXWd@J20(XjrACgSoYtBUSa zGG0Ub4!OtR-*lkZKn~Fhb{hYGJ&mTD6cT7wnUhnc*}2*uSeL@JER;{oiiK>Fm73__ zy@2}T;#|{ORz%3a=)L6N9x(SVch#WBQ%*Ku$**8Z18?*ZSb8f60{0{&OM=%o`!bfk z2LsCCz6jsw!{?;yUq}B7SBBjMs7ZqxU5lOo&YVS_hy2LQ+XeHso*}74PYSH5NzR_I zlA$nLSa^Os=asS2qEmnTmOHEI)9HWm11n9X8wd+Bz(C&WAT_T7w$+L#FMN1BBEsEZ z)lXzA)dkRX^_`oPT8)@y2avt{#T`{0@7aft^BdZ 
zv>9HOkdbHEPsv+c11F&+ND*pIJL%CE#rk|=d+1cb%zJ1_%2J>##Z>|XPv=S2lK})2 zre_x_f>HBH1d`$A_Iik6AQGO#FLL9a)=bsX7Y)?=dV0Sk#h2yF+2!0W< z3ufXU_?JoCDa%8YP5@_>sUbH#YeML>?M{khvN>s#DiZzvbjmiB2&Wv%KaF*l;z^=5 zu+>9=Za+%S@Df>AsinQ(C({jS{>*+1MCZ1*3$5|)ek)~ps!$_<%Zp9mGK>`@Qls=Y z1lQ(9{WD`_PUFdyQhNwvioyXk zp3(KUoRKVLGItBV->vbgm@qeI%8%4VHL#5~)$N`n20M`2hFzSI0C$UWAF@J7l& zN#8QzmfiMw7%Whq4<KI(vs|@swfO!Yzs8H9|C!CPLZku6`*-Bofp+cV(vjVm#bZw|jp{P?e zoTPRO3#ba+f5nx-3TT@H6U5(1Kx|^LL!`HSKyogm*$qTZQJ^^AnD~jhmNIE7n)1nQ*}L#SPD~HoNeFp@O(dI{)EsY7Z|uGm)LkS;D15a7oW%t#)ch8IO-EQ5Oa| zT-YXjR5L$b`={ zPxQ)!9(oyt0(%iE@i4eInl|dys!?l`-mq&cUh?ZPn`^Ob5=d(g_Cx`FVkZnu+(+tn zMVE4N7BocyJHtc?e?N)>`bB9K0u@yrE|OQPF-G-}{n=A@Xm|TLXm_|nyZ&3uBwF7K z15VyMLBwC=7)cbFs<~TjRa{+7i8A%0aHTHG7=`wTQ|O|+ErXg{YUj=DsW*6jr8uoq z85bR)P^?L7%3w$3W?p>p(GWPcv8`kmJ9ZwUdUj3|V3(;b%c@4npr+Ic+@14p;;Iu0lL&WnQv)57zhvMYpja}IiqX>v zYcWlJk0C@w7D8TOvoCSo2*K0vDJAlM{g&npwXkSkL3>pT4*$!`j)N0qU=;MoJ?*qL zPGB9Os*0i{1ZOFp?aalcX&lD)3Qj#J{Y-rNhWLh{c?!TpJfMSH{tB=(ChOoILJ7}6 z?lg07+HhGDe9iO=Rf~N%7QC*T5bgeimXvEtkFndc zWOyTqXpC=6i~o26$7AlYs%FP|c#TlG-r}!?6e-ee6WqUwjX{cyjbRpuzb69THi*R3 z`-q1T0$R;wc?8*p(QrVlQZ7;h7nH1{ESxOzTt*h4gC^*|c2FUv^~w&ZuV*$WA7CNx zv?g-1zoNlYgp6px)~+S~I*c9?u3-rWlk<*;72Y_Xc4->|7LJddoZgr(pTb>)6kmBUl z4^q)+Ayw!Ys~rmygz3TsTTFnYVHd1u1hEtVMIpG@Y?f|^S(P$pTbKz0!)2NQI+Ttn6Uyqv@hl&u-~(JuR?&Sk%pbWN zNR5n!%qDIh!^t^6(!%R;oR%YtdOCwTaO#-A(lmKm!Oo@N{&6a&tM7%;meHtLo#|s6 zn?sH91F{)1|E6YboCKO7CI?BV*jAZp(cFE2aIkhx&@aa~kvV!5Eoo=8^%EsO!;F&OA#O;#auLkipWDoW&r~dB0z$ZFT>0xGC0R%0Y>cl#0-l zc{!H~j=_en1kW;v0`J+eDxhra*bs!)Jnm)LSvY89v@lS?P54YTv>!6MmJob@>|>DP zgsm3l%a;ZCCX|V<uTI$dhga}A;5;_&)Y*EfW zG*1?%IIg?{RJW&&eJB){{-+gUqx%O`qAOqNJ(7*W^@}8kzjEL=5Z(IH?=Qoi81Jeo z*|bGv<1PKoMq}^d-@4k01yK;!oczDYW$?*?Duq83RVn`ACOL5?ws6V7=kCIr@=%0^ zQs9f|VQsnW>>HXC=U2lZT)cp@|&)$#2; zGSg$#X%SCq#mD_qAI9prggrH9{$OhIa;ul4cL>RsawU zlU?ppEjUwwPb@bWIYZA@L>n`CjH*!LM)uXp@YR^7Wu-f?W&bR!`|hZ$S)6Y6Wj13U z?Z+u`!^FD`K1iIqJ-N598!_p8uGisL&FsrfR9#h022+%$7-o?ow|@!}-@MaiJ9vU1 z-iX~`Z3Bu;oA9!9T59)gA=_joOvy)p4}}ZmcaMGMdkkzb;%dm0H;0!VVr4z1JUX=k zfD!G;REzU18=xO2y&XU;br#4_HtMbrEm+&59grA_9Ez}#rxn+U=8;dG^|DneOW3~h zPTV|=ji@Ui$%@q+!TBIOh2C9|>-|?mvY%zlvPBGGz`O)dZ_IEsWvyyw;(7a#UMQWo zmdly$lgmuycX{>?yD-W5+=?hQV%8@SHj)nT;kAaj!lqZn`FWB`afp*Q3bVM893)Z2 zJZ^u#xux1qo4YrEwUrvq<>G_Pf#!oEey>f;7BB$R-K> zeS=U9uFXX8U5yN@<7AnO#s90ba|jki2LkN1ZQHiqwQbwBZQHhO+qP}n#{3$sW;L5+ znO>w)sqWLK^Ph;M%a4Beso!;RGRD$b5rM(YYP5w9m|3t2v!rCLGXV~C%QGZbNSQD|%(H0L!VPil~@sYW@O(jv<3X)e&=Mkf^7I?;Rpa0N#N)k$ z15Y?84pFMEEVg}?iPTt_CYHTC5;)B58P(}}`&4X_J{Gl*B z4UkcYrQ1^%X*?7MsqqmB!LnCMHf6$tX9p^SN}-$7e{B6hWOnmO(=R=6U35mOx$|BE zthe>!xr=!yair?JDOp3OY{maq1>=9MiQ<5`siVitDfx`ccDC+cLxM;)4&x{!Uj$#C zNY!giPOOOm-V zk9n`gs1D`E6*{`RG~JDNVHjQ|X^0K`9LEDdz#7t{x)~e5yl=!%ra-9;H$T|dv0apI z;K>AtRL2t;-`28xr1m<&4az1ZDI9bX?>*QQ0cZVXAQF|~JWURfeH5?4si1o>T2PhU zpx>{I5e3mhwGQrNDx}xjfxSJX-g#p;S;j4SN@}V|03GFyxALS1%KW4{*HxR5XcECH z{E|vRxi1y$lHvR>erlJg?{gdrv0W8@7JfEUN;v~SD`aJjGp#Ma5Thp!FZcYp;x!xsCA1a4w_?;w)?+CspqldszG!cmxfV!@Vd&8av^NKznfG%F8b=hJx#~V=4M`0KWV|Vt zg`iUP;%iiEq;N~!8UD>5#mr}u6B}oEXdj?Y4~%k6IB5<*PCwKumDR(PtQIN|BLz0{lYd?h z7_h`(bTZ)q?IYyXehSfD{4<0>SRIlRqE<6O@ajzQcY8L)69#F*_RDpMwVAQM9IgSpY9GWnr>*K8#RBu& zLXv$pE5F^V_-NpA>y*%q#5H!7Esf*CXfm4x*8-}My{ce3LSH3(g4r)BNPTK>%tgXjts&eS-3n+cK!W~_wx zT~-Fq9g4;iC?g)4`k7sH=ul}=jw+Iw-EoIraVD70U^i#6Y9=}xEKha+@A;hh^9BRq zp9Mf-LFnLR|6n2xAK4>Im%pv0KHO18KcNA(v4(|-73;9!6J#+w(`>i?qilk(DLH(c zKv{;(MB?ue#%yP1SPv2o9%zPiDA05BAdy-aFz`CMiiH;Wa>;J zlXdqVB&B>$cB0X07-y@o`0_+8M}c(NcyaVpKXYr2E%!Ag`8m~j8?-8w-GP!eF>L@&tAza14AcL!se2=g?~UfTx0^O_f6#+ 
zWJ5{UwDO^p3ADlRFbuy%^AV2F6B<8xr0yY`)QV_tm_D|ljx}i_g&E4nj7#o7km#6x ztM4`;q_O)=8xo<{Urzfy(DKaHjQ0KaAt76sa=-Cu3CneA85i#Ef`bwCVcEDMwLXnU z?R5zAD8tl4s^bV~gOhV~qW%|LQ$_*UPiRoTt_DGJ1N=lsX z0q~+%7FP4w_GB)eJQyrJ#!4@&Zyo+bD3PUdI>QW24s+`Ta772KWN<-saXtl_b9G-R znQ$HVoX6PFN6hw`(!)g4T1diOcK-gGi*r3}%6ofu=vV@m4+WEY1@GD+9)(XA>iKX- z(6`1{&a@H9lpuK2ZCM!m`gM?PqIUC5b!hiNTmL8&JR$&hd1f$bN}0rrAC5BD-q}~> zK@2>~Wl1{{x9ryJ2|N$fCW>9?@rc5OqmB}nW7sz6ZGP@<5N3I<8yiY+E36#4RGY(Q zHl?~C1_YqVDbPM#e#WFxs*rmkXI;ff-ncTkl;>%Y6&@pZrft27$!3sKWQBX~2w|CY z&3h_PF8V%*7Nb5pG$sh%SD=c)CZf3#I<7+P90r-^HgrHlC6lANE~yHKu}^}hHSYuk zi748+&u1>b@DcIBpcJ6#8i%mc4{XZ0T+J9V2S~IPixBMjF1BQwD`&YBL0rGmfZq_E zJ-R~b_OUtePQDcb^bjra(rhby677k|D8s94KhHq$CYLWY8?(AzNSn zy&Kg+4=Nm?=ugfe{n>J&i;`u%H3P{&Efr?k!hMnR1Op6^eyQ`GajpEAB)}e~CmGAE z1DQJZn9H2dGwt*JbOg<^(%mbDk?#Fx)3{ra3JC@`E`Vg74F5~w{cMpiFAFij z30T?k%D;cjF6_vD-9RmNqeno!>SW2_R+Aci=KdvR=3b$Ocm;q-Y46VT;;eL^9ua@s z2ad3%%Ci3$*(~A@#;N?XOhci#PG0H-C|sa%f-tJDG~&P$66*pB7d}e4Bb(jFS^0|> z$00(#P9AzEWlkL6g7NWI4ry%db3W$+Ji39*g0_m3&tu?>CiBX+GJM`nF(+QEkL|nu zmc7aQTA)DJ5h-HIFa8>Zah5y-D)G@+FqZg^5Ft^qWqGm%6u9!Tk&+GM~hXTXw9xdtPtW`q`b+O>i1X_ATXuSYOWJzbp2hd;X> z$?K2@E!}kLL*LK8N0t<>jasXdqlAMcCaB*&E#|X~hF!_>TT&nCw|Dz6qN!oQ6$^T( z?ipX9pvJ}9^idEZCX@LReTgcWS7GeaV`uLbF7Z!y+nt_r?T?PWGaA3>4ex+$;PD?I z4~w%PIdo@I)~#h*)~x+1z<{!AIuUh=wbw32!M!EOvKwV0uoFM1qd$Ccb2H&kvX7&5 z6!!N*glyX(tDsp&%|#7dk8?N|eIcmKke3|%3d)#_!rRV}6-;wA(R%aD_F=`dJV|1M zc(=ZHWjh zFctPuMtn2QECsPg8rQnl8Afmo*Fa!3FVHU#1BJpKTnKvfB(0?B2laW?b(>31IMSSo z<=gO36mo#763I8gp(1#Wis&^0&96Z)51igdP=Vk%?N07TE!`3TZD2$(?1EYS_zBJ@ zTj!s)T`8H$crj^&_B;v;Zk>4_p=_762m!&AvVMoK|H;#>acngy&Nqpf05BbNeXB>F z&!a(>I@|62o}4%Y{_tq&&H>0+rf*o#{~DP%8~U5lsES1Yje`^HR3aly7P|dzutdk6 z0QrkA?Bv?Yyhh0Xp^e$8-3017@~)cV*;59kfL?XV1w%d-%AIMv5IB!vlY#)F2o7qN zS`Bj7)^bJv>y`87h}K^Wv2=Ceq$Y@#a@QDhz?t|>?N%hX3NHF|3QgCVL&gW^IZU^{ zeU{>v=)^LZHBn0c!_0(x+k2E1e2YZM)NSqlSweI?Ws2o7o0TNqa#NHI>x92i=N|nI z=g>D0mKxjOep*KopiPP3KpUdCd!V}%4DtY`%JnDMDt*d$ z?)#Alma$Zw^&rfRbNQxC|3)9$z;WM@S>5+!1 zTXOgo3_`&JZDn-=ox>w*+fGiq+Ikm-5VvVI?5ZU_YM4<53H`KHFd2D3GK)s)C4xS{lQw0vop3 zB77Q1JZn7taKHjL*LXr(2|%SEiR!(uf9Ln57cvG=?!7iGAMmNAw+ZiIpHOA7yy2k4 z`FDbk9)fhuiRtnOf-t&Fbu{KPxoL}k^*qQGy&XZ0RM{KHy`wfDsgYwZOs8AFJ= zAu`!{K3x{uS<=S~tHdR=7+J@EeMRh_kH87v z*LrkxXxFqchHWR}%NFMT*HSA`6gIl@4n}kE=E8XhM)~Pf&HO0q8MI)f#aeQt_V-V3 z<*>O9a8Cg;It$9hM~IXjvu$-%6BD1>g)F1AWRVFQ36?^}Yp5`nEB=7^k`q6wro=7kL`-l<6Er z4<>X>cP*Pyk>X7Op$yeeoySmJZ2Y@ouxphH^3I`F(P#&;KpNZqft-aJMY8q~QpeL> zLa+Mw>~AoEW$xkKa5cKv!R_WFdYX&ldu^~NkDZP{! 
ztt_m?fRNDl&;F}ZTlkF#jb&wHuM7r6pB&4J$z8C!F(E7&jD}STvme0a+JPDx9XEZ< z;N@ULb$S}xve6z*K=7AT=rcav> z@haL5xSkXktZgp$*QpKh#sJIoTD;>oRr|j?&VuS)13$79_uoZORmD$0#$BsR1QC)+ z%*Gh={5Zd}6=!gKQv~}g5rAk}gDpam<7ZCQNFx2)7}9{^_54WO4;eHG#S{lmjgna3 z-VOFCsfO>nxe3=(7sw{Un~C=$C1N2wPqBVpmwDht<)T#tb}iQ`tHz7ke8K+E;$3OT z+!E%E)sv^|EXgj9qiqul%EG>(okoA==v=HInwg)I2WaIgz^Yp4nngc@>Mt>{A{sc~WOJKTz@a$KZj3 zJqklZSnX4mmQT zg@AhmL`i|rr8W;rCB2rV`rl&)3PEfkU7PDppbZ!+OL89R34WyQVXsYnF4^{$(SCio z{*tXcVErg*0scUNXkR&?&?##y!_>H5H!<9U|9TwBYPJ!gzI?{tapItx`YJhV^_WqS zID+085d5jtRN*^xMg>y<6TRcEpL7FqiSWsU64mCJM`=c>R`q2rXWnoW?t*tsFI~ciiLTAM)OM#?VW#zWZWFBY31sXLZ7B%W^pQei3_R2oTBOS@eBO@K~-;2=W^!Ik2{aOpzX#;Vv9*3oQk7^CUBe2A#c!GksZK|LsL&pp4?gH+gKbS%)|9jQqf zTOm^cI(+|6J{ed9np=peXmb}XQy3FGG2q%S%HuT|6;oM7S55B=rN9bJ!iFED;JRBA zs7P80t32vqF@1y6_jNBKv)5|qfHYeVbNxnZh$vv>+M0D~} z=N4*9AU+fK|PX-ZSpW zkFCPW!AphWkJE`2DHv3$OZl5HIg`8~ktyHqE6%?lyn*mto{OVQM}Hz*yvg4{&;wWa zZ>%9X!3iyjxYN!k3CW_P8_ndF^Jc@pMrbNs^6=M><043D_BRUySHzn_n=t~>r{%s9 zi`_J2+^b2DfMV^S9mh3Fc>cF-1MZ{`#Qzz2O>Qq9oZQl|J#>y1mQiTwwsI(_J3|zP zOWL2)S?TXWsOFEu1j?Pe{S~aJ5_Rb6ZlHt9E|5o@Uf1}yfF$75MM60wNE&+sfBdJd6`FP_6z;XvAoh||zt(+}3d z7y}=ayp!0!-AjM2kn9l52FkM8d_!+IC_A>1Ilx zq$RZ4Sz~ytcoCYT`gW1n&e=2OQhRQzmoeiUJ6C6XIvT`-udqYWgSq-752Aond_p4h z+NDd_o0*zwvQW*U;0%ADt#Eg?CL_?aFK_K|aiH<;`%p7-5fD~2nbjo&sV>=?B6Ga` zBlnMwqk2OL-09C{I%&buLKrJFCx4gipG%kb!$%~3d!7EyRVY5+egX}jr`+8_vzdKyo|W4E z48bQ1TU$buoe1zHg9IC?88|D85ACF$W3;@Fxi==>T1&GQl*Ds4-!hmOfmnQFMGBnE ziKaBFr(at(d_+5U{b;eQa6PEBw4lz;PptWX@K=HG+kDV|?@d>OtL3MtWjJkBtpw?! zZNq{cJdCG8jW~s2-74SaT=-OOW?*JSGj1O-D*4ix78A}SsMu9?U6J?zJFKOl5%CRL zfF1gv6_Upeh!wDGjpx%IJPWe{I<0f;)SNDglq=5{IXQtZS4>+&1ePC@1V`g(L3ASD zX;Pj5XL+zMLsPdEKn{$1HTv)Vx!{5=K-uSW(*^_|ow8xJAls3eQ#yJza zF6#Ug6HY-DZik$dgo)211DnZ(&Y?GiK#S?prbQ48GskjVBTzf{PQI>f$Jlnp7!{NE zVy4Xzn{r8j9zm!5vXRHRw%3W0a_zOKg{6N=4wenDz4o)|9{(EF_-E)U%#ghG!zc$8 zEQV5qykJ5yZ%=u80kS_R6`*s&-?yLVntQZ{omP~Jc*#cv81Vk<10aJgo)#qdZ3u3u zXOd_Y)T6TBFo#<^DzrLDv=5n>(%NCLs0hLIRSQd5FazD9{NTo032!#T^fY-&^tFrx79M^6T3Od6}Fw- zNr3mEc_C1V5P4}+&N}jFCly4!+TC)+H8uO^eU(bX z47`JDGI@iF8o2k6ltEStMb`rR(@6P>@)WiwJuf$=MNWh@yRFxXH>0&_@A6%#qtzgY z3t}0n@#zg&SE1N+cv;rHTe5q`s~q;eIo4U?sq@q|5= zT5J35ejmM5GW|2t?N6*@!7cfk6EVl&(|wGJQ`Ef5bUST%69ejQN94PC<`t#<%@np9 zs{d*yq^DR}UzA(L0U^4rZVq0(Rz=rLpm@ay#A)rTr0 z;|WDPG&eIW&o!xI!Tezgza&B*3R>TN7}SzR37Jh$t-3mgpFJ_IOw6{y2d1i>L@OX5 z4yQe`Iq!cw?_HLO9QgUHw9Y|)P^mnddyr%s`mo~c8v)<)`nm1tqe)uF;^=| z7%#2Wj1_bmoJc#9mFr@z8~D-U4S>9j&=`?WId{(K0(a{YmsYZJ0V>g(ZmxOt1V^2) z1~_&oEWW3W9LnuZE`s&iP+ZEngj&UmN6d%^+pb9=5?0=WXXyo#L!{z5Zp0FttJuQvjKIcPNm_cIH~3M+!} zH4kvTib-ad>LW97Ct4U%%lUJ25tp+yZi+-7$7Z!))?*jY%OuHq9U}v#8H;qi9cha+ zV{&${3i^?E4`v$sl_pCvO(~M6@rE42`fNY<2)+aFJ~NGP3C_J=-A{d&m(^g?Df;as zf!6ZoB_)1;(+)y5arR&PVUHxkeT`JAQK9iY0rXydh7{}V$r<+E;uCPOw?1diX5 z5_^dAq0B>zEpiL1fv0!q_OroM6p%Zkven|#>H0>bG_y+J=7tzw^1@Kc4F6R?H82yr zs!+S2s`=*+*_CxkVE` z#dBT&6=Bce{jPG4TZNy%Y(Y4}hEQ5`7DEj!PD4EW?IkB3d^e?lsICw@3My3O7nHA0 zKu57(;k39V#%K-qFEHoQK?=iK6QYn1$OMyeGo!J{9HoZisKUJ11UNIN(`tf_0!!2R zz6DB3h>W*!DfSexSEWso>`{CIn97*Aw7|XO)r5cLm-a4)Q76Ku#&c58IZ-X??N?B@ z1zb!W{7AA3zw|yXX`6D@%fmdxiu}B|_Qk1znE+L)X~~|fIv3%jfMH>2Du{cOY%@Vv z6A5u^!C#xP?N_Y|RT6GMbm#GM3beiuR4%z5)aJp)SaH0*z%EM}WlzOJIeo#7)5_;R zz{qa5NcywVC&F@&oDG z0Cq9Ay5UIkSOV`zpu7?H=-)Lc7$J)ckB+ilMaBsK@G1=S#KW{D7QDZ9b*vp>y4Kg& z&Z(~hNwI+~7Db14gSk6Ub;0LL!lJ^K>>VLCwi2Z5gz zX%shYSQUZ;q>u&Sq5YZx@%7AnM3$50cjMT-7#o(}cp1_D;P(HIw3CwLeO?>twpDjn;mF(QP=10Yu zc{2Hwzb!kKSC)^o!_mr*OKJ8X)j-U-?UdGUTCo5Z-7B2`kx~ii`w-QkiVx&d{LKRo z<)i!mR(qjt^snw+o4H{!>R|@_oFBi%qls;^mA$LyWY~WG&Z|01VOCMAcDg~cr!>RPS;|v>r(MtJSMrOK 
za8Zh&v#fV!g`j-ua}2k4Cs=f~6>aFKJl}B*eb1b@qs-ns{bh*>gUhw`1WpwfS_8FT z(_2^1ygLp4h;c&l10`AC5ESx@GJog`>J-HIYIoh9av5XCwQqhf;w4Wm z(yfq-g2xn5A~H z@0LA;NVeUmhI7#!I?W55HF)ncb5M4Ri+8DCwc`BGx!V}-WEEVKj z?2lCUhgg`77lGCjBL9pU)ZlrI`3Q?U7<<-X-Uawxseu$v74-2DedsOeyqf>Cb@P_t#hyG|#= z<*uK3LwJ_IZG)-Gx>DxL4KA5TXW_NiFcG)io$<^qT!**^Ikc{i-?RwIuN$Txe&1Q- z)TH$woTFny4ll&{NyH{@9mHt{h8mMauUHcim17r6k_9B&_t)R30_b`Z^A;C`Rif

Xm&(_3b@k{%lps_qt<1mk9{=#1aIV;qFDB-2$H}=G6yN^_D7H_>? zg`g1D>xZ!f(Ol=hRtcwxyAf5A-pCNrAA5TXMuRFe%3c2&Q)Uojg3BiL9Y=!%a?Pbd z!n16<)lPWo)GOeh;H~#;N7v(Hc1DJSKqjJ35mgKd4u?2>XckHz;2#0T#txNQPfu-8B9!6m2BmkklB3N zN!~axK5(xW^S9qOC}1zWCPa+g=h}#~0_>kYGMLD~l(${Ej>itTb@AcyX%1JY1dj7q zeu&LgX=C=!1n;&?C|Vg_)C0Ob-5ZpvtT?!{sG>&mS^|}2!)1k&4p@0;5gH*ETUh~UQ!`} z?9v3TbY8p(V@~NKl{gKO)W#~#+4$be7FT9REc!NCuk(5h<=4Qr+sItsE==&Wzsy`Y z9n>c|mHgS^z#JbooC7Da)An_2@P%%q z$rpPO=S^FS^CnR9`W=-geo=^s5@0lNekhBJO$NnqH%ozgFTEn(WOpdDuSMLVDueDO zFs(3?ynX_~;Gk^6Qg?7`p#*{f?^kqCZ8GMk)KDIzN5oq4d&;;0j-1WOK!GzaE{mi> zlo98jr+$$7VU1*!8QdYh_1a0wk{TbUBIuFUP!r4z)etOts0CV}?V0qt_Z)#v^M^r& zt43`Wi9|XsReGV)-LOg!_Er z{HyY|@qtcNT5utD6raii>xZB4Ty1P_R)kW=e$OjJQE9L`nRZYVj!g_TBotKc6o1g) zMi~OYb#1qlPOHa30Nzd!LSlXia0%E=D|S$M4o7j$h3XP!$#cld1hlj_+EB4ys18T4 z*yLCu9|Z@^6neDV+RfMME{h;~6-EJkf6KL7=S<3+9Sr+(HWs@6P(st5c18)CM~;{O zla(G8CAuo8kG~XS023!r1wT~JN8kX zQMg<8UeQjjd>}gNQWF-={m-aVI#=M5l@e=!?7_aJC5o^gEzaqK77Fa$bW9aQ4GhN_ zMz99hc~OOSJ~Y#ueG`RQ;!V)>XllSD-^H z*$k?P0%rn4Ez1ki8cTFg)>6<_bZLnewU2Zg;|vbXtG`mym4UZ)nMf;8xM1Tm^yrXb*jn>s88(NDi+%0(84ideH1=Z;WS_vul3A*e<0`VBt$^g*7}7w401X+$HZGhlfFW#Ta9 zu8KbY$D4e(d8WS|C;70-6qS1c=L}M*-h9*D!LuzBK)485593i`*?=*HXZ`5 zKRubyoFtlfC_qm0J&!KJJ8KTfe{@BsNq+#==mCAfmD@9~j^pEmR`P2zah^k>GY)UX z96+dS*YrTX8=8CAWX_~8H|^_X0Yz-HA!Ak;Q-@aT0NZXH0KB=BRrMk9S}GjL5~>yugInGxdEnc^U` z#TT*Yj|A4lemx=~{sJ{7ap{h3g~wQc1BLmQoJ0~RJQ!+)PrF_+%ajthIU&3GN~3Db zr)+R_Ea!3XUNP%i4#DS8w%+e??=Q15_w4>EEtk?A?-)$8S~!4V#9E$zqSECUplp$C z1_7+G;Vu-bbe#hv7cD+V>g%|M6ZI0M@qw%c>|K+qN%m1sys@&{&VIOcSI?ivgw`HT zMwbdjGng?&y^eNM>kr%(`}e+2BT!UL6y`b8>*RXcj=(DHt$wWZYar{U*YwIAYP)wc zXI+=>XLBo?j}mIlnoe3ivK{u{`erF3wmCKK!6MwKHFbz`H(jn7$0PJg0IAGX5{^_} zM*`0>;Esl-2NuD(ZlSS(fk6R*zq*Rf2gIaC9SeK?h&hCqs1L{b%85W#tBz*Xb>AH` zc<2?j4?;_QqnC6eUT>BaywO^|7XINdU&gh%0jd$tk3$H8$o)%Cxy|3pt|EXb{!qen zv=Z90*v^=6|J#xHf5kbdUOH2H8K6LAqxIXx3QWEsjqZN@{YoQW=b*z;!J_yx|KVyW z<{j2_i+cmT>lD<9ed!%;F##yinW@{-OJhxJN`U6SrDfY1s0%iTJ|&4fo&cBaIsYz8 zw}(U7ggm;l136F{r=N&6m8(r$9E)G9(?MapjU}2xs_x-Xf;HYX)*1>oeyP#PqPLM0 zZ{4gMb_cG@a$4K%H}5LhVSmLlkVz|0=~HOqv9b-u5*A(zV4#(Xp4j_pa(zNrgv^iz zFD57j62SvTo`U5s#1Dnc>@_-7I~AyeR27}Es?tlb%5F&5m`oX$PC>Bv^8s$Q<=Mxd z8KwOHo%9A`RW($YDDAR!1)o;v={UNBg^*|&BalzJ4!Sv)Y`!|OGz7m)1JS!f>m=5s zk}Ey2_j(i#%_Tn`39cK@QWTdAQGEQ=%eI}ZLf_S& z2XHe?G+IicKk(3D>s6uWgD7|aU5hg$W}k2LC1PRbsiy~Ir(-wWJpjlu-N`LW{VUd>Vng#tl!)xWeY6FY7TJBFeRiu5O{B-E?X~{ME_P++*s{e^n8z>?NLCD??X4F?B9zF$0i>$Ph&~5ib`V0b zl>!VY{QV);c8cB`x%#NKSHXm7tzefiM{&8Exbd%UZ&<}0J^2F4lS-%Jwmiy8@ zSI?4-yMUC7R}-Zw4-EMgR6Q()Bw&du+K@hcz7D@M)!Epf`)<}=$~%cT1t=xWukZPF zg_q>>#et#2Gl-s5*sdJYtXVF)tR1ASD4rbVpU`;)ad3Bgn(MdwS6N(_f^tycSZTl? z_EAQ}s)d48wlg}#w+*-B(Ioq?kML@!t#*}+TzEC9Yxd!Ojn^0HM{}Mq%hd>~e#@jNpG?b^ddpA>ob z2DKTsx8|X>Zv7YmpjgOs#&SiGBov0PNSy|M-|ey$9!$b8LUvLr?`onY+fPKg&nF`% zmgI&MOaRZGC?~B4^D(fc zg})3?&)-IYUFSLJ;-w<6hZA9HW9-W59wL~1vCMRIe_1UM~HtI2U*B4O(E2rN&g8m>f&~q76{ogP?gI@Wm z;lDO9Sa0UwAI2X|YYI=;3KFsK|5U6W@pE2!0Y6Zio+l?DSXW2|>R;@68M9oteN1Qm z1}M_ha;n6hYwk8;37mBBHe1_*P-97-^Bl`Yg6iqim~@q$r9gN#}m{R5M!LME_u7l@w$$GaDq=aw#e{auIjD`}g`GP*+mB?KpG8?;+KMtr!FzQfNYN_;w%jqNsgvX?h~~rfbMOPtqV<%k z3{~i8lY1N5le}G=jp*ed9ZrWWty#5!bnv26w`YXRj-MNXjVGNs8JbK;L z=)FxFz7Hz#@pZ!03=B_4gv5cbFNSBmm%e(#(GaUi-y&qaUPbnu;J!Gz`4$zNzaCmMfk$$?XNUG>xtHz&8OaN7-je=me&$BS# zJ{a6vf8)6tOqYXg^Qw;D&=ap0lOfy(mNTBc$uk^cJiPUI(g~jb#sLQNn>0OK24R}! 
z&82A>jz4ZTA2%^CPtkRfCNOqGBku=hNQ0LNs0l(+&&`Z14TmQapN2`X5))nPlaP&Q zyLWy#Jz|f-+EK-AH#W?Y?5dJYZ=)faBEN}Zan?UuSH57|L-@FsI^ZzaJ2(P&Exz(P z){SAg9c`kU=@rdM?*hx=iM#x7o_Rm9jg$>v>qmP!JQKS_d4M{h#Vs0qnlUchqJXFn z$x~kNdu83!wfM<+n-@qA9DC5yqUnT+ZTdHXz>XF(O;GS9A zl0J0T-h%k7ynL;hip`HXjGC~ml{HiWTS_~z zW(X6>_q)u&rBUg~U!d7tTpMPQZlm&+hg(&$3EXGhvGwHq@Lr7W``-9QA26lRIr+9|7tlBY+;pV zcF-Y{){5-tzup>V?Er>;qap4pAqPsKB=>#HSy%S0d8V2eM)jANHxXTdEcE8SJ*o^B zkrnU@PLz0$(8;%Y&ihL;I%=7HY7PNEQ$jdDjeAYXk2g zP3`@%-nrF|0&xJV=rys^FG8Mfb=yqLe%r&_=VqP6uXDg5LaSK>lS>cf$ z-1;99TT(i*9k6%2QFi=~by*&{0q4X7#k)fnk5!Y$jKx4NZdq@aIQ`H}yYEqKwa9@a zc+NYC!T~WmisnJEop+sh)BY+LB5EWz=17 zuL<|4PAPxA2+ZZ3^qPF!CgroRWfBSnp%w=9$(Qx2``m{W3v^NFr(-Jq4+BG*BA)f4G8e-!7JL4Er%v7Jnvg)4PhQ}MH> zNM}Vd?1x0#&Ol`#AQHmtdnx52;h+SXx7Fki1g9?Y# zkJ7w8)c$W0S|CY@X``YI?dIaj`+*SPK;l>hHAt;FRAWnU7ng5sKzD%D>*L zWEo9?li%)Ke*N{Pf^{a7T>F0@n9c?NO}JRK zCffD82Mgn|8s$DNr0H9h&L9Y(ILtI~_KHlpz%K$MvV-3Do(+C^9R>wj%@r6Lp3Cr_ z+cm^s(rFI6%#K=nViTC!dO?%AmQ9vJi@oEY_yQ+!d8wq)W3{xv-8_ybM?juq z(QS-4iYHBn(z(OPT`%j0FR*wcfk}W@uVXPMe&PxDzq@{r@7~6{g(A%?8@cddr)9dn zI5ZZc2h(}aeR3}LM1uZ8iWbTb6wbEqx=|yI;*mPYI%QKnLqS+7g(@bK%_a7>RWTnRbue0#?82sBLJ6oF?l7|Xe~a_Rtl(vy^2Ss)ctjm z-pELFubrCL$=A#FJr1P60Q6YDxMoB4^LIMERF(9JniM4*1knSa$k9~&%iMo52An0p z>fc^ipam6@hH*H?)3F0SK^*FTKCS z4k$OQ+AICj+RnXPrh4R9LyZCtWdURy!>A$5_tB{XTo{+|>iNw-%=Ii(3{^7oKCC#y zj%;P2cFh95VV$Jx({<)!Nhy?16=nl6oR48MDg6HonVp>Q+8=lKJ&6|+dQ6R#;3S#A zt%664(Dgj;VHw(77l?%!x2aXSd{5tj^0)G$V(fq814}!wLtZ(M_e?$qUlZ486S(O4 zj1t@AtSXqpD9^hqUtMl3SS8AwObB+ukD+!2&^0B}YY$CgzTELu>t{o39Ap3a!2S}1 zn;fVV*xP3zZJz&Up=R{zs0j4o8eCMOuDrVW?o*aRsiB3kyT4Rqz3=~4bVoHzg7z$e zyY$@Oe=YQ?FYd=poAgKPB-2I{hM{KWEkv#IRO(PjIu1%m8ewJEhxVHT%D_mBlIThdGO!K+%j%Ux!i3Sq)ZXCkp^2h2N?@JdeKS5RXg z#4l;y_HXx9X-kFjGawU;Df^bMj5-t*(=Ccl+qTxUZO=7r+qTxUZQHhO+qP|+zw^)DxyiZb+uuGf=Op*xcE&?@ zb<#<7b&ndUQj!!C+cX3MQWq6gP*dO}n*GmQ!526On0g#+8w3alh^(aRA@3}$ZX5Lg z5K90b-BkHY`@8rIdei3IXX7_}$A_M$!sqsJcclK7mqWN9koZgFJ^0=7v$YNS%<^e| zS@&jtmowJg@5lA4|MT{>);0HOubfxCcjA}$4dh4iYk0wOjsL}R4gB5rnO9l&M)(9c z@&y231@!t>{PNymeuI7}ZlB%&$)GEKMFP`(1-^N|g8;iP@Q1!JKr+DKhyAzVm*;D) zLme@muj(^3(Lg@ct_`BYh6K54*$=0Z{l|`StqJf9?CoJMB&K zG5^eaL0s)~Z&wl%W4!`X$jh~=z^>>CR{WCzX-{xDvhrlc0qu+ktj9(i76)<#<`kwYkcqOnY zFaiksJpmN`VD8x0#%=h0@E_}!@44vr0UnZ;v3V_nU zfZLxm0}V^XRF7HM&!K=l?~hfwy_9`&-?OXRcb8GDO(||eaj**A`P@eRnD3m{~U73EGb#;pB?Fc)*PC|C?2eGcm%X=+8Z`)NI3x_N) z1Hb-XdKS+9X5IjtM(4Nz)?#~v&}m)NRJw>FEL}a(Nu1`YJ~Tw0^nmHS?TBCQKpAO` zyo#6yoCFy#H!+(_X5gUwXBVY((9$Oba6*NyE96WTqMp5$g6#zn_;rQwkGh{Sujs%c zyG_n0+QhOkr`h;a9>*5-{|_8%Zbvqls^NX`5KG;?I$1HUV!Co-DM1I5htdV6{{`ll zv-DbLqsK*ES&3n)9tXF)pQC(UdDm~{>T2&p9_iyBH|U}quc_zDZyrksxJ7DbVTsqa z3Jng=AkL&UdKi7ZM3UUm`mQ;d-IpxV|10LHA2(sKo0gkZwkcs}(0w5jN05^?57X!Y zE2|lP83v>!oK>_> zeYNQ658++9d%`5Vv(Q$sao?j)HA9kRR+%JPgo(W8*^~~EqxTjPOGH!S=&eXJ?=qN$ z)Y$8!)ZIj>>ER%S4bKf_{U6uLrgK?0$^XAWl9^U31uMTaV5HtNUGUb*xw&7m(R8U# z*4Nf*Q0N7bp^d!v>+Vu^tRuM6wnrn2YyVw^-!j+fr}l#*UpE!59xz6oB2v)Eaqz(Q z-(2BGj0j%qxrcaOrA@q;uQPb}cm4kVE*$?|nEqd~Yht3zzaE7$KRV_3Ync?RVK@*F zP$6Ux5w1;YS}z@&D|3p12K~>s;o9Z#*JF`oed{RVe|!R=p%3_!Px1y9ZOjX|rCkS6 z{VaGm>+ye;WguA}XNOrEF>O%qXi4#{RgZorYwj8GPBv%5j1is;$34L2M_W0!aSMI= z+tOAy;-xy`Ss0;qW)<&!D~XQPuYDeI_m3`~0eoo^Rld1F$LM9rzMwqG$~3gKrv0Oy zZ{QmnENRataFpZPOP$F~_HUIf8@KXc`0hjo$Xu#4cU6Z4E%3-+cVoe&A6|NQI^#*+ zf88dTq)Aqo&tS66cb1@-KzZ)Iwmu-WZNjB?izJH$D&5V)c(@18SgMu(99o1mqS{Kd z|K~TFpAqHGt+ZpzMO?Uk?Ven^J%xGWw@f2mD(D`6Vb8s})l7vmy~Vnxy~YxFp~Vn~ zldkvEa6)UN z*H}N2626l6pAD2|y4#h?78{=kh8jH+h_iF^uo&{Op{n!#ncXmaA)XU@(+%JO|kKgmB<~CAy zsjxe(Xu`CyO|PodPc%#NvLrY(b{erM`XSk_P^C6XWK~1yaD(H*ktmktLVo>7Hkr5K zSZiv4$|+<7ldm-9K}{~H=47{n>#x`bWW;?T#syz66n_n(aWT_ 
zbvy}tL1jR88L2_CGqzVt5S8)-!kUNm(tno;ZJ_gfM?Gn;szw7%irPil)DfzR?Rj{} z#1{4@21j`iRTCInKC>9@fJgq9LgRlrUOf#rM7RBar)B;(GUxx9t?H;E_l{9HO~XhS zIqYRl1Zw*9Obo3vriuUFeOpv)&DM?us>el1W3B%Sm;RrZWQ!U5$)b$i{jUSZDosRM zO+8}CGE}?_lC8qljlI#{|35^4#cyc3)X+RaO&NM_s6}*UH@TQl zG#Es3`H{gY{O#CYPRlBZO4+%W?q`Ft*TVCP>>eH~yV8|_2cvEN7sB)OlwL80jix+g z1GdoNztvetx4VS@67s>O#QTQ-+R~WP1D*qcgNRLTbDR*>`}O)wFUkE6=_)r_sb;+} z^m~I~aY3G`?tn3E^T{+M=7o=OG{Tk&xm_{WW%T$FLTnB*?txwUDYKOOjqs^P6i=~@ z?_Ekqt*d$175XnjY9Q;>Ivc5e*zEK9pSu8C#F|f^VAQ|`$52>!3f5e$Owjn;JL~Md z8<3L+6JA4ht9j8n;EhGz8BO%Ru$Ki9rmk7Pzl) z*Qr}jA0e_$m^#o1vb0ng`XNfNmmhP~BEXwGNt9U*(dr0Wa%gHpS zEC(6$1UYE?`TW@HZSM!yYxJX3YcLZnsoO)|kk_mkW(+BYyMCcid}MrHDLAzKDS{C8 z@`mo$7T%kvmtU~!Jfm@Ef|NPAyt(VPk7vW1`|P;bd9k>*o~-XB%q49h;W@A6-3}Pj z8>52gy147>(GBQb|B#BLrqh3>bwxjn2pC83yv%r1qPl3X3-`gxyC8Lpvz4;3NcBUw zKUmt|N<0vi+PTWy;h1_*3#ToTav;OPYJ2t8CD!@M8f|=DW`%U9{=a8H5|)Mc`*c?dSL_`~MUbu-CvDtc9zy3uS_5 zI_qL5!t4^%*R!vPmNy7J%HG1E5xyCH&x!bb81W41AJ>$?kuxx@44q%q4SFnPpfc7~ zUXdlBE9I^7^_hE@PAvI(8faU@sWqsTWAqh=dty38`jHx$^Yr*$Y=o3`WWP*3bww~f zF=VpqGnmw%d*5n;1keF%xHIBQQ=nHz%nz|U#bLE^B85`;^3msV7^&7#-7B)!C25BvbqpX4Nn@( zt29yFanJ9zXk=9{7fn4M4H7HV<(HdcYFEhzdnHv|Y$7Uzcv3NPomt}P;|I&sR(3-I zuVU@=+2ddw8f$1ic@E)h&GISQPwthGrS@yRsURY$%YvKFB=en?JzdhW6crF=O&I?X zTfgvx@Jd<`*ucMggy5BDn#5=7X;Wt~-ejP16~K6R1WL2Ar0!{**TO_}`vEt)d`vWJ zPh)TjnrCLNiXi|QaHE>hC2GtXAn&>Sgn84t`J)ag2U6R?M&Uj@F$`-KUf3^`=xAb0 z55RNuPS&1d7pF%#DydcyXbrys5;eBFo0W_sPNFD)!tcp)94tb46P% z+}pkFm9Ha(KC;tn`KzAw(I}^Oq15{vQ{O-1bF#igmAX3LkD|~)XGONmM;#75(bpU*%O+Sr*#ul@=b1Ip|wCEn|w=r;txj6nV zel)UL0hWR_DU*=0iSx#q)dWL~ka!hhLdj+6OutV;&m}m^O~?m)yj4O%KcP2&b^FK) z%owI!id6x_@AV0x>wvRMAAVL!aBO#KMbl`={z1sK%aG#hb++!a5I&$Y1|5`!$#+*_d9MFd8Z)cdz}6xCm7i++3kB->&4PjZ zrI#C<=CT944V)%|<=s7A2Uzu=oiUWKkPt-=kCdP_q5?Vo4Ym)ZtDz!@!$fARGzvKs z<(uN15JAs;5Z~C(nY0_eRrzrNHTOcq*F?UXXRNzFmm|kwD9IiX40nK}9SmEX^U3i> zzu7j~35E(&Lh-ZMUFhByL)}RrstP^CVs8%atr}hM!KlG;70*u4N9W{X8#m>!z6-$PvuHNHa_0 zw(!H8r>y~`+)P}2?_23gROIIWvYaK4jhnU`ruqFXr`*$D7U1+zZjt^FA;|#K9m2y~ z^So&2*CU>S=$2?tvI?`aW9|r8zS{GU2B5y!uCs29Y4z0S1bWr-BbY&nft4|F$xhUZ zVl=%0d~M1D06!IE#f=<=3}sKe$!?T`%CM}|OI?$LqmO*0hAwNxVRO0-K$n`Ab1;}P zU!9e1)G;xTq2{g&_t-@Ayan=40`0E-Hv(cU3NBsfdy#R+(Y#@8Fj zab_#31;d2H^V*Q%yWLg}w5OK?-bI=<ZsPXDI&Erg^)_8Q)rsa&bg|i;LNEQ-kH9UnGJq0MbdCR$h;Wt z4gg6+9o8_a1x{=g6E2Rl8~P@7<6#*b^oIh@ai>C3(*z8rfog!s8lu&qstUdWBou%> z{wid&E&ahAJgA_KBQS1zW#Z3@bs4Jr| z&9=w`*!tkhb>8l%N5)az6TP&S!9jzb2S~+S7W0T88$u{cVQg`jb5yHcDj$KUFn#GI z-iP?JKYP}vDt?6$myuP?roUBsDY$JXaQ2b$(qEn}BQ!ev4qsa}EO4j|vNu(})eHoC z+nLdL#VkDOM5ckESOzrRgi4R2jw;64J80wjKT4GGw;Gc07I-|j62tU_4@AN4f*-*6 z2g$@@KGG?%ynM!C{M+s;XsVJMsO`E8mkd?nOi}I`@S3hZ7)S5wKH`cYjNH%QiC3Ke zt?`UJ=niJh@ZroQ*w+N8cj&Ac*%p$ZEmn;8 ze^0|wHscHm6oJJ_`;8VJ_Cw2z{D;X#WCemAQyvY^)lvVX(Z*fyFV<;VQievr#k40? 
zv@-`TXl*z01?Bh#$sV6DnZy_xOs3&=Wt_+{;lh}(US_Dp9unFS<|eA?dONHJ@_Seb z9Ro>jtu`YOm2bm`RSrpMnPUoxo9`uR@36TRyMp5%2>s3+A=b`dRR=(#ndN!8C_~;} zy@!qKH`uk7a(m-X289RZ5KRx#+prPd*`(dI&DfQgctTphl{u=;Jex9t`)#)5yWlgi zQSwqeq5Ag=TY<<^+vid=@Y#$5`KeYRf-;axlPV~@m&W0%kbd)~p}ViFW16*N~^wMF+{Qd7X-@*hB83fEq#rUXscgIJ=vIz*!K?*7@ z)uF;K@-NODRQQdPzy;?FVZ1vVd^qQkZDpP@veq+iPA%OK2b<08pW^uby1PnJqTA`o zN^tCj{RnA(Dv#sgCL~vR&Fh!IgwiGy8x4qLboLbhnq+E-(m2kw6vs|=j5W7at00S2 z&%z5AYWs3b#~HFQY1q~t^8+9s*8L{u16nUni;_V?6c_6}w~!pnU${cc806ZRGAOzo z$AnyNrVF!RBHtPNlLCUiQRp2`^|O_pVucJrjn4YRTBYR3-!1o$fCJgX11z*#JKlm- zC|>FZtuex^Xs4<^Vycc)anN>hMl^6=6LXsKv~DF76UeH}Z95JFWHHb*KE~molOjIv zc|kw^y#zqiQBC4=6mI7MJpJ_u(M_^#I!|)N+qHtP>Vs0GeEQ}nD?-qp*UMpJg^;nZ zt!{3%K!y;Aia}l|_PzL^e_8kp#4TWP%KPs|*M$l5(A_};!7mMTBBwB@;7aC@nS1dnyAykR!)^JPQQiiH^_+F{ZOpn2) z8gjE!2Ft1FgvJ$pCuq7j@}8}btXVq71NHuZ0LQwE!w;MX4Ji_JNl&hA6wj61;_zS1 znd&%>u>^&@(Jk*MGVQrxr;t8p7F`4?3%nIij=z1FDChpW7iotYy3`B^IQ8KSJVxUo z!)~EbJ~C6pRy3ueD1`jG1NTAkib;nDJ4p5lFL8GcYyRS+EAG~`-qWdTJ&{l2+#(PO+3WXJ2Yj!-XQ#{E7^x6>{t_f}SbRhkh@wqG#JwJ16w~Sj$$DN|0-DJwuOd1j`a7#MX({D6 z(Cf1(J#wkY_O^iT+7I-te>03KAS*mp4@ZhdNT}-}Y%{u*`(;Z~9!rNG#nRW2%%3zy zLLR=(6JZDD5i)!Ml4q*>Xqd4h2^Z0c7~aT(GEAhKy5RC3|kQXEK*CrQuhnB zn>794&jSXIK;~Ig#f*NpvM;A#ufk&aurp@E)0Z;s%i^W3MS$#XFjXtdQ4j(f+5nst zL&w#8p;s`iJ&DqWpPS`C$u7_NK}o*B%IwY573t3;S(; zHYaa_!ne7-MOhC48swGlp-9)O(Haqgt+TrdHO_t(2%sR#zTlRjfF%snM~^>ZVqy%uPDUgyI=Z+T4tW$e3#+ury><##Nt#ySXpvHX zf9n~zX%ClJ+tlA3SUlZ27-DLb0`sKFYw?o3Z7kIsik~PaOGUzjF10?3<>K9GOJR>K zGAtR>@Z?QXS!@hH=EWEFMkO62S?af;)NZ8erc8d?RqH9A{q)xnX7On#o~fan6K52= zvT3|VFMwpMSUryqZ2@&a%!$C3;yhYb_N&&3FER$}%FIvux)`l@nGSy2gXsUTZk6bJ zk=|)uybb`Ky?1c)z!CJZRl|QMLepjBMp|e4BFqEMOde{4+hKJA1*jTs$JGS0f3c7t ze?Ku3R=28Rv;!83JoUo#q_@8yXe3CV&cH6@hsW)OV>i8wh=xHPY;C{>;vuyJ``Iis zje8-_B{C@DvR+@|cywN~B*O+tz{!iAJ^0wLmTEqhbem({5B&1Ut|KI& zACtN&#JjO#*5J9(f_|?5C;%xh{In!y@-S#jQ&=tvF-TU;-5ez{!#|$4bvT_jIJihp z-9IvS`aX4j^lh=sZCFit)+hl(fA$6^TSFgT&0c)Gz!%%?Yp4FFq1B4{?1GLT_B`s; zSy*FWO4oUM$l>W&nw-F|51S;>dA0FU9^}=-ylM)Y%BQW09UQ(3f0MbRhST% z2SYAyQ70R+qB1tENyKwb>~WM~H=etdhVCk!mczF1lYLWbFPTgXM4YclaW=!F7xu^F zm2Y2f!IkCTn(N6W?k!&{ z{9jYo+^rp|JGxWX5x`yXz6*LGr5RexyH`Q}t4(tUN%ik*9(0Npn23sN7vp(30YMj{w!v3|pdpIh^V{9gG{G*#tPus?}4>BIDz!qY1hLdXombzSAwrXyQ z93iCrqCmITzjTb(-r(vQXOH_Z`i*jpUx@1_B|zm}&iO`<0{$ny)M%7^S*n~Vb$=&a ze~XsY;7F4V6k2O30y%3Yxe<<%uZgML3`%VSAk-B`I(r`s~85i+QfqLow8! 
ztu0=(RUk`6bSzd#7+pl=#PZJ$B?Bvp+9k^S1Tl|1AFa5uGIQFo?e3UCZ~-&B!&zZr z8d1jZZfXiup?4IPgc~&7L{dtvKRe!o)n<|v8u(b$GiK^SNd|CiB4xO_UEq32m`rGzsIw3|53vc_ zHjty_C|eoOn|hJlsK~#Z`$kBqF?`~)m79rgi5RT#Ri3Hxy}%2_i}T?dX}toJb7Knc zMhcO*n_`&CaC>odFjCd@OwJ&gdR9bq)d4jLN@#PnLx_v8(rB?l;BLA$38}V+v{O%C zr{-Aa5=T826UEh$!?VdOh2y%?IDcx8mh^n~)A_uiBAL9!k^V^VBQj9!nK&h>hub;d5(JE^Xz) z9;zh_T8JJ;@}XXDrr4@br5V##szY-4gH>sB0%&PESlC!%jb?Zy+3E4i+|#Xn6e=;V z$}(RB5&(Dx<6O&cVti2&QFY!JreY!Tfi5WSa@}ewD??)6{uvNXOvH;OcLSj`g#tUJP?`khnI)|hQw4sw1uXLrL4l)ua)O`@py|c_g@vpJgL=s|xcw+ur z_j>*LO3oZ4B6Vrc!p9EM((NKtcTjomUF!Vr40>A$)TSl%3yo8Ta8&Z4Kbv5lM6_h2 zWJ9(wA0=*q916*v{Sb8FyXvxAn+#w;5ng&;SrNUFm=w|-<}Nr`N!;S^i)?keqyJGv zTA!s%E2)qleT2|Pa8p=-uI`=*Z@e&}q%j<&ASmkF(F;<_EtpduMMv=dG43>e<0>d+ zx=%4mPH!X9ip$$vP;|pZhm8~EE1u_K*CXk_8*$E+l>PP@7s=FMU#FR$Tv1?`J53AQ zfWs7x0mJ-N+jO>eS|xj0@U+3|>WKN=PO3bWZNS{ytc=owh6e zB>%wV{)g0pwt^NnceIqkM3(oR${)GbGMfBTCSqC>35Gmpczq_{3qN6M4!)t(DxSMi zrR+qZ=s1$c$FGpz=gt59M0|+!H1b+f;T6HvOhGA4{$td_T>*rRM1Ryyu7@o`f&Zy8 zXh8;A=m=BB+&E@Kfo^oDcEgB@hXDG^{iknwvhow1I?+}5@@Y>$vBdocb4}BZi)I)W zuT`l7_tMuot78LKk&j)&cTbT3YZ2F?ED_qCP#W}AmaxsEyCL0u3G^FsuR(ZxM|@_) z`eQktg++NL^aFz$!NgKQTS3=dwKgy!uuOjHf2UE+H3nCRx}5A)H#wxPXeoM0>O7Gxf^wu7aX2vO54yXrUG zpT?DxPe)2%&dA6puxk_DWLn<=MiwN=d9>+q<-h=&_s>W>Z01Qtd%p(0x|&nj9AFnWiR&&myW>7Wclrvy)i*dZ z${Tp8e_PzML(G(M)z7r6l4M-IR7S#RkoTan!PJ_*S&%ah)wo%i`tGzGmdsf7KtniM zoedNPi9cP2W(A?~sT@CcilWHTP(DiJwkKna@u4_ck)_vkl-mzJqKamgi!Uwym!ENs zFRFst%JC1Yz5LU1$rXM2SChw$W+6d&{@UCu+$d6F*uJ_Hoil2J-=F%Hn)w1xZa4ao!kJ_E1!w8HiC>UJT>QnQgJSb z%8)h3B9x7ktd9>(Ik)>sfl~-4Vb*8`J_;wj1Xp>rxd=t529FP`ZjwPf zB$!&-HtT3w_K%=<4)63hk3RRcDKw+@YXrchdt!&`&pbjF8&yUW4d*l@Uu@l*xR+nP zwjP1Ir?2A!l`nK(7SLLOyxQpisx){E&e(VtlCB-1$hRd_s|0~L%^xFLJ&Vi}NcWQd z9ke;0zhRJ7q41rae{Z_d`#J5_R{jy{KG`on9+B-6Ws2>K$+3E%S1p0Tq@A2gDR9WI zkrT$+-BLb3WNKge_ChHT`zyx&jk#WJItCu zucFglBXee1`5jhG< z#YUU*>e2mPt8RX13-;J)+E(*^yi|)g1gGp_y z6tT(s<}$c3R-X6Cj8l4@-z^<|@)3y{R`tj@>6KdCVAwYFF}lkAiBU%aAIe+fHVI@S zTuzq4Vxv7AJy6@VwND=Hw7Ez<2$;QQOOes8dMNLR37K^6U8Hxe>g1k>@jb z#yp>?PJANG8-|X!*Jhe{zs|**AO>NE@c`@3Im(;u=lS8yK_0(Z28OYiE+p-ZPwqM6 zZ|_0(1mM(`6Q2K?zEEwlqCrR%sPnov#71nvZd^#!SjW~xv+|uXa#P~=2v=Wd5MT~cugvVjgY>$acol-v#2l2gOHL@Va~PxoT@DARDW-iiDhJ zWcC}8*j1xg(B!lf91#41(!2Tu2~l+fe?{4e;AwzTiU4%JrvX0*Vg-{Gry%MZZF|-L z00aj&z=HUQU7e%-l3b0vIgsfgJkg+WVC=%E#@c@-!QFcK+eP?j!!oom&tUcwKo;!w zI2aIazfyKgLkcnEM;8`hVmfp0fWM?pZE&fAJ?sFlHEN)`{pEIosRV+DWen~>oWk?> zC13VK0Ir(3=PH;6naRS(`P*un8E9rAL>Fi3iQVpyFQVzG?KlwL{DQM9e1g1&56qA- zvVYNO)H}QfsTU(Ub#u52MSj;p&@rAg4i|Jx-Jm$&SI&t>x6J+Sa{6$MwAFbz6zyb) zT{;L0t=`ychhK0#Lhz?LasmQ8?;A~DZvS&>9JGRV2pBOL4@o~ywT?X& zKLr}vxBiEi5aWpG5X!7l^{|HrQ~ZGX5km{B?T0~cF{=Q(6jA8{HYQ<= zQMS#@;|>p^l!wLBlkzI-7_nI$?Gia(v?jWAX>WjJV%yILf(7ua=VoY{g)q`F_4$pC zC(0~i24iW>nj3dyz}^r0Lum(x4aI4WJb@IALChT7xoqc1dk2+4U1{NoMV?iMyN&`< zk{yoC^Re3uja^3+v|VHO>_gY1C>5G}^FH7|Lv*xgRh*RKxcSbW1x)|qX{W4+lG&Qx zu_<=xiuw_`OSM&bTiHZf0tWQuz3HoR9*kX$?us9I9>9>SpJ)@JMs^C0!@S+R(49(B zNJ@$F0A1R)q!^?0!P;kovBk&q3alPxj=U9VM(S&ox^evl)a_jatq<2UQwPvVA%N2Qj4zd>H>oIrO+WcAIJ@FyubsUA z|5RnM`mzBqO_91O1H3nwUE(;B*UpT-c?&IL1o>Oq9wf%nOxQP%G)#(;ZUH*0zAR-gRr9BmL z>ZGaQjCe!0XTyX~mcjUtVyTE=XZ#BNORfJ>?`r$AhFnG(;5efYoz*5w>m1_2BTq#d zss8?H7u3s5axl@kO<>L2n}=*a&Y)Pqiv*lWS!2%178UDI6E+J=i23@J*k#)EV{slH z$gKZk5_kTPDL=|Eh;u}NC5bJ)HcRt5^JiP!ChZA(n=}5xuy5A8>l?3y?1xz8(?O~| zri20r0Cu68AB<{Sj|^S3|K*)sM-Hu)^v*IyDu^C?Lngpt_@Tc$1gxdBbZMv%+bXFh zzWU^S5&K=@Or#StP{r+G?=j|7ky8;sKl}X~k(23fM&Yk8ug0D_pSJMppRBj2u_Bpm z)Q6a@PslvRVa+(D0fnGuU7*(QJzxLxm+q-h-E+TJ)etJ6FUH|NiKT=a>DqrNkfi+t zRC84cW=s*Bt@s`X>JT;Wdu#J1`6P<|FVpTLJUpy(5x4B1kh)k9aNXpldpoN?j^8kD 
[base85-encoded binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/tutorials/images/0_to_litgpt/instruction-1.webp b/tutorials/images/0_to_litgpt/instruction-1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..bf21f6adf73c97b3d5cce274e3aaecd582aa8c5e
GIT binary patch
literal 84740
[base85-encoded binary image data omitted]
z!Lpt}-~ad+x08MzGl+%S?vesz<77+mU5Rb!_6a5$rs9c-Aq4k6Wc!B%T$!R--&3O1 z|47_T9KROT#5TMO+TG9_KNB(@&RJm>`eSVB4+|d@+LB{y`&kLV| z8E$S(@m@OKvPSjHbM#8!Klv+2)c|;XzrQ*^Yl5-TX8y$He-{{S9&iwWY_@!}`M@WR z42FNvhev)|Y~HFncmR#;o@vYFgM^Y}ohOKj{RD@Ng9ri#su{W0#cN2@dx<5e4EB0i zxHXV{c}9Tnz#{qpr0z;LCTXX2;Rmb%rZM3F3R95iI<$je-r2?s#lG#4$UQb}&o6G( zCg#AWlRPHg2#%VlyF0ez4K8Ym0vnO@WL_!` z@B!9mERdZmq~o8m8PZpW-zEPd)?YR@cd(D8KZ2wE(j@pPTOmxRT znU1|^}M~E!RQ7-kVuOc~S>s2qJ?^mP@`xF9b8TGrgy z2b=4J766ccFT4Z_RG27VpCD~wXlFnkPE?0QD7P=22J)K&HD3oLmvZQAt!H>-0j1Zj%r)@0APHlrHRk-gW^U_y)|@747dLv;uEcH~Ov z*FwN(V8j&qoz>%SGTKEq_lX+FSYV*ho9pfN!jo0k5DG}AiBHmm*n0ozP1@eJNHz76 z7k3WphLxKXY$?xZu`F-S*8=CS0 z7%Yvad;juh^rvrk=tnCHky8qif=c}6fv*lm7fBptgGvqY>HDq}jJ1H1l2Z4eGgnzt zpyq-Lr^QtF!gL@MI{%TIirS#8drV~M3ZNjZMV2LX1o7KSpH9fSmIM!VAA-#1P6lV3 zRl=o26V+HIEm^Uemh}7>UQ1_Wgj#KPYeV8(c1IlaS8&?=K-Wski7E_Ko7?OP z3uHFidBoi-Er_odHJPAxz1N!u40)!7Zr9(|)t`zAhA`jH`MKrGVcq@9-FXfw_$31@(^V0z&?Ou(+?Do+|z}&3Hs9q#%Go80K(kW4Y#-kX zDZ1L9B=9F70mfl$z@`pVU!OmnVlbvf(V$hX6B5FsaInxp31cDfyM(K zUY!47H7r~>+!Xy{1t=j>j;J0JDgeJcC~a>=+Z^@qJfW7r=kHAn#5Y`A9v3Ig#`jBw z6#!QbyG}0PD4xYG_0B;upWFjTWAYTiTCU=bkuCgQvBCTil}R{RZrRrKK5#T?Tq4cB zV3x%zwR%~P$nt&nA_M()Tz0KnnDbu@$Sl!53bu?rG`eZZKA+^J`pK_`ufoM*_7sU2 z*KSKxS}jUnenWBP`kK%Uq;u0ARYmhVQp2pbh#t=4Kuo6>C#J4OC1qUe3{!t*V zXotipep`>QRy9}3j1#2C@X|e%xt0fl?o}`TyXxVA{2Mi|`m-9hF48n&{rkZJOzC$E zw4U&Jf2bC^B0UYbl4XY+3%`ra%xTwT8jo?x9B_tn-`(P&$s}6f)txt5?LMR2WI6w` z628=1<6)jQmvn%yUkv>5ZVJ>G2uqq_;mXbj#uHZQ|7C*fSoewyKUfK zni(RQC6n~t@7eQTnXKyQKsgA)Z^Q49=;oE2)|20S1V&6ti#KF80kIP)Wv6?ar8<~C zy9c9s<)SzJaLt)E;$^Zsz6A4*qcUYVNh1fU^8!01w$bNCBc!ZBxa5NU76K?>?b(2U z!2$rQB80Zo{72=Wx2WqAmUI-Uwhc1s?-6C|2(#la_vaLq z8w}^2^3l@2WR0b;n7DnX*pa$tVPcUwi~kJRFRfq>D~ zCXfj?9}`U+SxpG!5$q!>9Zut?B94cTa_^vB#!yux}@g4FCq=^%fE0HYXE%@+}bCS$s zsXuY6PC}SmFXt^@1^-2KAj`L`aKXGvhHLc+hvFY^BqB; z1(r9+a8wePHU?|h;luq?^GU2_o;K%UVJB$;3E@uW&5Bdk%6rCDg|(BJp2k6ZpiL$O zC5zVeZ0U?)5EsgM`eESQCUcQ+-Uf3}Le;V@hUvy|BIf1(ma6aBlj@9{y3EUWpH1)U ziP-mR_*#*{HRDSbFt+4(BG48^4<$xOe8GHP&1p+-{YjlA-toV;TT-qKTBjC~HJf8e zN(|JkXc%XZTvQ6_SMbDbxnvumV8lf{Rl4wl`$Xd2)m+%AbR%!yv+-!x51Dsh^N}tr z#R1-O^GWI=R=z`8Zr4s+C^p-7TsP|oCgzi0SjiA202S{c>1G;xX%3Ucv!}&o*NO}> zh^%=t3s9QA&Mg_~xjdG(x(K3s!I9kZQ$P<1B4CoU{r}m#eO@ljdHi|xyW8LJR+0bb z$Akk_i{^Y}F4zz&NU}t5wYh9Mvwsz~T4YyJ!0w?G+GhK9{ziuf*fBv>e?YV$42kEJ zJ~qeIQ37%ovg6*%EHL8>xm2Uy-0#s3;7l-r#O*nCo0A{j&MkQ zvwepwRR9A-3)%r*f6a~b=}RQOG4*SHIJUN#OLRl32LVo*BFb`H(YIYB^aNOp97& zmRjxXRKXO)FhY|G$f_g#n9q^#Yn$wKX_7Nv=**`TI@}Z~A!GFxjk4B>B#<)NkM{H5 zZZS9wmIo2ofK43(r7)PX?JMyLm{l5tc@mS&;~^8Zf3y+8b&S#Q@0pv1oGs(1*bquS zj?{p)=Z6&pyU9zBKY)`){+DY}Ghx6&!vJeiCOt7Vyvx5-9tX%3tvK~}RgvMG$w;=N zza=;dFl#B*!(a73lk(FC5E{v`utok@xEnvUEG1n`Z!S18#=$%?#Y6sE8US!>4S~pV zen|eu-}iMNx{hrOQMkp_aX98GnCDzHD#Njo z%e+r=c6&~m_)4Lfh}5?vW_SGoJ0Zp!-9AK+C28StXuF+~wvz=ig;VJv4-dS9P<`}R z_Ja|>&`4PM`-(Dw`FdnvcKqX%;xkpkD&0<>e7*hv$jA`q)N_z^@erJ}g;Rr3h^1JR zCp<=jWG$8OM*zCXo)?leg`zk8zB4?x^S_$;yn74cFAMmUCfR>Tu#b!Nh%d^c>7OdL4iS=r1Rr=M{uewjmPs?%%;bE>5>c znaM1wZ?NegM)ejySv>!?7(ZrRs@O_O#hF~VeiP2m2-^SvdpKNWZ$TN_llbwJT_R^@ z#Q~@tm>go|Cg0`3$3~_j3gc-KST|d=qoFor=End4CuUrw!CVX&U90(l#R7rPMSFM@3`+9QMO6zXjLAA0EE0{;rTQ{ zK6`Ul5OXuY7deISy!z_7?8f&103^Z*s#N^j|1_~!TmS+5;Byx}&1J=+r=!ODJ5IBw zkz!3Md2_67qQXr)W6l7*@zL!7sZWxB^eUus?=zvpp!k|mbe#~~iz*|gNJ;MxuJDJw zr;1Ja%9U0fcA(qzYOD@VUHP^O6c_SaJMuq!8<+x{af1ec@CNRB# z04#<*f}G?=tx}za6l*QHfu*H`(5R2y-80tsDR7ZXxS+8U!0?XaVR6|f9BL!GnO7= z=bctd{ghMs#q#ikLvDV4U>_cFk;u?rqml3Lw@nZ7X;ASgoVS0H8*=>^0@7lQ$3U#B^zREY#)i1dQ3ndI4Vq+yRe;_IrAL zfnS9{5g^+)*$wbg^MdRB=8kCNTUaQ}pY9F#-v5mGwe}45t@!0|Bl!XZ0uLnL{ojH6 
zLK~l{-y{CRK(3FtkAH?Ax!xon4exg{{Cj|3LLdS*{sI0=&ph9t9~;+kbBYH-!N3_{ zt-sd$(%sESPcqQ^`|We&edg8hHh2G<=+1CL2*JM*=>EO)lE3eNc#`m)4itU8y3V^J zD%v6aZuy3PhkmMg*xB=k{$Be6zdd>S_;Bw8Vgi4DDt?s%1E1kvHE;b7^H%&7{j2;7 zfbg#Zt4MoWl3xzbOp`qUhSJ>>U+^DPuQiuMYrsQb^1JKn&Y9twkgc^hfDu!|LL!~@0G8cS0O&4QvW=l^4G+R>{r?<;7;gJXbIQ= zjQc+N0)FSc#SQnY3N-?QfY9H3pNijkK0OKk20++vvQOY!-c8;IaQXYUKhihL7vBfb zYt0SOy8i<(_ao|Q;e+Rr=H2P#$3GmR2>eJI&Jz3o0y;3wHe z6V?j|n%x!8vHVAvZ9ldzXqzU~d3^g1?S{+({&+q7PiOx>VX`WbOpQs%=TqdeGsR{H z5~8M|X6SiVCZ#4MuH8Tx+#{)s1RWkgK;J0Z=^R0~`+Hu+38Uh11Q-3@r-xjw z-sQATh1LA>NaLN|mT18;r36ouMFbC@zv=9O;;`dNZ3XS|ms24TtH*+6`ut-XYVzw< zmTth-rLHg9r)&;6`A_9;aD_S{-s}+y)s4rTZtc8ZdH7q!#BqqdP8Z~o3(h$#t79@F z)}N+cTa;uF=j;%^eP}+aU0Z5@gT7C93q`~eHOGA7fcT#)9Uqc#1?oP4VxcvvZrUjb zEA@BUid$uperzbi7JH%1FrpBbgTVkla4pd!Srf2=H3oSMR}k^B9B9%JRwQYi4CFf0 zwzJbHCwnGv0)_KdJv6dBzTx{tnIubXhv6e7dA++*pTTFZ&hXXL2kcMrfs&uI0UI7R zmuj#;?v$2gWQXS?A@^9gRCc{wE2TvIl)IsQC7iycL&$3xA?kE)|aA8S@Jst5Vp5X3SRu|ZG3xE0QoY+c9?l3NbOH!AFs zZ(R0A0@vL>pe{lmYT4;~z+;_~qUR>kk$U{t#U|#+VS)9x}=b{FI$?Lm-maHt9 zA1?Ntn*q})+N{T}XpDx-m9ToeF?GerHaj9{lp>Wl{44lXXS$grQCT|Q6AxLpM2Rr+ zhms7`Bdaec{xp;3NK}J`Q^C2ugT3u(7Ro}IfIB4S5LW{Jer{KWXV*)(Jm z*@*O-CNHdSteq}o}Ts)eeymStO&?5qV%W`+M9&0?cY-VP=#;ci(Q#wOa6?v zUjIrK@8Mmo+SvqvRC%dasO__{qdeaYm>H1HM?EIbVnJsF@HBUdwJua!5IV| zg=56pCh}zonQb0d5NK~7_gJ~t0b5XBm7UlYO2M&G(X=qXr293^X|a2!z)aHPaeC+^ zL+nLqa-xqG8C201(bSZ(vjzwY7iI|o*ko#SLHBAypx!xaQcZ8vC_dr&$9Up>l=XG8 zzn#2-aQwG_ZxnEgN{)YuE?{cPUQlxASUncTJx9H7G$o!f(O7x0JW$1OM*IuRIUlKO zM?(JX#grJ!X{k(>jF@W2OKZF}4E~mQan1%q2&Wnzbx^iu*j<%-#-~-ucS=P z?Fk2A)-?kT=@|e!eXvrXR)@mf=}WBwIPk=TTlNZdBwqNUOm%T@|>Z`mR{p1gbX9(yzApPrf(4;e`Tv$)s36JgEvqFMJPRJ71LcL_0` zj2@#ALVWcT1nNBW|93@(2+ZiA!W-Mr_km)09q+q(g3=CBBk*BANOw>*PPbS}jc^f+ z-9izI1SeQUyv^z&Djn}*t-sf3V%na&wect&dsw!rzR={Des>ZpCvR|2PmyqE(!n$> zLu15bII8nQ19vi_d@i#SxLo;$)n;Dv`2O5L)eOmF5gV2{&T~P=2P75>gzv(g{~f{& zGb4Wj4O1%(9_GFTR$V=B$-2HeI#i6s?E3>U})USJ$SaoY3;{{aqk_#Ix7)%P5 z_+jwVN=tT?(v)$-LPi#QLELtus(k%U+xP3mn2N;x z`P$f*u(sr!8rN-cGFKsN?WPDsH>)xJZMpHS_vm(j9D#8c9txg*AYWkmzhRPkk%OZ3 zFPZ(->v-fN5eJ?!$%qgVdm~yI=IN<_t%x!H4eBLjj`*-nEnL^WBVYoqD0;=e5maWZ?SWW6>tTr~RX zsZ2&p#w)DQDd>?XXXM|PC+l(+3^S(e#9W?X17APqF)Sy2#77-G(o-WaXjiX#aPCx5 zUR>857vD;Tt8-^Efp1wD(HXsS%4C-4TM1pdwK~Uie;kk(%rD6L!6!!fH*pYVGWPBW z)Tx!5^JxD$59vqYehm%~DO%ly+HYZ!9vTODe#XSNpbeLAc#7=o3)Zpr%~!~|!CvmOTFYZuKF*}PEw zkvy1AY`}cKZdR8UV$$Mi74bE})iV8`O7uTNT+_Sw2gE8bEYHA%LEe9ru>a%2YOi?iNtoy?Uy7UlJhiz9Zt^tL6&Kj*N?l>uav(h3okYEuGb*O@;s z>>f4r&M_lVQXp~E%pB1E*}@3=L9TISL3MShDXdpG(~XDkC1ivV9J$tz1W0-s^gfjP z#(tuo<@r}_OgmV~qaKYeTG7JU>fWXInmi*w);G}s1O)#%Ogh;ORR{gZ4G?w&Q8`vJD=4iYbL39ZtGAgjY!QI36Dso_11GiF{TGYag_7j(?8RsGAj1kURaZZZ zP9vy&Td<|6QN`+P`#53B8r7%paL#RN{W?Uj7POt^H3YDmMnD%Aso@_-jQgtc9zAA> zoik>>H9opfQ?Qy6A{U!jI*XZYDHU}JQGv9s0}EQ>e;3?-CHHUjOcg_Fu%6Q#FaC~C zs+cqlg(n!+X4|_J;K}u;N>HBGv1|x@UT~6-h}BRiazEiem$Hvt2~Y`fBWC z8|uj^$MV5E-8&>}P=DpnTC%o;Rp1X8^Lnt_5!>+RdPzG55*j7p{eAJ?v?MQS6vvb1 zA_S%QIic>1BX20|w@6>d*dyM?1d%}v+q*fjs94f zmsw0Y4i7}0IST)Sh;=&pot#0@moMRxAS0!@k-nR7%h#n5y~kSsIWxGWsb&n<(1v8b*@+k~26FZ_Op|hLbH;%-Uv^m7JW}wrRBy`L=IBrM8?Z^p|9#W_dc~UMyTm^ z<+RSFDpW2gu&VDm7S>#Ix-y6T49)@xaXx#Rw4ysticcsc(s0PF<~fk$^B9w`weguS zSoyg&id^}~Lb&-11-!a0iyH1q00sLvDrAxXjT8)?$iftUL?t(7;hQgy5~JLudxQG< zwPD`8Q|^~i2r&cfT)&_B9CDgVI;Z`5LoRDLRWWSB5cod*?#~}-U+%wJKN4-)Ua^em zIfKvzn^A6$kt|7nLOGJNzR6Ybm>kkIC>#ZAXUO`L`J9E*7pVO<51@j`U`VTJucLX& zo}Bk1z~b|gmDGvuWh(Zw&<$Ke2cBh%>zt^ifqE8fXJZCco1CH~;*azr4%Nokv8ZA2 zcmI+br&(~^z}RsFt=ppT49BUOQS%O7DafdDQ5{#)1R_}{LW!q%;wK2mQ-0w&=RGSj zN&2Yi${qC0{59YrQNl_%C|&iQhB5w?Evhi3L`DrQTu18`u0apIr87YUZ4d7$@Zh@< 
zIDq!-XP+~w{>O?>BEgBv!QTKZ!NG(bIv7`8mp~iumqZ!7;bU9zfR;Gv13>>xU6i&e zsrizR`{VS}qG&KhLkottdl)F2Y0D9;!o>VjN%`}4h{5lUZv}!&dtW@6+L@l5>U^H> z&uwU92YoN$(k6Kl1|DADz!2?&ru>Xrm2ozRI8!m{jV=oscb;ip)P4U?z_yYJEZ_0r z?-3|-&JekWS>wQ`UX3~!--QFVwf5NL97xAhdXKsL0*#uGz&EkaUVvjptn@EGtyFwlW4C|f#T?L?y+y^zLyvqUFy@adrM*Y`hkFYZDYDxmhF)wR4B}*Pi z@7KNO)K$`)F8UeG=Rt^^O1_u#P8ot#dnMLyi}kw~#9j8k?}`K5*0(td0+jsIr*2P$MmIJkHLe8 zOw!5(4Xl$03)ea-J*^mDmojz235J}$XDU-h8l~lb6Ug{oo7dF(*8S#e|EVMI$hE*v z!>3G%!)9b@^mZq1gga7{;S3c%x-vU!NGczAo`Jf3S&B#5mKKz&jXBU5^2f=7kpHi<#7^WhST-6-Qj{E}tU&vKlrEBM!Tq6!53kX4yJHV^ z^*55xLkuR4ScM|oQzY5XC}!V>+X>MmwK;wAt%2Le4g8rUr!d+)B~MO7U7P{B^gLvN z`%FWyUzw{j&?c=o+(J|DMNx#8Gi*n!ij=f3*kRofyl)&Qf5NAJz@B9AM97zXG;~hZ zzq*$dDJ1=R;o=v*JN5`t(^-U;_zeLd61GL2@Dh22_+zx_aez-KMuv zXIh72n*vJUthoj&XpDX&j`q5yhX@QNdXK)QKhdb*H+(!WYleM;=!cAjH6sV4D3R9b z$ov6`6n+H(B~~a7Qnc1&4Jd!dfe4&ge{S9S;GQhJg6^jf!9rIn^SAQw4VYZHlwz}H zlD+22@#p;({+aozu58K>nNiDMGb!^_4C4+Gq&~`oHG%DMUQBCf->_+8s*tkM;HRhz zzBRh&8XpY&>KaTx8yiSl(3##e%5!W+OI|FjBf8`md45Qmjep$)=;`EI-Zh0VAklfpD6 z7rJNwk^b_<6Vr9?Fn zwqVgglF8$Plg|93pS;iQ@`0ASL{IT9R_3p)xgUDbzCgCcqnOzK^Ht0=V(AC@@VM@m zIdjWrG|KZh#(Ua5jK_C(saUv|isfN~&iu&}Ye?vU+bUP&QkMCmkwJ}}pH4xpfY$is zYPFoC3I2`hwvQ^+4dLZb%Wt*1d4ZU$MN{%GeK&t)0i&4cipH_5PaquRL+eLo5IfvftAU#fScvk~0u@77cdHK~rKHuN?$dGA3( zqs>^Y!9toY#ENdSBD|y3zCZBaH%}=bKetcOLbBgT3a4G2b<@Q+?Q$H5d z^MOAn#Z^qKz+urCqg$oC6jA_pvTK7_H^9X1r9bDq2K*KuNgi$IT;laCrc^3CNKln- zyPWdRC59voAY3TkZI6Z9voS0&g7tc&G9#rgsaeS-*QmQblc&oJvXciwn~QFhP**}( zWO!puoG|Z)Dx5EnHbrib%^lh8nRhUa^ilfI$+Z}Is!vJ?!JZBEpM2D&|!Dw(j zfEyG=UmCeJ0n+{0UgaDX+J}KpP}gDNP=uVFrA!f`673)Lf`Cu)jFpAvzBx$ngE!-s zIxAAqwC0}c-kqBL;0TS==uEl1 z?qtLa%gIfRf!AETa!DlIx-qcj=3USBiN9Bou~G6fnUQr7`WoN$)n${oArRE&&AYaJ zcw~(w8OvWjOK-sc?^|+!^XZ4ef%u+($#~xb59i>F`F%_XUmL#UAvYcOQn-J6F)}K|*cc3%r9|_)O`*+kVnknmX5-X8@uFaN6uc?ykrH+u0qt-e zpG8o8DGKChgKU9RV?~&^hNBFn?W5G@sKn)>3L;t1Z-t^v;dBtCW2CL&Bgw9-k{+sD znZHsH2G+dm-zqg;R?@_26%7}O(>q7Ts0bg21OoltF9Xh`0h87rkG2kJdst=3crcT9}@EdKFQM{ zzUZ!-VlKW;_^ffxW=_5*J|ZJzAu^J zSA4^F$JP-mLysK;`fdXk(n-s%JyC-^(FrC%eBh6?t(YxMn1WW*3{JMPumsNjA2cf4 zBbwVqE@)D~DGzJs(k->TSA$q-$)GOQxTgt|$4!jdGA14Y#STNBHKILh^VwIwb8J@BHE^GzY(( ztR~56vHJc+HzqW!wv*#G$G=doXPS--p_kl&k3^EI)mm{w$gu+P#VIwdW<>q5Hn%#K z(Hx^RLo;&39e_K?R>3`w0Ul}mqV@9#UtM5SI$1YtdiWCsi#2o6z(^w&25Q4ehZlz# z+(qV9;bWc*rF$D~QT%8@Oc8RJ#`5iCSV!NgeXr-4*u zRX^rpIhA0i<`n}2H}k|!ac@2R`Vdk7tu_NgD@5Npu)EyodIQSmk;70()2~t5Kzae0 zA+X0+>}T>|%qfWMRRj&|PWm#~_e7#I(dIV|&iUXKCt1=UbbwOc7hwG9k@RT4D(q0# z3?>|j&)9e2YYP&YB_QVoxo$5evt_xYr2evQ?h|>zyUUPZbTd3ut(MY11)}z3cI6`A zbZw{`K5^d{3|62Ec7Pz;ei4nE6&fe4@6gj9GA)JbIS>+JDniU;eZQp1Mq_U)@g@Y( z)^J1~)23m+(M#%&J(9OEmaBD(tG)NH;$haWau3iS7XiOE+OiU5gbPzQbsfX&yzzp5 zNL!y*mUMirweys&$Lca@YCPP#2Tl8mNS5Hp;toh_htr+KYZq(^`7J4QOYs^`;=|(Dhm@0po_|o@kP*_~auz=fX62?P?b1Ys4|`y$N3OTOa0RpWTiCoT z8xCOMi|7ieE-h%$*k@ld8;3PC4rgyG6w>=Lqhm?U6=kiQ2nSGqQjp?r2~C7EJTYp> zpmJQ(h3PdA@|4hv@zMpe52Z)VsL4NM)GInWj^K^zOTMAvGV@^yc&2|)w`ZxZV|*~R ze_P@#AKOWuFXONv10OYIxa*6U)zvl!^WP_)1F&YyVuaOG?1t{poQuom^D`GWD4NQ# zt*~q-1%}Aa&{$vMZ^*W-s@L+iVh^*lXHeOkqvh{A)i&`|`G%@r6nVabr4Z$zk`4_`p46qxyu~ZI1UdqrE{bB*& z$&zue=~?(j&LtAgapan<Mf{Cu5(JxLMNX3f4%0zf3{kiK5pEwj`czF7g2*XI>Q2I5 zOQ9cJ3cIHoU${4uH?#Jny!gyMFQX4joG02gQh&}SA+YUup1xbxTL%vHKx-kgZ%x+X zUCczA$rwJjvI$}TwwKsF_Ofj^4pjk1_DH>2xWMbpy`{i+akrgS%-TRk;wp`wwT6V38Z`^#!W zqM6lAO#k`Q18B+&%uiWz3nQJO11TUYE8J#Y?N)dlysbhoO;Fdd>rFWfXtL0hXJL?L ztWcj&lq{lI|JkJR2X`0Tx!2-2X)ogxZId#4vHimJXZbV7uBg8`l7+%(Q%<1i@Jx_L z1_&6Nm8)r{D1`1ImS`IL4L{G|hldf4~)pifV``qVlsiQj&f-N(XI4je4 z@JsuHgPv^kR88rz^4<;=n2B7MzMB859WtRU+I|;iwhaeRxoan48lyenQTOed!agGD 
zol?m)=P|&#!@m;$r?$>pfjgC6aR!`(T;4rP^>){Dl5xSypNlS3erLRE(oNRF@~C8R zt$<3a#*mss95$CRE#V{jWulPwL>QiHYTSLIT2O9#q}u@=?1mfW*va&FXf(LsQFAa& z*1Uz5M8(KtqpbG&nZuN*^f~*+HB^e!^}eLpbPxW}To^=xx1dV|s>64*VaVNwlCX9| z1t5V!s2S$Ix8qEZX~McyHP@uiyvHNBHB%JK7&mH^yNy;?-Di+u=}8p~KyD~mNTI{zgu z(qDF9(uVyABflj5K?N29DxNnqYDM0O_6ul4fB8d#>apIDAaMmeW~A@5(z4EVZcMzN zSN=sEU}YZ3#u>07?_XF85Ltvf>IhoUMKJV!=;1A}{7r66@=3H>w$#aS%`7}db^XsD zCr=-k4d|0Hk%1jN0R#|~_ZEgc3ivOo0O5818@L4px+otFom;V)S7K=LdhKDnM)B{B zVzK+Al!r^rfHdZRTW+hqgmq+0I*3%BX*{)>Dnp|wE;LK5}kC$ z&+X5zD$d|s`JNnm0B?@KJ-AB_P@s<%Rlw`~EBW?AK_12?p2+sz^YEbEsS}Fu zws(`^oORn?Mh^bG+AGh!>u@KsItvmrTekJ`c9Y{Y&A5FTWX`ItMTt0NuY*@zC-=SM zR?Q3iGK^!yn7NnK%M4u*KHuOOQvHZYBkt%^cVkC6lagOL2*iJenVU2D$&@tIPVRD1 zQji10VZk+fx-(z3*K}%BXOnGs$YY z7jy~V`pbIt8Z3slJJ1U6omfAB_itm+!<&uD*_PGT02NTb?xnf947&8DA4S0e&_n@R zD1*)7)!2gwtiaB{*-k`x=DSGGR(Af{hPu8h`aw8Vhc`K{Rzpy=#J0S(GXGn)o)X5S zjy?xmYfh^jk{vyxTxX{8J0~R^@9vD>$%_`3{f+85{%qMX@}z*b-qmkHn5FZ}FLJcV zQbzY#O1;E#E+)o?Zkp`Y3qJgU5<6X0Kd3sz(FgfK$!N|S=Q*V=-2K6SdxT8r?8a^- zx_Yc>hf8V3_e*f9IyMO5wFk z&G=4iWry*R4-2GwD){f(wCvf?LPEkG<~L%5oQnm%Q_Vyx#N{7fc1k=*+P2z{;&`n( z*v!Xk)H?;CkM2qAPp)gE*7C5l{x5sIJN7nos&t!`S$RQ|5(v5p>Ut4?lLm zpE~HGD$Em7vZ?!9Hry_A*yxxtc@Ls&rCAqPY%Z@7SJad}&}5*(3*-;hN&o}g z#8_grFf95*v&LN7ahpNSLMmN1L2X1n(N7mT6QBPeIv_WJz31wA#)(-vt!lt^UdEz+ zO-4qtwvAr@&cE4R=carNJ2W47z4}Q=@hP@ck2Gkx2kmEu>cYM$ z`qhM#oQmR^LkfuzT{|nV5N1KuxC3xtrYRGm>S7&E<~1LaNRw}xNx|?UeMIL;$|O1V zp~>B+J96c$QcmD{-Xt2*kLTR}$`a#IRVGj?AhzVanGO-$oUa&{uJ}4h_V?)PHW|D5 zJ=S;7iI14lfh29Rd}_u@B+({7(HnrFG=sSNu}*3xas*J z4n%(M%Yi(Kp-@t+8qtxJa4ZSK#Fp4h9v=&nco9BO2h$vlKKVC@k4!V`_h`PXCkdap zSU5}hVw?yGaZ^}|AO_JWa?&#a-lp}kXKZ~H3wmKnyH9Knm_M!~Y)LF>c;$TNq0`Gh zN-iWY1R*&4Nv3>wc)wP5wegV3b+ud9UB3zJ5j%gtnr-rzlK+${i& zl3MO$53#7(AMDS%srmFZv^;%y`I4y}$P5I6acq!ze{*heT1)G31#_o|FNe!b*fUqxbj}<)AH40vK#ZeHNZn+TCffU|wi`I?c;bf|L5MigpI=;m*f6 z{*$9sDHQXPcbaU|tpq`LZ?{oV;?&3!r!Mh_VDomWWfT&4QH;#yBBh-*&7|O;E|$M( z-8DANF=UZ?n_ZfPBC-4-)vU{dA}#J>fzWm>tE$~kR0M5 zRS1uZoxPbuf@4iYH)u|DN$)6ic^P}N?X&79hw9V;bDDN-p(lNk8&W`GutwHcxG^kO*Ol7XWdd4_ z`cpSGk;-;$(_O5VV`q63aLob)Hj^e>J=vfdUijlGW-fM8qDtVxX|=Cvb}shKoO;fz z_U&*S(TgZkRWIgtpndE3&}XmYT#b_}6xhg~M{K})U2(0ec0PwSr|k3zy@wqP*Qm;* z$nNlxqpzWYYwI@Qf0%De;Q2q1(6R@fk2mzyB#AoAreN!MqoTU(G54>#Fvq#*xn;Mi z0wgBs7FQ1#0ACcbmwkt^B2Zwk{9+BsVvx#2Vo~kZN@!uyP<2f4_>uOS;aRq z)+@YenH`7EKQYIHq%=iLI=%`y^x?6@V@xM{=JT*ZQ}uAX-HT?@_eyEfmg0*kfxeSJ zxy)$E9%|*pcvxDhI7xkDZGo>1=w|V9uT82+f3)z5tL0|&2Uny;lI*f+IBFr|P_=}d zJ-@c6;zygX)ByRv`*0Rln#oJ~UeJ-8dN{Wo(q1PkJK~mc=85<6Q)t>z+uY1t&jzjc z&ItP2!EfgMUIpTu8(Car|IR?9BY!-Jv5oLqwZ!YgrPb{;{~5E#;|TO9?gpmmK4`atG)uJ4bwk%G$KeDx<%_*-~q zV+r@{&UPGO)6i+=WJlL*Gan?My#{6JvPfaX@Q0>utSSnZN!(=2H)P4#L21-8-_>Ar z7Ayuo)6OaeLri-9oJIZkdN-enH|i|FHo)1f_*>}!AAwEi>^LP-xLWGAZ>6?olVrG+ z-+<0;h>Y|)9vcEPbmd|!V=rv|XwMK|X_JZE1!PrszipbyROod-(kT!31Ht~7DYcBR z3Q)uvAnXoF3E{zJ&=6@a0vL!)F=})oKPv{|V}f!&YP@HF!%h z_99rmtzCLmolA|$o}fy-PBM`OLAsS4|JRyyUIVwYcVPN<+b~_ifu0ELlY`j7)BmijQb161L@Ih5gK87#GdQ zY$@x3p+P2YnS3!L0Pt@yMQc4;gH$JOliMn{=|PeJ_O zFsA-83On3Rvn?l}L}yWMf%r4r=7r1TW+i%L{SJZU(yhNdgf^5}-7r`|5vcQp-%Hg* zIsy#Gt~J+~`l!$Lcq>+h-7hc-Ul$8n9z9uUz}R;)oho>I%5j2YLhr0IJc zWK8C-&=9^2r4iwu7@uFQ%<12Fd|$HTNV@WN<7$Kv;_5zQyDWQ zNbddG1d-N}$TWJSZGJ}t$IHix0GXEsF91Pw|9nyf6!%49X7Nc-s#FSyczN?YJ?~{8 zkmdf<$VORLJka6*Y0vVT7PzStet~5KxgU88xjbrKZ}p*{bZBWc1FeZY|G83@f&>TL zNH?0xB|z1&yCtPeN?F6xLcTi62*PP5%jq4_H@oL_Yi+%o6TNm;d?F#vA(yyNy-%X@ zOM1eYOM_D%_@o++yKshhbV0Z5{j3(jg0dH827y$FV<+_Ge!WYH!e+q2K2iNOYLBK( zRBLIj_3OpLX_upoxi^GQepsmI zq$;toLj5K1x~J8kiR*tzTY-l>>NX9=rFFJS~`&Z?eEft2e0N+Bpm}(I)Oo%4oEOybwblScE9;)rrlJ 
zCGh};bMqd_N(~A5%G)iN_|5%O&WG3OOuulA($s;}@}kcGV~qj(>q=)K_?@X-0+Q93 zNpQT3$7Z8Q5IX$J>JW}Px$FhQ)-(s#-7>FY?8B4;xkzhTSB5|JJse5+>qX1cdSGV9 zkPUN=*(t0HJEL|9o3>ndK{SQ*eScLWv%!>Ew{FMUCG7qIQ@fPJ zEDu|tH!UA$j#BTqVDoVg9QB?(Cy9e!^;KNq)!{IqYgsSZ$$>P{?QbF|b`GX$9_s15 zg%zI%cQfvWq^us}b%SYJeB44wL`f=JcicD;HXC;}C#&jtqMOmVFD^m&Ql>h)qJD<4 zUzWVyP>Yh`Km7-%oo;$bi1n$?Q*GNDD-7Cao) z@knY|zSPtKyFF*U^5fG1wRxdZyHQSgKIX6fT#~AbPJ0V%f$#Fk#!Sy5Vc$Ji#W@Eu zaEg6-{yd67J!EiPCMWn1gC1aTIlrD>dhMQNex9Uw+m>4g0z=)+&B~E}N+$ri@AZeC zpAkd)i7-0z!*bQ(?w1&768h-pAv|@Chw6`lZPt%1FUj?xROTiUasplFo(b|VifAm< z)edK$%JA1a6QwnT(S=C~a-!})Z7wpbrrx}YL_mjbW0+6dl5KoZ zxVFOxV-qtCD_&1}#tysUKo31$zB^Ihm?>>wFV04$r=!CiSD=j`GHd1R_De=5K_2WB zK))reL^?P9OG+X7br@Zw2wh$SuR`;bm$y~V>d(2*r`9iAffSi9SH4GPU7Ym1da-!xZ0Bz~95O@HhQ z5S5=5D^<|5+*e2ptwZT`gwJWIh0m!X z|FV!Y=Q~vX#bx~>YRRou#W703^+>+$CAdYNvNtayW-kA9j!)~~}5=!xouijn( z2>OoR&AAW#c^a?I%YpY-kX^i5wd~dJk3Gz2WtpjHyu)at=bOg6xcmeJd|{Q|J_!6Q zzxXz%MKJkqOq&ITP*WCVEX_;%_Ipvn?>b`Tp5|*~6p}|32k=#c&!~hkh4Tn|%w%frFSxo)F0)4m{=%fkq14aG3O%+yBiQM#awM>%a+x!!9Z@8zy1Vp(th<*G;w!s z=`5|6+ikN6Ece!ZPmotR#d^YLpwboSSjqMJkop=X{n%TK?&qY z-+qDN;Pk~jEuTe$+bQ^*lluuS5PE{&J1@NXZdIxpL$-*O{J})|hZfqjUDiT8?7Lg94=i!@d^fG-_ETS(8Q4>$J0y^=A1k=p;|DRbgpJ z%pN*5SpUK1a?_p3hh|3JXC1BKIdr3#!giby2=OJndgupvLtrgjhwqv=bmz`>lJ^i> z@l{rkV**reo5;$K0w!E#?pW}sw!nWxn(l!CrFy=e(>}mz4L9`?v8ZqHwmuVOf8t>-{Y?g z%WJ*Yn_I8cuwwF`r7mO>o-g?w5g-;%ojIh86?A@{O8`!6nM(@sI>W1iLzpvGqD;8y zb8ZAm=4Vd7sI1Htf($XJ5GVa5L-p-ntoOs+_4Fhh?_>Hj#7u`)4e~8E7mQG4*LL!S z#>@*}L^5F?OjPj7REBd6nO>5aX<<(?SzeIa#PH2Evfv_%Tr-zD!g}LF9ex=q1s8J~ zr$TCuzG_)x3*)S78tkIHkB=FRJ8K*Y27fqini$=7$HI7M(iEapU-di}^r0TlS~-+G zDDl;2d4c&aGCM6Y7?$umS7v$(UsjR*tX4~GLO#gxOxXj8&5f3rc1Wn(BlT1-MY* zX9C0#0zgKJkNr}L}GVhyT1L?wt0=-Y_r9Xk5>ntSJP5o{JElmH04jUw@Oj1 zdzQef4bScg?`4E8L&;jfZjW$zsJ5aIBx?Ln07DQ`plI4CLmmTk*7BG_ ztS$h7yQ5bjk)=_k=oQU5wY!s}$X$A!8N_v~ zK(+f*Kdu4ilJJp8%sw4u8TsU#}XJ{5u(M~vw5?)2DXESIZPI#_S{stuj514&P&>BiWf_X5K7 z{^UV}Z)Ni*t-Xzd6K(e5tjcd-L{8AkRvPMZNL_`ltNg8Q3Sqwa-38zq{NHeYmBe!8OCFN zr9OR8BDW^Sx0?*C%AIlIC07hV{Nngu06{>$zhC<$3sI!00tQT1=-b`_K&F-coBYNI zX&`u+!Kxtnu_WzD8@8@E=6r)^pADLgei0hUy6WP8pLcr{wk6K{(vNR7!qWd_rSM^R zwSXlU^o@loXl$&>-T7A~ZIpazFASl%lTrt7B6_Kc5Kv1{*f?g?`V4E02OESYVai>qoEg7_5S!^#_SDR)Uo-o}A_4mRdc6xGDmGWeR8yPziUP5tW(=K@&1Ct^d zyvsLwSjhZZ?1#pqmH}{$c7^*L5&{(Np4y5@`l`;uhR=4jYe55fvhTGV@rVn^X@s6J zNLcqn_U&4M9L1$nGrXW$<=N*+mvXid(FA>J-|U*~j;|krBZ1D!*&z^0#Ab3D=`?Li zI*1I@UOcb|ZvAhH$9D?TwwOe7eXbET8GsLQt!+M@GwRk+!`(RTG7J>@}xp5h$< z66o*C3HPZxD^{zH#YDqKLNL*fLSD4r79>@n%b^4B?PbS>gI9<1GpVmPK&F_L<5D&11#p2(KJ*&Iu!aJ0N^&l(q}i6e69`yY39m0Okc!m~|@fU*uhug&qz{EHpSs5L;!RbX0Bx%1;ZM=ENRzyiIauvbO9mqkl<21>UU-O5?Ouw)4r0ARj{wZXa@%DvuHfE zB~+}zI#7Hu(q1k~F=u6lzGau&B0dJ50)?~#8YgkV`DqBDwhJl)aBo)%d^fJdpjBwb zS!cw<5UywWl|2(*jj-Yb3lYYwmAjZbcKLeBPn)~}rnkqUsbuuBVkDo8QH|Zf%2!kO zXCW7q$vn{Ie`9o?Gylfih7Uy(YMC>I^PYr`NWZ0nT01rI~=WA`RI9n@HkVDF0lX z{BygTv3G`iS{m?X%eCR+d@Sl#A*)uOSZ^q6t=K0L(!Fse7H$6IDmm>NXrpLK1>oc_ z08?mCTa?Z6UemKZo6ZjyuTI-LF5ufQ9ZKo~hSF~Hg=F04%mK{{Jg{XijJ``yZlG%= zjWBzh3SlwivDH3zB?FQ?(Oc<;t+@Q2JM9a?f4gzs0M+5^oZlPeUCft!z9;&??swXN*_GNRc$kH#a z=E9Z+lkrfBUl1PSoJ4bb_$$uDiF+T+q1Z(}NjOzErslTjbY1Y$sdnWoV4_}LkMS8~ zsiL~a!_0uP2y{&@0b?iUiM`+gM0=!hDTNUNXiTIVZUnUcj8@{re}`v0!#)Y{uP*Fj z`9gw8UQb1FxsZs!>x8qizd__GmF3l*J~rI>dKHK2tk!K-uLf7)iy-Gz=qzhD*~0@X zAyX%V)KRL&3&{`=hg%>L{tV;&5&!@I0000000002Q}{WSmz}pDH+{fwf&z&ipky{8 z^fZ)rqaXd6JS}Qtg{L8r0w35!udwc)7B+IvWT@geP{#}yE9NPVn*HN|q~;i1MZl28 zp7Za32_|{HyXY?DF%*q5{RC#r;|ar@000000rAdgRSq5-!?Q70@1KqPp)ei>hQNraD|*WMaQjO0-I;hIx)~p4}oy#y{fx2+ab_Bb>**I4LM@)+r~dA|K5@^ z4W%eP2~57jM1g>9yv`#F?^+AY9{&2Kp@((&wdSOW^QN-$HkPfx@NYlo!5n&_n8Jr) 
zjH7&c+Vc+Qwe`*G3Ta)m^aBpt8+%UQn2X~MXGa9QWb_j!l3ZFJehICuUz%@>DW@Vm z7$x`$AnmMv<)61K0oYkuuejeoFIfS|t}OaRB$y6T5Nptn~jE^98r%}LVOPCf*-h7hJ&{7Hd{Q)kN(6}ZN$m<#@dhBXwbvD6C}a#rI`<$$^N<5_;{6mJY=F zlKOUcSc0vMfn&Ef*PDZG=(t@ydn}iY!w$>{BEEd?w%4XE$DqIcp{rE=!vUFes#1s- zlgC`biQXSh+sB>H>fI3$q;f{zK!9``OjiQgWGcWPQXa;nTi4z6 z0+i?m!2v;_foZp-2xl8fdboxmPnWifHDfVZ#Wm;DS-w_*j%vnDr^z$GYi~bT9%U4i z1{1<&8^am0doyY38#(y(V1_LHPv-+l+MsPSERyQ-%eW5#M1LtEg$ytH$ZWI4M$w^3P%T>KQUj7Sp(m2 z>SEdyvEYu7j;S*~Ny7!1IUu^AU!qM>Q*(Jsb-CzG3q>u-|6tpiL@7MUnPs?z!=vS^U&>^C0yglT8KSs#q2<=nM}A_FR{xmpM0Gd;Pcj)G&- zOCELR#wAQC8sj7NCP{9#B{&;MN`j3*AARKsibALym<7kS@~GDe>gf5TP8R*9Eb?-> zc~DzN22rmof@q*0^0a9^>NFA~VcjYM+7_|%Ui^SmXw;fL*jh>}b4b1+fswkvw+;om z?OX6%Vl)7JIOl)J0Y2rkE4r7I`CB58tU;Kb>pYc4pE(}cFy&pjmWwWIv-t^XCwRf^ z=FGPbn!3qoJHOIZ3Lt0bC*AH@pE(T^NUquX>u&o#g@=)T-*A6+Q+>^_W(-jYw9(zj zM{N?NuJeBG+q=7lhv3R6{v~?jOS%#`-GBVx*tr7_bdQK#<_r}B&)@-u^h6(HY>{Z5 zaV0MSVWnGQ!loDVSa=x26#7Z6+WU&m&KjWq8MlPdLi_c$nNBaad6R~&^%j{Pn?19{ z{q{h}-lwv!QWp@&tsZ-i1Ily!*@}7IH?myT#rI#KOmcC%8nHqiMR@fkie0D@hdJlH z=1MP{H}J9W5gDY|75D?(Yn}Nfm`2BskRSpb%$8$uxSq3S(A*ehhPvX*9S}Td=n{&+W+z$euz~FW&}P5+_Et3tuJ<*o9r0 zmK)Cs-rT!YbU=Va`%J^|ez9#%ZM98FS59u+4l<|EpW@n-`FFNHh&m+^7l-S`qMn5* zd)%ZB#1~JwJ{|aoEoAG=TFk+d7eCs5h$@wH3S(!9)=B8#qH_G1^!2sSvkFw9-_ZE1 zFX1}!ec=q{yPpEJsPHrfTbv>mEU#V>CF^5v*Ne1d|G#-zndeCj6ug)KCZDA3en^M+=`l7NbP-+JJ~i|Ov6rA$ z95aUQ1l((KJr{@xJoM)g-}Pie{`9=6GSL<4is=uefs2RZ12dZaK#cZ1hB5V8WO%!Z|8>%GMvWNMY z>3;FamrtW;BIb>l(33-ZGsY+x;Jtv4k!%Fti`0Cy+~U?1=6s@nXY{S%WY?}vP3;4~ z`3;DIo(s`exxEOT<-6+F$-M)?HJ^Y5>o=0skUTU@a^VMbRaZQg>~TZVgQ9f_Tw8Uf@0gyN*iWToLfUup9n&sZ?kIu zD7Lwbqlm4A#l$NP@df6cO_UH^KmN_rJM)J74x95|0nn5W zh4`Yj5PHq%uJ2*aKT3A@J}T+xgb%)4rGjD`)P?#!xz6ULb?x{5o6GSxtT5c>Vm?D> znow6U{j*$x)~Rju#xXJTv!He_yb7XcC50!-%Cdit0?Njd)hrJ3D~VPKIrAa9%EY>Mc7Q1^a(nn#_1g&;&X`C^2-;h>mn9@7ZF_ENN9%k6`w?+KM zVXikM5X@8suJapcCgfi5e~b8L;+tHl+{*!?>U;`hJF;N7LB4@*rGs1}0CRtCwPQQ( z;=gQxN^48luxed@b798Ql`sI>sgq*S5;*PKjuMa5QHiv0#^1z*mpY|X4iIYyN8fU5 z_*OT6`wF`qs}k2E-o!%wRb9l!O=%^@wD))QtqK*n*^k|rgKW!Az>YETk!^M9;lM4~ zp=C`BFk;}|G5%AjM{WBK-90zD6knrBp3u?VMHHxrj8w$|G(|HjW0kO+efn|`och*z zA+9=7Z2k%zbg!vA=iXStus)W~R>P>4RsMF8zxQb^lcr5*=edLl^>#rr{jUs_i*WZ2 zeCw&wPVNgCMcBhj5?aq&-DI4X`wUns2tI(=8A>AxOizkMlA;?jzGS?X*?a#PThfog zO?nIP&G-#f2o_(}1A{23zIZcI6^{=^77@~AE<8xTBgXTy1S@TWfUNO*TZd1i=9$?_ zpv*AuNS6+Zt7e$W1>>XDF4RDNC2wS9h_Ejr)$&%)Yp=ZTEl|kXygaJFEVDwp-#=S) zkKIxq_hJcr4q~`1PSBeSQT9Jo*jdJpLm4tPcR_ch|RxAA*|`i?9!n^79z zyEiHK`t;v$wr%=Lsn1452x9&rS6lW@?_PD=P!3fo^et4Cv6HhS2VAs9`-|d$g{~XoqWtY{7-V84WsWz^7=OQ?v^4( z@v$qVd@}-*03U5|#a@9vKp*JlF}&YM#@<)LXyrf-B?xE^Vc+sZM9Q>?aUDhz&a~t^ zza)3n!bnT-ObRQxY)x*_wdrPQuJ)Z>YunPrhIi zj5dHhF+JJLoJh~g1;*8qL@gG`{ZLwVOi0t?JlT?kNV{BO=e8>6NJ&_i2Zst@S*?AI z*3xBB#nb1BR+z4|%KA*`j87@BL@V&E0XpgTcwMt6)rhiAk5=H| zNf;2;)?p(4lf&SAVVln8U8&8}8)ZniB}Ek2&6_a$;-Y;a&oWHT)KFr3K9f`;vg*3I zax%45_j&cilIU&!3nZMt1U7^oVEx|VVvbyoB=J@sf4nNzpfL6+LHP#bql3&P;D`?% zBM9?KwTt?#vtPV*%FF?C!r z92k5}}E3XW8?b%b%#$$b&VHIYX&zS7h&fNz4Y4vIrUG_s z#1l3=T1Xv+>5w(4cv)j3$K*R=@A~zISs9V!Hz4qutUlD_PdBP(14aIeWv~RK6unx6 z4#(3EN{?qemcikokV`;TpNv=i6X_EtU!+=Y@wwvU78DV)C+Hw1-Xrp}OZ#&HZ(bZ+ zFRI% zhQ_Zg@SWRPJ&j*cDatSj>PC$yQ3y}%7@z&b86)_tR;SGHS*seMIG|{@1#=__ov65hFwCZW%8(N$E9U*W9)|{s*LXIH zgNUH8*dyC%H#42=2DzttQJo@p+`hcKZU!`9>h!UO3?zJw+CYQHdi09f#W z7yU&{2|i04hGcxSBL~jIuy9NNEwy7}H;Rk|@kF-D4eipFEBkJA{6A_9V5hT)tX;#( zcRz>Jdj?VT;>zeAP_XM+^5PCb2*Xauy76%TLiE?mcerTQ1BdZwj+(@`a&cwRx=>h_ zO*=q*X0&V@;{U(LVcViT9wBlSQzSdTZXQO*Rt{8GdPwMa$GSXAAaBs) z1(Bue*<5LRi$^tfqIa?a4dkL4%)E7qM`B`M4QM+pQ)6AziqiF*(ACwRtb)Hg%N6>i zYpsi|MsRV|%8ULNva_SEg=8MInW38hzH%D`Dca_N$mx2O#k)fnhV8h;7={Q_4;aaK 
z?y>~ptbWkn=0soTCiUSZyVcKL!cLoKXz9*UUjoF>08DgFsC!2u9KpwsQ)bH^18)#d zblBby7rTWi% z_Y!Z_XXsJv8Q!RIfFAcG{-N1u<2@}Ja55$qy`6MPsB(Dl) zd=`@E(9m%fV7t${tSC&h$?y5ccmb1fn^9NWFo|9afm=}}*6}f+z*6nwU)sQa&lN#oI@A z`~P?YGDh#=0WeV|e)-v68is>jKbsdb7kySH;otp6rmm`@O3aOZ%H0V-0i$Vxpwm5l zTP5;drQ)cU#Qvu|UMT7bo+$Q~?8cq*kl@%stM7AGOX$d;!2)-(Ve;jh@=2|~ML?ks z-qhvRMe>E|{Aco|X)LX7WaB+wROwCp#(F2V$+xKTa}8LC#~zGH9L)W5`^%(<6Yx+I zTH{DQr+{BHQozo&w*l|>jqxmkADrWD-GO! zXyTaGIdzg2hXwlvH2!7u*```1m~c7PvP>hm^<+Sc)kT>vRw*BiopqFg--SW?b;h|B#`p0^Jfe+7 zk*aay+~Q+!bx>k35}hu1mH!SjvcI9gHVRnqxr2_Mo@CjQ`4ksXVkWZ%(AW4s<5}!Y z|1jBr16mpHuZF3g#x>91?TdQc!fb^xuOh<)-?3+ziXz$*IX1(O8g8Mlw=Ub86kJlh zgmM<5NKv#ky9ytqAOaZ>g+*^na@A`Ykua1GF0Xg;pA&+H5#2GNyelr)qd?JF3 z9>GWKVb-`f62-a{(K^;PzFD$E zs5DsHWMaZ}Abc7>1Lz8XgpR*EVcY5k(0F=cVVg8vw|1&M50BOY7^P2_(!N0hU8#P> z3SkKvC_C1Gnn1UM;hX$?6zE}^tX;%{rxvqikQXxJ5K+k zSM-&=7vIYtD{BrAJn9>5X(&{7%p(@Hh z+&_xJ;dib<&iRf?C7TrBzkwtIKTmS^{)&zHzj6=4Q42*8Cu$@6S!_rxIH|qDC2CQ=?`%z`DKei3S#77?Bh6mNmyol^xHSMfV?>CB>rK3FX33R z#)SflHNlSq*`Mz%TYsJ!*;oi$CHic20FKyFkTrnieY#*~EN1uzP6#<+7cA!3ZHNJs zGNU|kpPA{g;UsF!IfvO$#j^~|N4IvBIA!}A@CPktdM$bAefg)!!0%_%t(%ckud{pw zD6#7K(N7Jyf&K_}@@5sQ!sdsdSd4PN4}GY={}|_*b;to;u=B-MEJf~t2jGu2OC+AL zD^t~$4^1e%^Yo^`RZf9tnAAirbC-ZSOTT#cRph5xVlRpeQ_MVMup@oc$xUN4+m&y; z#63jTV2GRv3^5(_EXw*pZ;8Zd+!%< z^RD%86(&aLObSP@sAm$7j-jif-l52qelpGWEb_*f(+m}k(X#entH}Y6x}?CEp5@4# z968C8qM^_?6wx;t1i`37Kz%GrerXIxG89`^QR_K7=qAg~NZ0%zZ?QU@o0Nd`{tO~$ zfY70cKsG;I1i$wLk^ybyS9E~C1;apt08#atQfulUDebV9Lv8F4^Aq(0M456z{y|$* zXw|Xrz-R1JMS~QkH*m4^D&HsVy|5!0OX*f&M{W?ruoQaBQ0p4KNR^<6r*BS4{Dq+x zwWm!jUN2;c3V#Ps3z-b*kjm@p=Ai}!>t^1HP3#^PeA44t7DV-5&wlqPyETcqs~I_Y z;v3Tfw&HB8MLbCqhe%O#p@XEv7l~y?kO8uBeAVJJ%grz_QMRBchdX@ce6w8czCJ0g2L~ojFKr~Xwdv(wY02s!`s-8#{`Rq_ zr)lfX6awe3eo=vj0GQj5oitjWZmv~2nk5oRffn^O$joz**KE#KH%<&kDCsG z;j&)po^FXRzts*IBCjdc`nKE_j4eo)=rFCx&%E~GHR?laKSu|Dn(KwRMUP6o`8n3z z)J6ucT=1GIhUgD08k#I0HkU`|@HKGbv$X2Bo+@CV-JPBhAyd7g0L8?!UatZzJ_y1~ z^k~TQpL!|T|C7Z1ONZfK_Xvg5|8-}a4fX)R(DAT|?D}kHKvSVuk@Y7!l z6D8ig=jC?(0nD6+=Y`17%v7O*sECP&kb5H>NXOkgo-)8N>&8@zsgcK$+V@hwj|!tg z%rKK#3q0uUi^n~WuGuVL)QD93nl-YB{*s2{YU0+-RT2FQM?4pB?wQT%_tCCeVA%U+ zllje(|3UP11xG&>3T?EeX7a_^U5&Yc|9cU34N3CL-=g{}Rq7funyJ}8v@ffcfy;U# zVT~Uau!Zqk^J*A|c4T)a)jFcKTxw8YBaz0|OI0a}>i>#^^L?6A^gR zyf?zCZ&z-KO5rudTaPWD<*L8%9*|!DM|I-{H0S$~c+==_^d!Z1?|qAxUfsKf!NqGK z@Kh2K510cd3zeUOC^z+$d<+mc)p(SC1vvKJ(Mg2JrY7#)?7}}A{;Z;`s?jDTZtkSe zgBncRJl+wR+nJDlTBk#=C)s5m&XTh=d8(p31G*s*#a>Rm#p0VRPaby`n|NE{aCk+t z*Oh{GCm$EsT{o~xsOgms>q2CYTKDu1r&q(RVphR!oF@J1FLhA%$gyfsXnZjLX>4}-Ysb$ZHM*IS-i33d$BfHB}iw(UgtYa@z7TU4-cxhvVtc%R!prH*OW8vaZO_! zseW~mjjl zpQ>;cVm3b@T%E%g(e(VWw7bW9tHvYkxB-lu*Ij#&I5TN<31LJadsZUF!ekYtB=EdC z8ZtiEMNqfi7I9DrSCh>oDiy^HFomYM6wtC@7@c-uDjjJmC?4xAu0noI+Ay$umlH@; zVY~>s4&mN(017y_l_+GD1#_2p2ldg~oA3$${!3wX8RW8ZYkNVBxZi5rfr+iq9kbFK z%KWiwUX21WLt~1w#uKe2TUgTtG<`v1|2AB>UY280{xWeE1Vm$WKbw`z{G#Db{7B1H z?1siPUHOKG(Kdnbn|9n@~LZzB-|K z1p{5mj6CJy(^5vl%Xd79Tx;viWdUQlW`$w7^NfKbsMt(uObfibrM^PC@;>#G?!W*? 
z-=v`NyUE+M17`^XKV82*Qr8trvC~@1rwb6pJm`tLdg*YKD2S_h1}6E_q0^ClOB5iZ z*$|xn`9PF#b*Sw8XEOGxR1=|?QO(p$^qm7$<@fanlIa{>x8HcNAC&8oR}$Y3pB*3p zW4x>UU6NxrGfQEZ^}7(3vKi@KCDf_8%xlo2POXMWxI^ACXQV*xKl+RJ^N1U4TrFYO z#Gis|2+eq7vLITVx)h3|d)Ro-Zitu>S7J83un$2$qp(67ZM>7>GFWEJ2I_V|QcO{I zH=aWvrIa;O7pGGxcP%}FG?F3f89yxLj%C}HKYIfvWOW)=9dpU6Y6Gzf; zi?cfUV*8;$$55@SkZpWHytqos-+2+eMA6EQ0sH*szz?ECKT!{uvE}f6sf1{H!7F76 zH@lLF%EZ$qit2ueyWM&W+IKnKyTKc68B2;XhCxsHXU1{d5FSy_GfmvJqBt@(!$o+J z>nhb7fAM?jYk4C?1uM<079~xP4?rc;$h&q3r@Q{(ID=^pl*(3EcQ8n@xl0C`@5D0T zwYM$Y6R=)ZNB%wq&*4I0M1$Nv`UPd0&ySjU?QhL~h$wd6hWmLlh#;c`-{_V?6Qs)> z3nPZ&j0th>JG<@qSqEjmGlu+%Q?Iziwr3iO4V}t7meQ|_ciRYE_nEr%%u^#q zD1q?mwxA%Km(D}C8z;4uuR+GXHRjEi*Pu8>x|-!rmF0+RPM2H#{qolb^nkg)zMn{$ zW9A4-TC#Mm7Dm52Qq&6O6v9-BV~8VE@DO#t>Ef1lh6=|En9Rr?n!NvEc7!cI>z<=L zSQIMEWsR3_1)}^&>COd0D54Bw{xC2=yFf?&qLI&pHN8&(@MWQ`TqOj4!@lkRz*RgZ~dHoTMEhEZC7kcXU!)JIr?x#7>zb&$5J^P&daLwQ}G ziz4_}|6~n*TUfGTEZ0SU`t;v4jx3hl#H%cs9#c@Ui1XFeYH?c_#ZW@2v08iOjn`%- z4$f+2SfkZ(>HxO;R!3V+bN2Q}n0h#F6jIZ#$rKcBCe;%i>lbd#_e8++qrpfbi(c!Z zfi%|cox7z0ec#?w5;z2K@(P#Gw#{AFLfaH<2o?Xf_-nLqn7gSP_mX+isx?Eg(93?K zCCjKam6>j9NE4O(8EL2ku*_pHbN3S}YK|qNE{tn_-IqjMp8N^@2V<-+f(W4Gbqn0b z{|=mL;7Z(Ju-V-T1b_fky^6T$e-oXsUoN<%CIwrX8iXBR&I@lzw9-`{S2mo?$({Z> zIX0d#u7EObQO(zO%LiSc-(^+6pVY$HBN+jaf{wI;gh&L9q48Ri7#>m{`XZL2Es2%~ zdO)3lYcm}^H$MFYXewTR2HyV9LU(bwP1_vwDq?A)!WkXVD0)`u^qVgUuGS(hDqA_?%AyA+@m19-Y{*=+!ouO}mW=eEZGsagMd<_|JQ zkpL=0T?ypzwAtleeGg(cc4TyXHL_QbQ!!3W6GP-z?C7^F@a8j9eRahzvnj_^Tv8MI+bfLCxT;J6N22bMS##U;4%b*Cm9lXvcXfB7=Ku2(~bIl z20ZydKS%t0`T`KYkQ5Jt`I}#5;@I18l}!2zeDRqS2q*3I#coKxZM(2XO*+w%-NZQk zc)ZkzwB_VsPBa?LEG7E1QW1s|_9ZcxdSSk&{ExX$ZNqqK$Hvx9LUQ$Vhbpk1`l#A@ zo?K-Z(wt2pp?A36{UQP^}GGeM9VX_x&xTFX*K?3 z?$!l~%Sq+SoR;~(iCidp_gY!_R2ksh^tK@6gsn(D|00 zhr_iK6SA@&EDck(A{?I8nk)%p{q51@jvQ1{F_D-e0sG~bcT%i?DJ!vRFyar((H}en zs{Epws2dvOm2-|<@A+3c;5nz7dYz{oI_dZeapNMbeaxdc<;f5_FYTa)Sy1`rpPV_V zf%pfIsBYxNT*$FBovv-LU4Xe`Je@}fg*{=>_LYb<|$CzeFl21_=TXBMbfuu(BLC?>86BzMxXUK_0 zIOld(ltp-F9pHv;EUQ!TNycel#<+PUM^6MbaMXkf)!brklM;|3Q8WTu$~L|OE?fTQ zBoLMxeTq_>PZr3xAg<_f(s_sfMm_Iv*eMnjq9K6P)c;W=S(262H(jf|Nxnd>{?z2V z$fz1*VjjfYG3Aqr@enii77gXJ_0sT@i?-p4D5~)mtl-~+p$ReOU8 zwR}_n@&q+c$iMnqJ4mCQ;W>;3_(ob@=|c9eYM)uMz?;Hm!FUFt)EBz$yyFg{WL)yj z4T1l`%C9K3N+c)r^tI6A-U4W74n4kpUkuH(l2BI&sG$+mjuIEQ$*oCY#VR*nyGTrS8)GWyxjSH1#&Ph)^pY4B zii>2|`DS~)UaT*n`#a?|=L{$#}?*2&N zX`l?h1x0F(pCf%23!yLH0Ma0eHg?@E2RdlA+i?2SiU%K(zA0@jQ0K$yXegf#oxE}j zp(W>^_O-OoViXTVLB{Xyn5$ly)j-JS5lcwQg;LV#@?AReNXZf`*Z^8*FvWIcQg!59 zJEUP#(%nJ?`q`!Ni*|rkReO;T(negl)~SVcn^ms#mw-5S|7`YS;a@CqWnS(r&ujsO z;N>NC2dxj!N235{Wo&Ebftf|^M0UR@68?iQ+n|~xNOX=DxHuhuu? zi0A4eQ@X6M@gGT~6H9ht!S#D1Q})-pBWV&mSWjhiLAEhF7#FVEW^FLF4#uk8+82_d zeDIrp4=jPqHyi5vwzH?wJXAZ`qIqp! 
zo-)}hZYxmrp_>gHm6(5yXIu?+_mxv<8y+@MeR1SO-8$KbrM+w~1Qt*1(Ez`m6X}Bb zUb*cA^`lVa`P|wlfs$JFDCYFhShE86$T5)#k!z36t(Z@|3OE%f>SxI%DhpUI=)zkPwS@b zg?dA&=mIZmS;b3-t}HPfN*2_kUVu|QFH60)hHgK}t+@T%ZafqNG*pGZ_Z?yfjLra| zp7VHvp`RiI{j9*N&C{-rjWVlbAXy%%U5+7NvK;N(H6m3SSOhGoDIf@rLxbsyGXKkG z7iKC(t3GIam$NezbSFl6P5UuaasmIRF-tMudX?+N<4Y;7rtkee*VLuGYPc_8>4EyT>K7Q2C$`= z7Eq;t0JDePYevvl8qDbbGkmRx205kXqW@NN*TrOJPVg~`LQzV9w+KeBm*%ZRMfwW| z?E&=#0ZO)yX4`@QIxm`uqqfwnYSyRb+8RYx%wn=tFkuvI?q!>raZ+Wi9|8{G#*R9K$*X=S6GRIu=nCAv(oQT0QQg7fKliE zZU1$r0_og>5nS5>UXPeer|A;qfH5@`gig=gQ7=I|#&r4W&6IyM=3jt)G zyWN!dYNWECOjQ&NXN&D_a0{WV6tpKoJvVF=FU@L1=p2nMy zk~Vme^ql33gS@ZTgE%t$pixL$Ep}rvdd)@f0q_SWoWfrV$dddmvqnsb*ju{DHv_WA zeVt@U36nE03H+>bW_ES1ki)-o<)Pg9Q!LRm_LpD_Rz!fC;?1Hh{V*nlb)CD0y0x&* z+v*>!Rkk7$VCBf?Fd;IV#IL9t_J}mE%rIo-)m_WdGDmr|g^GO?Z!RBBuJ(ga5 zKwDgAssaZgf=tuoG;9)$SjI;r*29PTLJ&432e>3{-X9xtC^|45nC@OMZVZ}YOex?2 z-Nl(0g+^b{;r`5z7sGA7a&-oc^vxEN@3gB9I9e>-U$tIv=ws$SYy&k6gyhHtQ&Lir;2_<~*fxgAAi;liVWb?_b>tVw5U1HWEyau|{3cR%C;2FT&TaL{ zex_Keh4MU!fx#?v#A#Kh@*YuRzwnm`92sc<*r=R_h;QJeeKH2vHnu6&;u7i0XcD|sij z=lD&uYROTVwnBq%wJfY1AGjbVrUm~saSKrb+Zl=UAHpi(Tp`40)tkeY%t@f=;B){D zi;)_IGAsP~E`&Ti&u2`|wa%~>aoi&Tt#DM$BUz8QHfJo%Ddl<+toT5UcFVsE$y&hZ z!T;TNG4LS8Z44;!CJ>p>x0{tHN; z>M&-bPxKPbj_FUWi!xphFn*;X1OL#iHyRkcP0oiSuAhHUCILH2@t}O6tud5D8{CU( zAO6Fw4iLP8yU2;wK@xP^!G~RdhsjrD3e(IRWcYRVz0WrkIU*F`4iu%V2&2=u#ARGmzP|=8_7%<9Ub&k5~T0Hi0csCOfBEm-A6mun-?}ty3F$ zI*954;hg%wYWU)Vs=+7x2?F3B07HAo1>TPAZVO@t(VMN$k0qr?54u`^&5wT!aM_F1y`z z*j131m5KN=xRs^fFk^ZE2Lj{%4LH15i$oNwSU(E!-{>4tCJyfQKn{kqDd{c|CRna6 z0;P&AJ-JMcdSfrPoJh)f`d`H;j(GE|swfIJBUcmFG+On(XzWF~VG1yS&BVm&4e|#i zAVh7sLUr(cs{wqpHVR=IsGhB{m8;{W*VevCyu)<-HKCo;y~dpngFYgJzOTExf^G@* z{Y(`t7q*C>6b=B0VbaSGW3nqv-7{4RsMv^sza^e>NFaGj$m=~vI+f?CynH%6rX>RE zr-LGjO9CkB5V;hc&;4QKh0LXM@;7at575FP4B zy8PT}QHT!Y4_Lwr(aqVAgjJj`VZup?1}!U$*nkOsadAS#rcm%OPwABqtwKMW+-9@k zTHm;%Fb--AFS#Lthbeiz;jKlFJ${PlnOo0>TW1vT9BWP2*3bqb7h$bgKtl|%`s~k8 zo<-Yxac<1W-11*S3LDoS3oaqfa%Eje^O{DCT@{+9x?Xe~{`0SWPnHkWg}ub%Ze)BS z5-VhwXV9pc*c&1GY*4Bm#s%3NHco7#K@y`R`+0b^P*iE*z%%ZW38ABO-7<3P)xC~I zmbxy$){pr?ti!s>oY3A^5t0L$2m2uIVVxQ%wF$8n&TmOK9eLV*Xh_VNd)=i~I#Xe; z9u_LfFl17>``MUIpKo}NFCPR1WN$b65(UzTPh;A@u~iXsDes-P`|<4i=n3a6{@R%X zi$G_}o?D-;9vfol7vD)C0Rq^VSF>mD98eLu;3=9-dV{#3>z3 z8xt11c0Y8snot*ykWU4eGh&>i#)Nmc}! 
zbF>e+R)r{9Dfr8-nV|4WeED%f9yLz<>}w43YM)ZuR#dnT5nKNe#AV=~QzmC3TyV-& zu&XP{h%NUwhJJUE;~tx~6Ytz3s-jt1mZ~k)%Z)hvS7A{jeP0DE7ow zS9$n1L}5-NYykjixBrLHp@w8F{g8iC!P=!r7w(TpgZO#6xb}$@!IHMkH8jHOh|)>9 zEUy*Mcu`znP}BLf6Y8Z^`WJieMs*2VM56hGoizFfOZp|B96$_E4c1ggQJ=H9yFRcMS+#6A)uZpPg26!T*OR6aV!xW zfT0sP-tv8^>^W=}IM|gZoEGlY4?}%ak+YR4_y6G~Nli=Gw(tA^9>oU0erwU27fx{1 zUE9`Yjdc#aBRR_OdN~4B&+K5SyAI}X8wi8j(beEV|H?76l5+Ea>_YIY0t}2cW|Br7 zko+qoO}Usw1EIYv{o(Z2dHFdSa{C{H>&@W9-)gX_X)7IMlH<5=RvPMz-&7YLkLcjP z)g#THB}$yPkH-n{(4jX(GH(ck=8*&P1%u&$20`Zx@h7LZ>t`p#?4v2M*uINS-?`AF zNnpsmG;_EPAwhOqLoATHnJCTq?5OJ4S$ConVY=$#ph(fIu~#U}Y@dKV^&RTvw^n~c zjepm9jTYW~1~0F0*7`KAjpQ7<6^Ss+RwcLqWYteEyW2>SrR5}(@fQlH_9pKZNbK*_EI0zXPs21Tp6@?7FCD3 zQem|qi+MC8OH;*n@m{q}#Q&`7hKCdx$pG+URnFy$sl+q{hI*9f4=M0@`doZc0!^GF z*$3>-1S_@u1$NufE)(+EH>Wnqq5Lv+DOJ|HjWc)Ts9DN#TX0oU zuYSQV#McK}u@<^NWn0%Jj2~A7ElcOTp-oAdJxAqM+4tW@&0sYfHaa{U z$s-o!V&Qmzeqsu73HY!_`%+OCV{O4T7D4{R`E)516qm0$9Q+~OWy0C8= z6Y*f2h-t?4Km)IX0nqCF=DOyTPtZ}AFseqchj$;T^jXjTPucLqrtwamlrnN>M$pzY z_POQcPNf=vFW2iS!`mR|I*h4Vbt~#1?Sj6yROH5e*`{$3pBzRU!MjnP#xg)Ng3V#L z261vTmo2*n9O^dS{3o_8yD^fHeZh}TKlJ$8y^6NkV+Ag>MdO|(Mz9}JL{Xe-4Tv4X z(C)&}A(%e2$HbV29_Zu&CW$$5#yJZ^=1n{qNJ z2-1rLy`ea7xFSyAgNz>7m@24P2gHI=dlZ86*gN zZ5Z}84X&u_!x@3|U4eR=?sD0^~EEw$V#rT{yW%$$6hR!g4jFUQqWB&%}e>zLo}V|)69wO zPNqd2op>|2GMFVRz@~g{Vc6nX^5DM|)gKD6)uP;*7fTS_O&b>H;SpiE9ZH3W4ftxs z4wWXBfe~rnQZOE=ZrP>62J|elC>yGewAX$>Iui*h$Rhd2^K@9}kiNq2({6h+Oz}M8 zLrn=`trrrxLKJ6DPLh!Nv)$?pb7gIHW<(D^3Kdz&qcHS2wFz)vy2HfE_ZH?g{pQcG zHakuowA%hC_*y7e65tk~O=3+3zd&VnjP!LHky5Va^n7Kc1y(q1UC#$M`tAX&?rTEs zeU30!-G%~VGhj=RYH%vskt7@l1#+CT$zJLASQnHogD{j!PAZF?S?N~sgZkZq$hlX_8=T^RUS_ofhQjQ?ylaftahj8^-Jh#;>>>ri4lHLW(dx~qZ z)utY~J8QQ>lBUmx{m0^?v*v?QPur+1&3T zq)3CEv}q~2UeMr?vr?NHuTO4pPQ79?`z=G7LyfdX2r=DLXL<%G7EhRY?dkO-k@Lcm zv1fLG<}jEl`_+;n?2j@U@U+RPu)MdOs$8IA3na+q#~bnlMy94uzwbd3fE8;ZL%Hkq z84&I)EB{+}@v}j=M+7daB#9;l0$(3_O?I8{$7-U{?a3dVDW^t$`F<>OW}R~BCKb)8 zT+A;4`0ncZJ-OoIO#vfvR^i6)M2{6kf9f6ZECp%R*4+??=BhRHi276<*iTMImce)N z*(}Fr=VUCs$7%mNqX*`fyx79h+!mRU;8}aIqc}u>-0k8Nsd{g=YO-am#khtgHH76C zcro~mdG#~-{bTYG@LKRg+!)8A@t1=Z;4MO%#W)*Y!|a;&!f#5Cn?^3!>Mc(|^~VaD z$gEx!-`!>2bbC=%&5_47N*bLa)iR00<%85D_6Xb0Pfrb)SQ#H<02ynfY3H*c59g6) zDEZ2AW?0ilVd2`3RlWxG^jRN{ExHAPAb+>wn>@yt0>qkmUU8>@|AUW^WV$E(5Bqm~ z3OGJV(6i^CA<3DIv^`!7ykTHRgmzYddI(`-u;{1L zc)zOn112M*>CSRp7cQaug@VR7!DZ4Fiy9Mpwq^WAXYalHMvYRh2|aCWI2h(=MkO3h zM*2Tdg>5VLcHOdUq@Xewu25eeBN98eSl0C~WJ+iy3TedfwMZUIAC33Eqg8Ydz@z#O{_EVcz zcU3f;xv(?|oPb{x3=NGw8P_B-!yKMtZ^I4KmgLZb)9_;J`o!NX? 
z;v%+2^7_#Ppv{{a8;7Krlwir!CZTQi3PvpM$fH2Xi&s)0rF=2cUNI9|8RBc3K`M?B z!FN6)XI$59YvJkMp-K^WIXo;$(MD;dvm`sR!l82?waWP}x{>a;oLnX8^_ne1M^6^n zEZiU5Yz1^#53VD1!nFq2qY!`V|5Vln(GoWVR33u!(75V9MJ`#t!XlkZ`j5@Y~0CrY;sTXJu_AlEBb71G5o^z$G8KC|2 z1+MfE$jn#p#59UQdi9h^kZALsP7iP!3u9R#D&8DMR)JI!B}1o)u2kLX$#Y#FE=K1t zE>Eg!)o=wwYd=8O-4g&HS&|*J9KH9AxNui67e!p!c{_tf>KqFYuDGdTNRlvtPU$>> zJYAG8TUI^u3!gi*tFXrk+8XwZFCfjThV*{`Ot`Mcoh<=rI~7f&5&oJly1FJ;wTCLW zQGZpc`)I0$$Qn~WV3T|7F2!e>z3+Oa#bd)g2KUM$%%=*=lVDs;=$`1x2 zOSC#o#=C94Tn_fKvLT7Cf2T_L3fHlcS zW*Cc76>*x)A4@6h*Xv-%-do^b*TgC zRl!@(;`0UU4CK-KAwfIB?TP+M`n>foAp)vg*1)WG&`SVe>lhj&fRo;a9GlD_!uw;{ z=oveAB}XyG)#er{MFmy6E)vg9b<7~6J&k-=YdV6#)=yzAFVz})>V?-^fTAoH8$oEN zp*O4uc_h|h@xjyq62JBquDnn1B_E;k3S9C*b9RDvVz+(9YJVRv1YeMD?lJ|n_JcGr zm0opZwYBP_Isgc5ZDd{pye7`^kG+&oAs~$%3oTnzwZacuN2LP@`4kqEx;KtYv!2Dr z{U!DTi!y#an#Kzw|E=;id86X}B-bnuJG;ZJ@~t)wyh<;jpd^GqqsX)TRZapz9^~S_ ztW}?aC*5lB0bTPD!3$q-1t!Y!gtslSXfF5~B?J(iNaEu;Ld%&RdX6z`Ar}H;6CJq3 zI9ilKg&^BaAZdvY%^7X|$luWVZphlLr}>3HZs=i;@9Lzc^Yr#&5v7R{Tjl{d|zh3col*JenPDnRYPu7y7 zx*kLMXg(s&Z|EZPD9lX+lXLfsPh09X=R)7vOd}EU0GvFL?KC;SW(!_I=6wke6hy$VrY3Y@Qg>Oflbx!tKBw z)~5zBJtw#2yh)2glu&L}m4hcN}-c`F= zjKrK4x_39I*wc~jvn*m-tlltE$bVbPp8NqDjHvOJ3Q@Ckt(p>(Q*~a&E)yg(+@k)> zOdEOLR%8j)nJ0tFI;RQ#HKY0dR?MA1E+3>uh0H!NzsN5_=2R2?0y-Us2RM9y#wm~S z*70|mI^rr?BahjAZ+rPZASo}ZX`0duUw?~2F2Y~$6v2Det0X5P^eewv*iGfZceQuJ?i_} zbCb4{@ZWm|dB$tExwaEoAY9GGEkJ{+9G$}9Vj)mba3q~Mq^0}O%+P2P$N8%a;^ySR z?-o?m7lgDZjxjW@zDLAi%;E|}lDO*L8=G0L`S*|o0MWjOpX++}!>9#cl`AYM+0SA^ zUpsQM#Zqw*>>Fx@J4iUIh5wWU;Wo`|_up3-`y)%VrG)xCUM9IRwZ+pS|4CQB3(+PI zhW)qUA`aZ&^lW_Ewwf%@us=7wN~O~co$KaPad$QO5nKl_l#|g-gD7l?lRS&V9LwQ< z7niHV+n6->G58AT9}>xeTWq=)Y`5!iI_{nI`JNIW{>+g}^`?Gqf^49tYqfC<9-fA! zc9OS}p*MFP?6`k~uHI(W8$L=((16YBIE%y>DB-HS$)BeM$a6zaDkuXhKf=c4 z?M?qOIq4WDA?z$<>Y-~MhP2jD3qfuj0C#rGgl-(@?2eX0ScxIe1S2;auYsD7MJ z(7nmDgfCG zUi@S^*-ILQ)PJ5u@3=!HUBOR348RVd#9XeC!n|j!+uwz+O`o_|C(r&Gyk6SE{)?Kc z*IY_C65@XwoMYkCFM4D6ky@l)LD5haM;%BfTw)q&<0P9jI^vZZU`qx1DM56X>po6m zhLsMmQxFg^v?-QSrfzbtGhI$z~X_jD=LD%@l#?bM< zIlJM4<|X{Tnr8=oH%E+e%+LSmz0q{T+3J8%bvhl<0lfh7WIAk-wp`)1R{-ekMueRD zyU*C(PErFC9$!Pq-)fG^zF=U*xiXl)vH0| zEhnHMAVWMcBua)GrAq@P2%pOegFWPPUdrE3m5)aus7GXL#(!esvt`&1TZF%h?#`pO zv4sRRba$bU`h|uUY6~w}Fu8SRBNnec2JQDT!?825;=bQsm`?_^N3+cAA$3s8Po2N4 z-N&5}7b{^1F#WQk=ND3?YruA0kz_Qj=!8K?^aJtxHy(o==D)UG3Rh^dl6@8AaqsUyG~6~ zQ|9M&D|rzWl6kmZr#HQw#X#LL!sLyK%o3a{NLzYp+3I((7raiM`tv~`_x+|0e9zrv z9^~5%@LUCkB~6RyAxID(;v&Z~TValR?9x6qya#Q8EAjZZ{$SCIxJ34oug6#wht%`x zHEDjb{uqz^X1(|*i2dNBy1;yQ3fVf8E?yb!`kFxm}6c1KQG2P^Kfg_*FX=DVL;4Y8UjPR_8ETQ(fv|xXsM}7|aiq8E&OA6obFc-S0$jK<5 z>0ur`B<=^>jiW2HrMAEPX6tl9kE}8a?5{%$nUSPddVifAW&xrOlAy@?64zXq4zthQ z;~IPA-z#Eih&@&jiIJiAHG^mCC7MJv|LVzycc=?ZFs)-MR$w9Ldav=?8vc|Xze83$ zp;fs}NPw3)6)wKY>=d|HfwdoHC4%dJ1IeVr_FT{%DNV!4&;phR=LE;Bz;ax7rSIWW zlNd|!2oWB)AFn}ckMa#1YjPH78~OgJ z5nnjhp1eQo9i6rjjj`aHbt9}32*HzarS$I*(50c3)uRAy-l}9Igtbh1#`~^HXT^YD z3lYTGPfO_kuC8A+c$dic1%m*w>IX{}j-O{Gio^cB^-Cu+WKh>eqqGYxr4X)@w?9q@ zlL)_kHxDULbKM_+KS1CtbK?7l>n@h;v%A3Ab4oi@Y(cftWT0NQ!}4%lqv?;1XB-;u z1skU*w@(@tOx*@RJ;G9_SZ{w5-eaaqpAqiw($zyh%SCJS-{mdy&eaHN)O7)A4niN5BCCm(9i2Q!%CrJ4(Vh))J%1;m89 zBm8&d2Uc*$L!x6zCma0((<)1+%w-gRE{9*(dNF|1tb^rYy1B=s9rr9W0m*Y(j-~pN zROV*2RA38Jq4zSa6RI@7$we#gI`8V&HLLT_{7SjjUswbF@w+z_a1j573+NA2Doefp zx02xcwzgX88scYkQ|VtqJ;`$$?oI_Q)x6VILLD)=e9Pl+ds77kM=EW#gY7I}1RXFX z9&=DgnDQy6?kE3rKnrYd=+8qGFLjV2IR>?5%kQCUSqI@I0|3Ad6n7dP59dC43c0p@ z0lg-M%SLLpG1kePsg{b0;Y_Xrd8?wpTKPDsR4lyUI4Odt(XZ^o;E~fBj~?Z+kt*r- zdy6N|gVQGA1_CdONInRrq76JtP2pf$y*ixh9!)rEqm0kRURc*KnztfD-~xtP(F*yV z@1arbE6t@70jyn9h-k998f_atC4lP(944}#b8qjz$ 
zlZn~O^mrHfo=})u-Y`|NQKv6Ga{fp7?_0(9GB&fQI_KT5y?sA}l0D3scEc&&V5Ras zXZY8Cji+oSgE3KBN9}A$@!W{J%uh;NX~2(;=8I;w3r1HT;PM}v3&YN8mWkBcVE!WQ zQUcf_QTAwp4tzk#efusUjOLd-jSY`VA8%W-q=CZeK2&f05}3z~+UpL%BsESqiG*OA zRVE6CLUU1x7#oAoVu|}%s1s{?(HRW-5DB$KTNgDG$c!m)#Ska^RatM!0-!}MQ%pPW zuTd9$vHdu3Poes-(T1>O*C>-*>DQ`HOd6am2`_!Y3YdkPWht%O4+GA`Zz8us1W-nXcy+rj@Q#-WaRUk~}VZSetKa`D+fp zamk8dK;N26i-gG2QNOnM*B@cVIODkbjeFZ}-#5mbFQVIrp;m&Hdx`n)T9S-S-e8c-CmALHLU8r761Hy=XQudphHstL9jU)HZV4s?YAz89G~;qgkuMVu1L*Ac{q}DgQPgwjZ?yp z!*eJd=RV%BI-`4XL<_1|OqL{6b8I@^#H&^1MEXz%XsJXdwwv=bQGK#)6_uxILI zCv5QyR!ypl*`%tuJhL4-6A_qx3oxO(S5P=9dQ=Fy^!(d^&>u0nagPLsM*&PeK#Tr3 zg>!8i?>Zysa>3101Plj)_Beghp;@{c;=HN&(!f0D7KFqp~_)8hKK#MST=eV9A ze2m(Kihd}7uINKLCE}5miYo;-Wkmzbj-6kqVX0QKe5F-%G2(c5NTDQ`b^-7``Q!Fr zj&6U`1la&>6gp_R`~V zMrpgiayO_x=tu_Gb91ny2cu*If!s)_X?AS$+>=Yh%=h1shJA`35c2TXq27&80rgHR0T&w*Uy!5eX3xBQzjM(!3qg0L_r~)5$s8nRAD+rQe8KynTk(;iSLd%bGu`TE(OENldt)?Mmyb{~!TN<-B%L7qkxevO7-Gr5Ce3~WH8d-J z%+a-d;3%%Ddt%{oLI&k4z>Qat$ zh_N}K;9NXc!Y8yECM9zy@;REERqW$Li2m5*xy;nSt)zUJhDfNsL7_ObuR+K3(z&g< z1iL>{L`O~CJ^R=Yz^~t!nocZrEo^LCE8ecg85B1a@)l@RMy0v zm;XXL4S}lju*l=jvj?o+r-ND1L5@cTq)S?B#tq%fb_lfx82XE@w9(&NEVyKW4e*w) z(y^IuDkr|J4LeI*SD#Jqar3F6eAO`hIsr?l2H+q;2u-fQr8|RIx^b_L*Pb3))Lj#v zXk#I{JW7Ild-gaZ+w*AKvT20Ado-iI2=)KDc^?b;TFe7d2Awlpi+kWQ@!Z0CA3{vv zTZ%N<=Pxknh!B8*5E(m!Y;}Ce8m-_f$p)THczxh7J5l~;DfN+IcDE6VvXf#bopWF} zk^4lQNc3VE@+A>6z0~eT8OoI+1UUZqyz@Zh{@v_R!N#M7l;=G(le32L= zd$u!L^5lc-nTWlJXmSi#1D!7a@YEIi4s6B)++0d{?iL8s&Qo}k*mB}+j`>2@Z#jQ6 z{g!KuJj0%omU6QoJo#MNEQ4M8{0eDC=a+v|Mov^FN%~lTG z19~rCjTJ&Dn^fIZW(o0a+p|g*XzTUIrwT2(C9Yi|vv<-?Swlghf$#0{QSeqhtHG_UQ6$|@3I~e*~D|(YL^hhUJurxS5 zh87A={&;{tD^t*r>|&N`x@9N=ttmH;3u}tFib9jL;Yanesbs z3(^rmeR8}MDpHhZ!)fqG%U4mq5+&8U#) z@qEe2ZiSaBPex4V!9aAEo8t)7twcf1{cMmLcWI3YF7>p*=66a*X`F^n@3*#nubTT4 z>&~7n2IjuHL^#KrX^cp%y*OFR+FYAj$+bTjFIV%oUQ$fTn=-Cdu=5!Q>g6%A09w4~ zp<^wDu65k}imfTitKQkyV}ztP?3Cq!fN@(u84{}mwWc-G7a?a=v37=vLz#}%AZ6wT zvbVElS!-S8QgSIc#&iW*PS=Qkoym?Z@|IGhp{GU9mugST8hb{*s!uSpjsgigslQRw zeo#T9PI1UO7`la=0S^87@a+op60HM+bQls?`eTvYfc47aXiWP@h^{{>BH(AYDVD2V zpDXO!SD)|OW-*eO(#|B$M+vw+`EL@uv~4M?_Lv}sN@fPpU@4hVaOT_8#D_%vF)6=P z(QG3LNUx(tfb2@53j;Ked%*s<)Q72WR-{KHX?s|Fv1yh~^PGzMwQh8>#r;s&$LYbfZ!b3kK&!yr*0TurqpT%h$0(AOE_7X? zg5EJzyKY~A8p2V>i;C+yf~rEG5TG9#TPtJ9c8{ukCw)r3%RgqcD05ytHee+AaA30? 
zZ@I_~h(Ytt1Nq6{53f?LTk9&!`?@sNgf0j%`>k6TFPOC1lOssfBrw}D9JEh(Gq!J+ zirF{;ZJM_ZvXYSOep@ zW;o|GmXt`CYeY_dd7Hv3KnJWDFdsJ6>N!p^+p1gCIzk{vx93-1Y&B2et}7DW#uE|G zrr9vRR~a@Ds(rS$0P}%$((b8_KOXekfB`|qL!j(yd*!sanTN=sUJ|ed}e4OEF^(UqMO*!ap=B3779* zF~!?>Y}nri4u|OCtwSMwSYMJ#nG)H3SCwl+;;h$uU_dzbYSay{IK@WOK=YQ0R<%y) z4|+Mh0C7jBn0|2D@M38&Z*87y%A8OCxJRS}m*VABGJ?j{z{aaRUgrWx`sx}aK0*+I zjo}AT=sIjsG_UX?{qGm0K0d}){$vJZClfACA#4QOzRLmzxQBzX^**9CKbP!=#dZ@+XaD`PNI&P>=zhrMb|xcB@_=StUk!WW6V6k(C79>p(vf|h${D*fW) z&1|Yih=nr$G}SXJ9x{)7^qHP`b*L4&>p?u+37)@E#dw{nrJVWYf4w97XK-SDzCF{} zZBVi+qrA20mt>+~BK_wEQW}ExN*R^o>JV(En?-D6}aEG^b6;3k4{#J$e>T zM*l6aE{S;%AR)AUvnEbNYuvM?CQ)Z+W?wzoD_`@^SG(2&Qm!)HC;X(e06gi2{O+eT z_*OEF79uSoM(b27`!a;_<}kUBiqeX22g=`t>@;M=z>dV4FQbq|hf4f&HOBLZ>lUB` z<3D_Dv#&%tj25MI1EcSv99U+(qlTRV3~%HP!K%ljX$W}faPcvtPV=oe258>b>DtQ@ zFl_#c&t#LNsWrN{}Hl4Gh8f{Tso zP*WWA^d(86rOu&0A6FT@pn(i7j5arVUN#vvI)aON`Uu%^g4lkw9otpT+@ArWMl*Fx zmWa@Ah~H7iugyrFYbih<+>5*8%OQavK!B6>^il2dJqX`@v;)&5e0aFw{(r@WvW*Pm zjAY0bR-INH*KYNoEw}9H-ZR)KJj!szoyNp6<*DjSjfj(P*P*_$`4!Gf(&gftn@IgK zrYK!t4;`V0iD0KXcFKmK%z)06lk&v^4l&ZqA!eTF12KtlFz&ArGu{9+)(#m>bFy2r z91O%lGuencDuTTEiM6#qK)T7BPM5m6&W7NjoVgb?p*)YE!2o%HyHR{^Moq&7LW~^j z7iB?u&6r@Do-cO5$dNR2tHTpeSbv(JbR^xKt?#hzz$U37KKA#q0GZyc(xC+8Mj~Zd z^IvBCMD{;Vhho6;>4vD{@y-H65H?g7JdKnNW>i`p5sNjwg4k+3?3DF|C3D2tSB2_m zxIsR(E-IAB7hY!PGA9%wTDD02OgnPaxre-^WTqrcG!&=V(t5@sT?EIVy$!~&*LSH# zKEtME(8~&ZS%-hjts3x+pnLtTnub7zMjC`&uk+W;^KunuEtwgwCi#U~Uo_9sEU-U1KXhmQwZ=z4SFaT=pxdk4c@1n2Bi?? z!*U_T7=S?m&z3U(sOb@;TGa!|JoC%+X??8n`CwRw*;T;j!e#wdYTs6fzvL*jmI~Vv z55a9eHJ?V#4GwAZw0T`Lw@j$0*9@aa#zvEX2i;v`?Js@jRzZa@7J9u>K`x)|q7y}> zy7zzH`B6;Lo7M7+x;+3W3+mQ&%MUKUHTahgazL*Loa-e7^E^;Q$f1N<<9?6aTQg3qM5KXQU*1K@lT zaY1$7v?_44pElm;VK;xjdWjd^A(jslW)EA#o&-`yy+P*KXKyls)!=43I(iO+XP_lD z)>A9hkB=BWkx}e~;b=xJ|KY<%+81v>w>K6Pioa-6dvqdTqSwwa5eH7D{vSOnapO%U z000xe>nf1aKO)V=b|$pW zw`I+}u|Oe2h?m>OnN{MFiYCB=ww(Q5e>r<9b9G>TAAza9HJ?M;k@oVGs5R1hreDBe zDzW3R)j$9M1GY{TpJ((`prE1iz#ms&$m7;R@Zl`gd(_6%a=rlnKmZAo9JzD&ST8sT zWZDF^l>NY7+6cY+xS|?NIob&wGFSho9~j*E_`?PmO45n0t@!Ew1x zi8ufN7lHodGN0gibEsQTX#_n4E|jpvm+6}+>kW7Td>l@6MJ8TY{xlZLe-i-bf+t<# zK1}ogqvYCV(SKAguk0G(ZzwW+jpqC&5R+67ZG`Spvr9%915LusGD7cm&|=4NM)Bv| zg8)`py=)y3@U!&1;2&vU(1lw$lHcg4OGzw3h#xg2DabytCNmxn;ku|ZQC|F?%1|%o6o#=?wVP1XTF(#qgs2_s;XaA zbx7JtClhAwF8{j^7uXo}_-O`};pK*|$%uk(nilI@ zGU?so0J}pd9U$}l^?LIN`YHC>_BeA5*y-&6S^$$Ck>BDTbA=65fnwib_hP5uuf6v( zR|acd&>0Rj@%YVR#u~gVn$o^|*rJ;|o1KdBtMrOK@I z-&%9u-eaZwl>5GQLS6LVs0qF~e`EWXewu09_JKyMxT_qdQt&CzxILuXYQ$j$u<}c< zn^UrB2F5VM6^&E0yE=c4YAHaw4Ii@AjtUzJ{gC_@vq2*iCY{^O$?)v$+=4Wj{UWWZ zlI$-Uy&0_dd8)07h;n};#^%U#CJ=TH-3)s?JLr}d3`e1FvggqchC-0K7*{XU$t_=3 z;~|-$rvg9Mv@|5%J8-^uO0VCWn%$pZAW3E-H4e%4w>rRqgo4`+=PhYCOZ-2ed2&LS zumW-xv3U=x+a?zJQ*S1JWU6>B;liV$t7pgXex+Z{WhS_*5K!rpZw`N$0gNlSMVR5 z@4x!u%mC1^V)N@@n$pij&Ru5^g>a6~}z*9S|Yl>Adu z`yh2D^X^z=u=(@1koA+yOrb#k|6t4i&b0rpbyK$~q1SfmMM>~sG{6>mdzCqDDEnZ~zq zJiQSh9H5GrwTX#zyr2YA!$qy0r(r^RR$huT`Hg1lG!vZWT#Z1&*?eNhoi;vE@6w@> zDoM*n`nM~ke=}1 z{*#x=fq1GfQxHgj1+;JSOf(S zcVBf-Tj#Hn{f9o{ZJ+88nIj>>(WcVyI}fDpTKG*|)XPN9tMb`e5zD$>i2yR@WyiFg z$1g6-X~&GsiKX6*&s3KpJOOf}wn9RlunXcioUl2z7lQ+*LQ`^#s1%*9LCd-F0qm4ep#q3Vr* z;eb+~<;flpm|uS-fHbIa0O)VfXnLlo`Z zq4u5D$OQa%cm2=o`*#lhAF7Rx3_<=UJMixsmP)m!{!6ltMU2&>EuI#3Y4_x^Uxy$I z%)DSk6fxyr>Gr>yKYHA@OYCW5@_uNxlt-8^cqCwF^JrCAyUKz6;3)w5Gac2RCM?ATu~EZ{ z^Ao8xLnllu?_>7JLpSs&(=AInW%C9AA54`7Tb~*p6u7Vk!Zo}aT(ibPSlRUQyw4sH zSy(uur9ZI|=jYy-K;~t&6{X<(`!9puL20I}zUL$|6l$%W5O-r;m?P~4a_%J#h=!iV zq*KIgF*N>7Hi8%nPq)WF*v+Ikn$fQ*@M62*wXG}@+Mw!J_0DZD>rMabr0^)$-_Pb#?f4L2h~`#dCwHly_!!0O*wY{p!>xG7`vhP 
z%iaTR69?^4I5|^v7Be|`@1%JUI9575yP}@hF6ifB!VPTMFaF6?Y@dP_@WV?Hz!m&H zZ+}O(9i0O9CCj%NTODkPB^ulZOe=M9z3TPB!7OkxUWg|8k4NDs**j%%P z|mlUNHohl=SL{R0w{lO8_o^*8C^G3nSz^N zFd#>nrfkq>d}On^)!B>ZeTvG?Hj(e(`93gUZ5C{TvWT^8m11j$s1HMt+EEii0`fOWj6`=$ELl`_xat7oyk@2tW!YH?(+Pd}+9`S(;*S23Du*-A?20RaQ+ zJhm|6w%pvJWq8()Q~s~MpZZMNBuJswZYOY#F+*pum^x&RWas-vx)O8_>+dt?^a2{(7$G75;qw5ulyP_Gc6}p@f#)n8I4e5xzJP zuLa>J?oCs)Z+xJSpU$9AURvO1R0~o9Bbz4l9|KB3b~nrk_YBIIOE70Ev&qL!`=wsw z!lx>r^XT%{4^c~OeGMKOAOH!yKWhhQuq~`$AO0Y>Nj6Zr1 z6#egkFv(2s6H&mY^3N=tZ-^vKbAK}NRRfl;UFupZ&F&U4dfH19n}z^l_j@TR%dF+N z%@*f2&Hj(%i~d^Xs;m8Qj@x)s3=`Kp(ceY9!OQ8MkcjS6+JryWLyQY()|qoymhuxR z@}{f#XYB!?vpm_SaDkUSg}ddDXFTk-#+xMR+$S3N)+6 zk-7v=mY(Ip58HOLPlsDt28->jy+B$(hLp2DCgz9FcJgp2Ea?-vmRDo)4W@@DH#yzX zd%6di`Ur$*G7mw%L1e@_?^{D8bG2SkoS67^%zxZA6mWcSE( zpFZun&MLuEsHJNB1{dRN1(}gns>Sk-Fw71wBs$45? z%f_5OhdClX&{XCM2bsRam*;qP^CEC4dQLit=B)E$6}?SEsYU=`?7dZbd&kWW8<(u% zurph)rM5oRyIdcxrPVhbEf@EXaC5YU1w@P(bRl9B<(lzIF$;(kG}P=fQ||f&6-h0u ztnt-~4E1`gaYdpAElK9C&UZtK3YKV0wm;6_{b=BT8rvxf!`DtW!p+NBLatcFt(vDO zJ9wy1@d0b|9x9qK_rn;Mscz8t5Uii7DE%}mI?R7>IH8r1wVyuknwEH14X`9_^xYvV^L{WbSRXG-Ny z$s0?XSTX&|z@n8BO^mRM5}|mJ?*&Q6n@3JO!>?D5KYlI$s4s6?{d0qk2{woUe(#TD zoxzup3^txVy-D1d%U8i6N-ekALgnPZr~wk`J~nfS@?dVQifMCR^jPoihXEmdUh=#Q z1G@WNoeK?4rcev;4)1 z;}v=*LHKZ8>yLEojk81X+q@{+HdcTVo)^yRnD*NH~t;8d}F+Ub@& z?l=&lJ1X#^Zm%K|Pn>A3cLX6q`asFVWlgF=2sWy8$H(=%8$D!52_I30(or;-l`Rr#$?!XIVf|_#^%Y<2Ye`fTO82yU3Ru9rc*J7&LlO57HwqsQ0bDCze~zZ3`>p07>cWW{oQnYS#wNm6 zKUQm8FZiLr^o3`?J{(frY7#nx=Pq_@yCi9(z)u5IcmvV>{H)BJ34ftqI*l$sD{}D%px4C`F<=xa_(QMP6PT6Aws|`yi zD_FWCZX0j|mAwF?g24RHr;ZxvB$%H&1jXgUO4R5190uhlLw;#3;Qg6%fUO zs&zuqEZ0LS{yS9++)OYa%KG>|nVd|mKX+X@M0{7z6;9^yJCcb=*VJ;s+>-#lZ?!f0 zcg1QMkWGB7T*sw^FEFR2bVTd?C(k_-83J%+05z3t2Xo>$C5Jr+kC-iU&Wx)0OslwJ zf#tkwXStNsHG)37Aws4_Cj?~a3w)$|isCkV`lJaNMQWI*3?e=MxmpK(uIP7(7NuCX zPOPlM7rG3SxTQ<4#Z8LRyC@m#>~5-Wm!{LYmXq3^HaTYu7yl|&I3UmAc_K?m68RbF zOc*PP&f^<)LF*b^+f_WjP{q(o?M8AfBx@;h*tn|77GVfNEKT#+>-%Su&|!}L_h&h5 zr!>*6Ipgf}9NV@>E>h)eVSS=5Lb;QAg8$K1(KSF@{?0~(VIUaq5PL9^ukBaN=j`cL zE(A~&BG&`HNQ3F~{2XTg?{2Z#arhY+wzmzT7Bi)71ytu3C+4qST2hf-8BOjeoS$dFWl81YszKq!@IB% zmg$?*+|(eOim52F4qlGuPxhhb#t|MZ>|6kQO}89k=^}SUG=L&=s4r-@qUJjHp#w_h z>KLtFrUo9I2SFXR*fIOFm4_tV-D87D(!9m!JfW=-NLDR{3Hd=^)iD7p$&OfZD?+)= zebw%mD)G|{P2#%DneYadUd#ttwq;#3mT72>TZY3HQ#PP(R&p9tgVXLI_@|?~)<97- ze4{VOTL+;qxOdpuJp%6rZh#Tx%mWnYSy7AGGqn(M^FZ^+I82`{J-iBt*bN5Mk2#?n zeC-@UIJb}b&yT`)mG29QG;{2w~y*Q~4TA6eJXRK_%V^hxC4tx1>rfyl9( zbaDRdH>*;O{1Sh?Zgb@IJ6Ws+tzrF^m3j0|ONw&gir^5uLwjq&zvGkoUptWY8p(5N z1gF3XYW1hj!HQ$E{h)5Fk3jGf#n=5W!c2JJF$t-uSb8uQTV1<<&iEI3> z!uVtcB_)nSYf;iU)P8l$@E6cEJU>=|~gc#L+(`y2S}0Kx4s8O4 zH6hniHUOC4RE$}v7 z(LrnE$R#wBF<9lo4LL!e;?8;*UufVx6n!-D?|14b z`XwcL#QO_Z0JJJ}95lJ>R`=>sH4lOd3JWF>SZEr|K*?WHp&a8lU?e&DONaHbjglcYKb#2FW#K!% zN5eQRw&RFMeueE@zqFJ3H-@+lr%&JmaS*fsGQ9dpPPGpS`*e81D76s(%Co$;QmuQ`Q6z?ZOob9;7wD^wK z(5%JaqFOg(5|H|AXmbQS%O9@_sr0fNt;QujOy~8{9|k63>zWeunH+>Lf&;yoj(2H_ zA=~X@2e_m8r-aKv{Wu5_)yc!}X%G+0tD5eg}jSV0|YmeMr@9r_xqgc zIL3;<{)K@<{?>lUca>GO_WIk$efeI>OrPTX4tP%qya8w8{-Gxr-T{pFx?{1v3V#gb zm#Ed&rm!+5bl+*}%zjmS=~qZ8dL6z6{9kW*FW-(Vk^6Zu{zt>T_Z`>zT$X&vuEuU=p zsAFV&SmF@cAq?&LWjAiasv=eKA?I(MS{vLmCL(hT9={>ZV;eQEies9PzszgLX&NMu zoHu^}OP}j?{F_v%9*XD;0yip*suG#dA?ul}%v?PzU}Q$)4@SY@mj&wvJN9k(k7}2p zyw`G)cvJDUh6X@KE~6G5rVC{*_gozJ-^}F@xuw!ZRQagDHdpYvmNT#(bpOB-JeHC7SgiaC5yPNx zwlTrp7LQ&u z7Y=Tk_CXMCqD)V=U)k%ZhTCDMAQGf2dGFj8pT)R^>Wh(I##yRJ-{5$3!GcP1hU?}` zQY)f45&Ol`{lPUi;v7UKeLp+QM6XYvtuccCC!~i_E$35nH$#;b0Y)&4@A#hw8x1(F z+hr=_hS!Y7R&yf-e~JKRF0}o4+S{M@hd_wFj%WP}Z*VZ!x98;$;UoEQxkZu^hVd9d 
z_flfZ2=nkK&s%?qnJq;Go*L1L{zCctxNU#RxHU{3GVVI}Z$3JI_-yw~SOFTAALz14 zDIZfi((Syl zDK(Pv#a`Y|%1=K4#cBf%N*JyH3^!GZGpGs#8u?D{F+_ub`CJR5Vlxqea@$PJn%hdM zXjwmuR0rWgN&9VhyZUU3Vyi9jVm*3glaR~@4lriJDk|hn_rLczC_hRRU4-8)+qw}v zBF4!uQIUDE6v&! zs6jiObcCcP32Dq$-!dNcS;Lt+i}ShL>zLBo)&8z7UpcdUYYiyn`ZY1Mk{NXt_?$X? zS+&7}bJAmA+HTWB7HdSLknSxUiRmM+yxZjQ|C$pnL&p3Z1s~J(oPq|`>-QSu1;vOv zH$5sE1qnP|Ybrr^?|CP|vi@_=Cf53N2N`vc0!m5wBDOz#o9!nhaHhJ90t=r2N^)Y; zxR)Q*OIgvbl15=oW; z?XaKnmk#e2lmi{QNVymPxHL(AM5~BN#E$Jxpdt$nrJn_m`*Z6g+v{B=C}(K&`V0AP zIQRnbPbVtZE6t&FeueJ1$U zOH`z%C?*1TV9sI=cMD|`b61(8jKC!?T{?pNW?PCaX<&+) z%u<66Nq&6_HBJ;pRvA&U^1VT%O3fd!>c9=)_!ZYAYV8a~l)ia2M)7gaiH^!hDGf?(gM*`G+<5smETjaq3T=D96C zLM^)XP_ueZoUa+&mL#Xv?+}kYS#pWQz7j}7Kw#}w7e#N$L;FbKjnY(_v)y!<^h96s ztqS|CR0z;~h^_UX%RH$PpSqF925ITXtMM6knbO+@yF?$A9w&f&ihUhPBVl2uGN56c z>`;m+ZxRYKT*IKdLHfPmVXSiDEigklP=~ ziw>U?0{uwHMqlX0<~@I)+!=NCd=KGL5yiysvqy|VwRH2r-CCa^wC&HaxRq25WuNJz zE|TKhI2wZM7A17tkjuMmbZ5e`vr(fxdi{Y}l~V$olgYUj0ZS z$QFr{_M!l{M;@`z2wiBhawx+H4M{dHw^ACZKg$v=OfNB)5P1T<5sd>g?xXNn{ly2q zcZa^D|9;z{ViZE?toR{}&W)=N*xPoCCb`HrGWrlCL=tK_)4d!cuOA+sj5a8< z^m#A2__tVKbkh;~RuhNDDkP>s*~8?u=#qj2j*8?FqTO%xUmrVja*|uH^^XOFi34$& zKVQVWuAcM|bqdYb1f(1c{XblC8HDL~96^zmW)m}h>|=#why}llgiwC7U)uG4Pp`mt z-wsy=xVQTAC)=B_z9362-5@x05W72qWE2ZmFvV+?iL_4RCBDAObJ6nW61V}S`e-D^ zt2|!SoSkBG_&FnCsX(i+@Nhk11wB91KHc#$&4N@7SO7gOytE%nYS2e;Bl>ph{l&xv z^k*?N&$29q@%RTP?4!)i)rce~Z#7&`B`Y>6O_bnNLD`07qIf=H87|I~G{V-=#Z6nL z3T7JHB^}Z(Xy6~QTu0NoKrxuFZ)vGzhH?a@u->5EAL);^I0J}X?;wx2Q7|uqv?K6G zQll3Sj`!(f82Zqp^O?LO*cK1iuOmH9bAtx~y_)my0h2H}C~H<|#A|Ij6yjHuTo7!G z54UV9o4$f(u3ENiWhAf1AU&eNK(TJiUBZ&*d$@`<-PXHhT@=D3cuy_eU#~K%l1t2H zn$dBvXz0u0@);zQLdU( z8}UT0{G&}H-k?k}f&sg&xU=}EL?T3`leeP=%pqtd{5L`|k1wlVZZ(;&R4{~y@S(*+ zu9rUv+zu(_q2vYaQy49Z*DV7>;cwHw=@W`J){e`MF_^xwr7GGq+GsGE7iZ$lsB%ya z_XZ( zcsa++^p=V5O=7R?JxsDq@j25$fm1G3?K8$|bsF69ew1Fpm8F2o-}&5^^i?AsU@k~r z9sbPeb=r$pSgg`}Cyp6*FOm##GZ>IBvyI;oTD&ecAQLe!OSfa$cmq8@;m++6`XGVeAHwQ~uZk=%;PdO_iCj24XY z2WqiRs`SkPiwXJS0l1T(Or;b;aMx>w+Q+YPn`JOKd5nA_MNpj%D$zi5$VjK z!kk^5m-y~x+X&TY5uB|FKcg-aP-V9*l<}E{!*zlDtJv1Xiz6M>I?MaL6)mQ3rF_4o~8EE!{Zk%RS2S z5`tHa^Sj4i#khN5iOhd08SBR^xtsB}50AiY(!r!Zco##K|1x72r}IWy9wS*IDoj)8 z5g$@(NH!{@drnZW%}&j^LE=j36-DIKUdlqnNgDfE?Y@nia|C~$JH~4%!)T4KgSe9w zmL%b-+?pC^Fo(lw045xt3UwO|eG<*%9%-=ADom1P!>Dl0nN4f2 zbiMZ47?qQ#eyYqC2PYa`akgDciF)&??k{^9Al8H9T&#ggm>sv)n_-ngKn5wxTPZqXbLS7osCJLDsIFAz#XIGIz0y8BP_yN$wJ-$cZYIF_I z*A2@_gyQEX_+;mz9;k`<0HyIMO6XK+qNwY9YskQ=V-%G+6p!6Rkz$~>&*4rDJ>m}~ zN_Q`GHx-N;P0$8^=_tbHgOxqf$Qj)$?JeP&-RmjiQ}?>oQYoYo*YMp)>3!QSC*lgG zat!F;B)UMeXKFEi(#isQth_FbEaM&EI+UX!mXl$a|A{V?6NCx~>=MCwDMeWumuhSw z#~QEb*g2UpxI(7MmvP$o-PsL(EU^@e7IyX8-~TnWSUUFe&9<_w(wQ@Tp~IE2hmBS4 zyKrM-S}6UkUUQ)zKgpcc=3iSZD!x*@VM0L@uZ#lFjqzZP@cKXvshqtr_*2{dEXy zy12ewS-HmO;lOJfoM05m99<3j)3<|kc0SDUD+kEr6GKosCA4s57Ww5((u2uTcft7| zpzTH*aysaZ;WW_hmOVz}6@KV~7NV$3PxpSu)I`a{M@rM5G_E#9pg3R^t-^qw2PN;2mI6Xn zg5-bxcsKGD^XrMvil%rwO)isWS4llb=&ud?+|c<$q|wY_Np9i%xb4b!yZ8^;t*mzP zLu*VmItrFw$@zUN%&D?6nUu_InPq;F5#bV6>U@5OwhwapX|~_cq{@tD@5KbPiI()T zI2j$}DlvcPS3P@}4FCfwgkTRRchb{rlaiN9#$sr5P01L7Dc30veivi=!4LY!FU`Ft|&Q7LX$F4EFe14!&tjo96l^x7&}e zZb5u)4n`z&q0y+7#2fx~m9iUASU_9Bv5%aDLlzl=4`^|pgwJ(PboQD&#`z6U; zA+iw7^TI3uY8<}@t#_d0dngle%JdxmU{n`7L2^`6~Q&XxiR6P zis#P+>9RQQrU;%=Zs%3WeoTk`VXt5_4?WC>IB)p%2NnzuM1c7!Evf`Hc1fduUTBcE-@- ztERpKba8ZCvjOZ`_hdX}e(E%gr(eh^YQyyybG7{?okuOb*nqFfmS)hy{XsHs}qhN1u0-1Mu@q zM+1joQc zcN5L{_x*xFl{nvM%yS`!W47h#Iv0#RaYSEdEjV6HSSinLBSVT0S{vxl2Y4F8B+VW7hd4-l?=RMR-rnZNWq7aYge~<%tOA7d=Q4588xJlS-xH^l2y<9 z>Kn{C1y+7dLN`;KHiHI>JaOrUW3X5h57fDJD!Ppy%HuRtu;Is;nzn1peKFZP3E$*> 
[... remainder of the preceding GIT binary patch (base85-encoded webp image data) ...]

literal 0
HcmV?d00001

diff --git a/tutorials/images/0_to_litgpt/usage.webp b/tutorials/images/0_to_litgpt/usage.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5b555cee43325083d857718a410d020dddb22296
GIT binary patch
literal 81938
[... base85-encoded webp image data (81938 bytes) ...]
zb+F&@g0BRja{*Ws1+hoDFpW+rPL$PtX5KZU)nPLGo@>000Xg|7Pn7+g{D*iYWuP9| zd^nJm4dlV!d6l|23VqSK$R>XAYUUnYVQO(!#7zFea?mAgF>0i^IS~7}Qzv|!R8GCX z0ng++Cz^wKcxC-z^!SfbK>5lmvntgkKq2_(G+}m2*{R(?C7Si@b!lEAB;+#%HYa$C zGl&OJt(a7d>)VcJ{*~dnq-tdClu!JaMc0(G*uOyp6^kxCvY@I^S$8_weN@=I<+Cfb zK&R23O$dqY=U$>Gm$=q5y4Ml4DExYfG^oUAn5xeXx!~-G@Dfz0{_U;kpa!5Ypmzj& z^+xYX&EG^{7}&=mtp)5=xdf@(r1H!=<^wQ_(jYegV!8{ZIvDjK zq@EFKJy)ci*}NW~+;8hZweeUn;;x7;wbVKr9cZrf(j_>BDU6?0P5P`XVcOy$Jq^Jx zeT&ttgFby{=#y5OR!8vEUV;O=2ty&eMms5333m&NN|m1S$ra9AT|wR@5hE&9G-4~~ z%TEwW7+dd2{HqtO^!C?;!Q^2bl+zD!rOpOo5@x${}wZH?cvF>^8REZ~P&=CFFp zp5X})D_e{wR+&Ju=h_KHw&v&@JoTYK#k07is?rvT!pjEU6x`YN(Q?~8GL0ex-{9~l fdTw|+e{dW(lE{V~mRF{pczoH&0000000000L(`?t literal 0 HcmV?d00001 From f39f0d7a0ebf8461e181f3cbefcc5af58b8d22b2 Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Thu, 28 Mar 2024 14:01:09 -0500 Subject: [PATCH 25/37] Fix links and proofreading artifacts (#1207) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a87655d355..77b44b6f4f 100644 --- a/README.md +++ b/README.md @@ -106,7 +106,7 @@ For more information, refer to the [download](tutorials/download_model_weights.m   > [!NOTE] -> We recommend starting with the **[Zero to Pretraining, Finetuning, and Using LLMs with LitGPT](https://chat.openai.com/c/tutorial/0_to_litgpt.md)** if you are looking to get started with using LitGPT. +> We recommend starting with the **[Zero to LitGPT: Getting Started with Pretraining, Finetuning, and Using LLMs](tutorial/0_to_litgpt.md)** if you are looking to get started with using LitGPT. @@ -151,7 +151,7 @@ litgpt finetune lora \ --lora_r 4 ``` -You can browse the available configuration files [here](https://github.com/Lightning-AI/litgpt/tree/main/config_hub). +You can browse the available configuration files [here](config_hub).   @@ -331,7 +331,7 @@ If you have general questions about building with LitGPT, please [join our Disco > [!NOTE] -> We recommend starting with the **[Zero to Pretraining, Finetuning, and Using LLMs with LitGPT](https://chat.openai.com/c/tutorial/0_to_litgpt.md)** if you are looking to get started with using LitGPT. +> We recommend starting with the **[Zero to LitGPT: Getting Started with Pretraining, Finetuning, and Using LLMs](tutorial/0_to_litgpt.md)** if you are looking to get started with using LitGPT. Tutorials and in-depth feature documentation can be found below: From be480dfc37c5d9086bc07b82b899d79b81656d51 Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Thu, 28 Mar 2024 14:51:37 -0500 Subject: [PATCH 26/37] Move import to top of file (#1209) --- litgpt/pretrain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litgpt/pretrain.py b/litgpt/pretrain.py index 3119e5d9d6..bdd8f03a0b 100644 --- a/litgpt/pretrain.py +++ b/litgpt/pretrain.py @@ -20,6 +20,7 @@ from litgpt import Tokenizer from litgpt.args import EvalArgs, TrainArgs +from litgpt.config import name_to_config from litgpt.data import DataModule, TinyLlama from litgpt.model import GPT, Block, CausalSelfAttention, Config, LLaMAMLP from litgpt.utils import ( @@ -91,7 +92,6 @@ def setup( if model_config is not None and model_name is not None: raise ValueError("Only one of `model_name` or `model_config` can be set.") elif model_config is None and model_name is None: - from litgpt.config import name_to_config available_models = "\n".join(sorted(name_to_config)) raise ValueError(f"Please specify --model_name . 
Available values:\n{available_models}") config = Config.from_name(model_name) if model_config is None else model_config From fa0085e7b16164e08008d67e3967ea951fe46e92 Mon Sep 17 00:00:00 2001 From: Mathew Shen Date: Fri, 29 Mar 2024 20:22:05 +0800 Subject: [PATCH 27/37] fix: tutorial path (#1213) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 77b44b6f4f..162512b914 100644 --- a/README.md +++ b/README.md @@ -106,7 +106,7 @@ For more information, refer to the [download](tutorials/download_model_weights.m   > [!NOTE] -> We recommend starting with the **[Zero to LitGPT: Getting Started with Pretraining, Finetuning, and Using LLMs](tutorial/0_to_litgpt.md)** if you are looking to get started with using LitGPT. +> We recommend starting with the **[Zero to LitGPT: Getting Started with Pretraining, Finetuning, and Using LLMs](tutorials/0_to_litgpt.md)** if you are looking to get started with using LitGPT. @@ -331,7 +331,7 @@ If you have general questions about building with LitGPT, please [join our Disco > [!NOTE] -> We recommend starting with the **[Zero to LitGPT: Getting Started with Pretraining, Finetuning, and Using LLMs](tutorial/0_to_litgpt.md)** if you are looking to get started with using LitGPT. +> We recommend starting with the **[Zero to LitGPT: Getting Started with Pretraining, Finetuning, and Using LLMs](tutorials/0_to_litgpt.md)** if you are looking to get started with using LitGPT. Tutorials and in-depth feature documentation can be found below: From 134a07146a0e08dd9718249cadc4919ddc3c9592 Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Fri, 29 Mar 2024 11:13:56 -0500 Subject: [PATCH 28/37] Add mistral 7b 0.2 checkpoint (#1211) --- config_hub/finetune/README.md | 7 +- config_hub/finetune/mistral-7b-v0.2/lora.yaml | 121 +++++++++++++++++ .../finetune/mistral-7b-v0.2/qlora.yaml | 123 ++++++++++++++++++ litgpt/config.py | 18 +++ tutorials/download_model_weights.md | 7 + 5 files changed, 274 insertions(+), 2 deletions(-) create mode 100644 config_hub/finetune/mistral-7b-v0.2/lora.yaml create mode 100644 config_hub/finetune/mistral-7b-v0.2/qlora.yaml diff --git a/config_hub/finetune/README.md b/config_hub/finetune/README.md index c31a862380..995ada59d8 100644 --- a/config_hub/finetune/README.md +++ b/config_hub/finetune/README.md @@ -22,8 +22,11 @@ For more information, see the [Dealing with out-of-memory (OOM) errors](../../tu | llama-2-7b/qlora.yaml | 7B | Alpaca 2k | 4 | 0.814 | 13.68 GB | 512 | 2 | bfloat16 | 45.68 min (A10G) | | llama-2-7b/full.yaml | 7B | Alpaca 2k | 1 | 0.941 | 26.81 GB | 512 | 4 | bfloat16 | 1.78 min (4xA100) | | | | | | | | | | | | -| mistral-7b/lora.yaml | 7B | Alpaca 2k | 4 | 0.796 | 20.65 GB | 512 | 2 | bfloat16 | 31.04 min (1xA10G) | -| mistral-7b/qlora.yaml | 7B | Alpaca 2k | 4 | 0.803 | 14.29 GB | 512 | 2 | bfloat16 | 44.69 min (1xA10G) | +| mistral-7b/lora.yaml (v0.1) | 7B | Alpaca 2k | 4 | 0.796 | 20.65 GB | 512 | 2 | bfloat16 | 31.04 min (1xA10G) | +| mistral-7b/qlora.yaml (v0.1) | 7B | Alpaca 2k | 4 | 0.803 | 14.29 GB | 512 | 2 | bfloat16 | 44.69 min (1xA10G) | +| | | | | | | | | | | +| mistral-7b-v0.2/lora.yaml | 7B | Alpaca 2k | 4 | 0.801 | 20.65 GB | 512 | 2 | bfloat16 | 30.96 min (1xA10G) | +| mistral-7b-v0.2/qlora.yaml | 7B | Alpaca 2k | 4 | 0.813 | 14.29 GB | 512 | 2 | bfloat16 | 44.68 min (1xA10G) | | | | | | | | | | | | | phi-2/lora.yaml | 2B | Alpaca 2k | 1 | 0.832 | 13.98 GB | 512 | 4 | bfloat16 | 3.82 min (1xA10G) | | phi-2/qlora.yaml | 2B | Alpaca 2k | 1 | 0.846 | 
14.27 GB | 512 | 4 | bfloat16 | 4.55 min (1xA10G) | diff --git a/config_hub/finetune/mistral-7b-v0.2/lora.yaml b/config_hub/finetune/mistral-7b-v0.2/lora.yaml new file mode 100644 index 0000000000..aad8f7c986 --- /dev/null +++ b/config_hub/finetune/mistral-7b-v0.2/lora.yaml @@ -0,0 +1,121 @@ + +# The path to the base model's checkpoint directory to load for finetuning. (type: , default: checkpoints/stabilityai/stablelm-base-alpha-3b) +checkpoint_dir: checkpoints/unsloth/Mistral-7B-v0.2 + +# Directory in which to save checkpoints and logs. (type: , default: out/lora) +out_dir: out/finetune/lora-mistral-7b + +# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null) +precision: bf16-true + +# If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information. (type: Optional[Literal['nf4', 'nf4-dq', 'fp4', 'fp4-dq', 'int8-training']], default: null) +quantize: + +# How many devices/GPUs to use. (type: Union[int, str], default: 1) +devices: 1 + +# The LoRA rank. (type: int, default: 8) +lora_r: 32 + +# The LoRA alpha. (type: int, default: 16) +lora_alpha: 16 + +# The LoRA dropout value. (type: float, default: 0.05) +lora_dropout: 0.05 + +# Whether to apply LoRA to the query weights in attention. (type: bool, default: True) +lora_query: true + +# Whether to apply LoRA to the key weights in attention. (type: bool, default: False) +lora_key: false + +# Whether to apply LoRA to the value weights in attention. (type: bool, default: True) +lora_value: true + +# Whether to apply LoRA to the output projection in the attention block. (type: bool, default: False) +lora_projection: false + +# Whether to apply LoRA to the weights of the MLP in the attention block. (type: bool, default: False) +lora_mlp: false + +# Whether to apply LoRA to output head in GPT. (type: bool, default: False) +lora_head: false + +# Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``. +data: + class_path: litgpt.data.Alpaca2k + init_args: + mask_prompt: false + prompt_style: alpaca + ignore_index: -100 + seed: 42 + num_workers: 4 + +# Training-related arguments. See ``litgpt.args.TrainArgs`` for details +train: + + # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000) + save_interval: 200 + + # Number of iterations between logging calls (type: int, default: 1) + log_interval: 1 + + # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 128) + global_batch_size: 8 + + # Number of samples per data-parallel rank (type: int, default: 4) + micro_batch_size: 2 + + # Number of iterations with learning rate warmup active (type: int, default: 100) + lr_warmup_steps: 10 + + # Number of epochs to train on (type: Optional[int], default: 5) + epochs: 4 + + # Total number of tokens to train on (type: Optional[int], default: null) + max_tokens: + + # Limits the number of optimizer steps to run. (type: Optional[int], default: null) + max_steps: + + # Limits the length of samples. Off by default (type: Optional[int], default: null) + max_seq_length: 512 + + # Whether to tie the embedding weights with the language modeling head weights. 
(type: Optional[bool], default: null) + tie_embeddings: + + # (type: float, default: 0.0003) + learning_rate: 0.0002 + + # (type: float, default: 0.02) + weight_decay: 0.0 + + # (type: float, default: 0.9) + beta1: 0.9 + + # (type: float, default: 0.95) + beta2: 0.95 + + # (type: Optional[float], default: null) + max_norm: + + # (type: float, default: 6e-05) + min_lr: 6.0e-05 + +# Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details +eval: + + # Number of optimizer steps between evaluation calls (type: int, default: 100) + interval: 100 + + # Number of tokens to generate (type: Optional[int], default: 100) + max_new_tokens: 100 + + # Number of iterations (type: int, default: 100) + max_iters: 100 + +# The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: csv) +logger_name: csv + +# The random seed to use for reproducibility. (type: int, default: 1337) +seed: 1337 diff --git a/config_hub/finetune/mistral-7b-v0.2/qlora.yaml b/config_hub/finetune/mistral-7b-v0.2/qlora.yaml new file mode 100644 index 0000000000..e2f5c3aafc --- /dev/null +++ b/config_hub/finetune/mistral-7b-v0.2/qlora.yaml @@ -0,0 +1,123 @@ + +# The path to the base model's checkpoint directory to load for finetuning. (type: , default: checkpoints/stabilityai/stablelm-base-alpha-3b) +checkpoint_dir: checkpoints/unsloth/Mistral-7B-v0.2 + +# Directory in which to save checkpoints and logs. (type: , default: out/lora) +out_dir: out/finetune/qlora-mistral-7b + +# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null) +precision: bf16-true + +# If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information. (type: Optional[Literal['nf4', 'nf4-dq', 'fp4', 'fp4-dq', 'int8-training']], default: null) +quantize: bnb.nf4 + +# How many devices/GPUs to use. (type: Union[int, str], default: 1) +devices: 1 + +# The LoRA rank. (type: int, default: 8) +lora_r: 32 + +# The LoRA alpha. (type: int, default: 16) +lora_alpha: 16 + +# The LoRA dropout value. (type: float, default: 0.05) +lora_dropout: 0.05 + +# Whether to apply LoRA to the query weights in attention. (type: bool, default: True) +lora_query: true + +# Whether to apply LoRA to the key weights in attention. (type: bool, default: False) +lora_key: false + +# Whether to apply LoRA to the value weights in attention. (type: bool, default: True) +lora_value: true + +# Whether to apply LoRA to the output projection in the attention block. (type: bool, default: False) +lora_projection: false + +# Whether to apply LoRA to the weights of the MLP in the attention block. (type: bool, default: False) +lora_mlp: false + +# Whether to apply LoRA to output head in GPT. (type: bool, default: False) +lora_head: false + +# Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``. +data: + class_path: litgpt.data.Alpaca2k + init_args: + mask_prompt: false + val_split_fraction: 0.05 + prompt_style: alpaca + ignore_index: -100 + seed: 42 + num_workers: 4 + download_dir: data/alpaca2k + +# Training-related arguments. 
See ``litgpt.args.TrainArgs`` for details +train: + + # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000) + save_interval: 200 + + # Number of iterations between logging calls (type: int, default: 1) + log_interval: 1 + + # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 128) + global_batch_size: 8 + + # Number of samples per data-parallel rank (type: int, default: 4) + micro_batch_size: 2 + + # Number of iterations with learning rate warmup active (type: int, default: 100) + lr_warmup_steps: 10 + + # Number of epochs to train on (type: Optional[int], default: 5) + epochs: 4 + + # Total number of tokens to train on (type: Optional[int], default: null) + max_tokens: + + # Limits the number of optimizer steps to run (type: Optional[int], default: null) + max_steps: + + # Limits the length of samples (type: Optional[int], default: null) + max_seq_length: 512 + + # Whether to tie the embedding weights with the language modeling head weights (type: Optional[bool], default: null) + tie_embeddings: + + # (type: float, default: 0.0003) + learning_rate: 0.0002 + + # (type: float, default: 0.02) + weight_decay: 0.0 + + # (type: float, default: 0.9) + beta1: 0.9 + + # (type: float, default: 0.95) + beta2: 0.95 + + # (type: Optional[float], default: null) + max_norm: + + # (type: float, default: 6e-05) + min_lr: 6.0e-05 + +# Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details +eval: + + # Number of optimizer steps between evaluation calls (type: int, default: 100) + interval: 100 + + # Number of tokens to generate (type: Optional[int], default: 100) + max_new_tokens: 100 + + # Number of iterations (type: int, default: 100) + max_iters: 100 + +# The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: csv) +logger_name: csv + +# The random seed to use for reproducibility. (type: int, default: 1337) +seed: 1337 diff --git a/litgpt/config.py b/litgpt/config.py index e188d0feff..caad1454b9 100644 --- a/litgpt/config.py +++ b/litgpt/config.py @@ -1387,6 +1387,24 @@ def norm_class(self) -> Type: copy["name"] = c["name"].format(kind) copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) configs.append(copy) +configs.append( + # https://huggingface.co/unsloth/mistral-7b-v0.2/blob/main/config.json + dict( + name="Mistral-7B-v0.2", + hf_config=dict(org="unsloth", name="Mistral-7B-v0.2"), + padded_vocab_size=32000, + block_size=32768, + n_layer=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=14336, + ) +) configs.append( # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/blob/main/config.json dict( diff --git a/tutorials/download_model_weights.md b/tutorials/download_model_weights.md index ad42c044f6..748295d0fb 100644 --- a/tutorials/download_model_weights.md +++ b/tutorials/download_model_weights.md @@ -146,8 +146,15 @@ togethercomputer/RedPajama-INCITE-Chat-7B-v0.1 togethercomputer/RedPajama-INCITE-Instruct-3B-v1 togethercomputer/RedPajama-INCITE-Instruct-7B-v0.1 Trelis/Llama-2-7b-chat-hf-function-calling-v2 +unsloth/Mistral-7B-v0.2 ``` +  + +> [!TIP] +> To sort the list above by model name after the `/`, use `litgpt download | sort -f -t'/' -k2`. + +   ### 2. 
Download Model Weights From a509064d802d1e05bab16d9a49e763a6c955b84e Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Fri, 29 Mar 2024 11:43:26 -0500 Subject: [PATCH 29/37] Correct typo in table 7B -> 3B (#1217) --- config_hub/finetune/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/config_hub/finetune/README.md b/config_hub/finetune/README.md index 995ada59d8..fc82e0854b 100644 --- a/config_hub/finetune/README.md +++ b/config_hub/finetune/README.md @@ -32,9 +32,9 @@ For more information, see the [Dealing with out-of-memory (OOM) errors](../../tu | phi-2/qlora.yaml | 2B | Alpaca 2k | 1 | 0.846 | 14.27 GB | 512 | 4 | bfloat16 | 4.55 min (1xA10G) | | phi-2/full.yaml | 2B | Alpaca 2k | 1 | 0.937 | 14.44 GB | 512 | 4 | bfloat16 | 13.00 min (1xA10G) | | | | | | | | | | | | -| stablelm-base-alpha-3b/lora.yaml | 7B | Alpaca 2k | 4 | 1.367 | 8.58 GB | 512 | 2 | bfloat16 | 13.02 min (1xA10G) | -| stablelm-base-alpha-3b/qlora.yaml | 7B | Alpaca 2k | 4 | 1.392 | 5.24 GB | 512 | 2 | bfloat16 | 25.71 min (1xA10G) | -| stablelm-base-alpha-3b/full.yaml | 7B | Alpaca 2k | 1 | 1.494 | 21.23 GB | 512 | 1 | bfloat16 | 72.72 min (2xA10G) | +| stablelm-base-alpha-3b/lora.yaml | 3B | Alpaca 2k | 4 | 1.367 | 8.58 GB | 512 | 2 | bfloat16 | 13.02 min (1xA10G) | +| stablelm-base-alpha-3b/qlora.yaml | 3B | Alpaca 2k | 4 | 1.392 | 5.24 GB | 512 | 2 | bfloat16 | 25.71 min (1xA10G) | +| stablelm-base-alpha-3b/full.yaml | 3B | Alpaca 2k | 1 | 1.494 | 21.23 GB | 512 | 1 | bfloat16 | 72.72 min (2xA10G) | | | | | | | | | | | | | tiny-llama/lora.yaml | 1.1B | Alpaca 2k | 3 | 1.038 | 13.50 GB | 512 | 8 | bfloat16 | 8.06 min (1xA10G) | | tiny-llama/qlora.yaml | 1.1B | Alpaca 2k | 3 | 1.056 | 16.24 GB | 512 | 8 | bfloat16 | 8.74 min (1xA10G) | From 81d7cf337a14a0e67a94fdde0ee8ff19ffc45273 Mon Sep 17 00:00:00 2001 From: Eswar Divi <76403422+EswarDivi@users.noreply.github.com> Date: Sat, 30 Mar 2024 01:33:32 +0530 Subject: [PATCH 30/37] fixed prepare_dataset path in 0_to_litgpt.md (#1218) --- tutorials/0_to_litgpt.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/0_to_litgpt.md b/tutorials/0_to_litgpt.md index 415190fec6..f0497b45ae 100644 --- a/tutorials/0_to_litgpt.md +++ b/tutorials/0_to_litgpt.md @@ -388,7 +388,7 @@ Time for inference: 2.15 sec total, 39.57 tokens/sec, 85 tokens **More information and additional resources** -- [tutorials/prepare_dataset](prepare_dataset): A summary of all out-of-the-box supported datasets in LitGPT and utilities for preparing custom datasets +- [tutorials/prepare_dataset](prepare_dataset.md): A summary of all out-of-the-box supported datasets in LitGPT and utilities for preparing custom datasets - [tutorials/finetune](finetune.md): An overview of the different finetuning methods supported in LitGPT - [tutorials/finetune_full](finetune_full.md): A tutorial on full-parameter finetuning - [tutorials/finetune_lora](finetune_lora.md): Options for parameter-efficient finetuning with LoRA and QLoRA From 6d04a87988ecf499b2a9ec1cfe1ad529ffb97894 Mon Sep 17 00:00:00 2001 From: Andrei-Aksionov <58434077+Andrei-Aksionov@users.noreply.github.com> Date: Sat, 30 Mar 2024 18:19:03 +0300 Subject: [PATCH 31/37] Fix unreachable links in markdown files (#1219) --- extensions/xla/README.md | 2 +- tutorials/0_to_litgpt.md | 2 +- tutorials/inference.md | 2 +- tutorials/oom.md | 2 +- tutorials/prepare_dataset.md | 5 ----- 5 files changed, 4 insertions(+), 9 deletions(-) diff --git a/extensions/xla/README.md b/extensions/xla/README.md index 
d71a0e0f2c..6182f24d54 100644 --- a/extensions/xla/README.md +++ b/extensions/xla/README.md @@ -78,7 +78,7 @@ export PJRT_DEVICE=TPU > An extensive guide on setup and available options can be found [here](https://cloud.google.com/tpu/docs/v4-users-guide). Since a new machine was created, you may need to download pretrained weights. -They can be copied to the machine using `gcloud compute tpus tpu-vm scp`, or you can follow the steps described in our [downloading guide](download_model_weights.md). +They can be copied to the machine using `gcloud compute tpus tpu-vm scp`, or you can follow the steps described in our [downloading guide](../../tutorials/download_model_weights.md). It is also recommended to set up a persistent disk from which to load checkpoints. Follow [this guide](https://cloud.google.com/tpu/docs/setup-persistent-disk#setting_up_a_tpu_vm_and_a_persistent_disk) to do so. diff --git a/tutorials/0_to_litgpt.md b/tutorials/0_to_litgpt.md index f0497b45ae..760c5d00e0 100644 --- a/tutorials/0_to_litgpt.md +++ b/tutorials/0_to_litgpt.md @@ -527,7 +527,7 @@ lm_eval --model hf \   **More information and additional resources** -- [tutorials/convert_lit_models](tutorials/convert_lit_models.md): Tutorial on converting LitGPT weights +- [tutorials/convert_lit_models](./convert_lit_models.md): Tutorial on converting LitGPT weights diff --git a/tutorials/inference.md b/tutorials/inference.md index 81cefe6816..4675624149 100644 --- a/tutorials/inference.md +++ b/tutorials/inference.md @@ -1,6 +1,6 @@ # Inference -We demonstrate how to run inference (next token prediction) with the GPT base model in the [`generate.py`](generate.py) script: +We demonstrate how to run inference (next token prediction) with the GPT base model in the [`generate.py`](../litgpt/generate/base.py) script: ```bash litgpt generate base --prompt "Hello, my name is" --checkpoint_dir checkpoints/stabilityai/stablelm-base-alpha-3b diff --git a/tutorials/oom.md b/tutorials/oom.md index c12573da10..c02ee5b2fd 100644 --- a/tutorials/oom.md +++ b/tutorials/oom.md @@ -34,7 +34,7 @@ However, your hardware may not support such large context lengths. Here's what y * For the finetuning scripts, you can trim the length of the samples in your dataset. All the finetuning scripts expose a `--data.max_seq_length=...` argument. This might also be useful in cases where sample lengths are highly unbalanced, as the presence of a single very long sample would incur a larger memory usage for all other - shorter samples. For example, the median length of the samples in Alpaca is 110 tokens. Truncating the Alpaca dataset to 256 max tokens reduces the memory requirements of a Falcon 7B model from 23.52 GB to 15.73 GB. For more information about the dataset truncation, please see the *Truncating datasets* section in the [prepare_datasets.md](prepare_datasets.md) tutorial. + shorter samples. For example, the median length of the samples in Alpaca is 110 tokens. Truncating the Alpaca dataset to 256 max tokens reduces the memory requirements of a Falcon 7B model from 23.52 GB to 15.73 GB. For more information about the dataset truncation, please see the *Truncating datasets* section in the [prepare_dataset.md](prepare_dataset.md) tutorial. Keep in mind that reducing the context length will affect the modelling performance on text sequences longer than the limit. 
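As a concrete sketch of the truncation advice in the `oom.md` hunk above (assumptions: a downloaded Falcon 7B checkpoint at `checkpoints/tiiuae/falcon-7b`, and the `--train.max_seq_length` flag spelling used by the finetuning examples in `prepare_dataset.md` below), a memory-trimmed LoRA run might look like this:

```bash
# Cap every sample at 256 tokens to shrink the memory footprint of finetuning.
litgpt finetune lora \
  --checkpoint_dir checkpoints/tiiuae/falcon-7b \
  --train.max_seq_length 256
```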
diff --git a/tutorials/prepare_dataset.md b/tutorials/prepare_dataset.md index 2cb63ecee6..7f7cf238ae 100644 --- a/tutorials/prepare_dataset.md +++ b/tutorials/prepare_dataset.md @@ -79,7 +79,6 @@ For comparison, the Falcon 7B model requires 23.52 GB of memory for the original ### Alpaca-GPT4 - The Alpaca-GPT4 was built by using the prompts of the original Alpaca dataset and generate the responses via GPT 4. The dataset consists of 52,000 instructions and responses. @@ -126,7 +125,6 @@ litgpt finetune lora \ --train.max_seq_length 256 ``` -   ### Deita @@ -162,7 +160,6 @@ litgpt finetune lora \ --train.max_seq_length 512 ``` -   ### Dolly @@ -281,7 +278,6 @@ litgpt finetune lora \ However, you can also select individual subsets via comma-separated strings as follows: - ```bash litgpt finetune lora \ --data FLAN \ @@ -385,5 +381,4 @@ Note that you only need to modify a small fraction of the code file, namely the In addition to the finetuning dataset described above, LitGPT also supports several datasets for pretraining. The pretraining datasets are described in more detail in the following separate tutorial documents: -- [Pretrain Llama 2 on OpenWebText](./pretrain_openwebtext.md) - [Pretrain TinyLlama on Slimpajama and Starcoder](./pretrain_tinyllama.md) From 13369769a0705731a3c64ed71d6cff1fad5b311c Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Mon, 1 Apr 2024 12:58:53 -0500 Subject: [PATCH 32/37] Mention usage of related finetunes (#1223) --- tutorials/download_model_weights.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tutorials/download_model_weights.md b/tutorials/download_model_weights.md index 748295d0fb..55c214a01c 100644 --- a/tutorials/download_model_weights.md +++ b/tutorials/download_model_weights.md @@ -154,6 +154,16 @@ unsloth/Mistral-7B-v0.2 > [!TIP] > To sort the list above by model name after the `/`, use `litgpt download | sort -f -t'/' -k2`. +  + +> [!NOTE] +> If you want to adopt a model variant that is not listed in the table above but has a similar architecture as one of the supported models, you can use this model by by using the `--model_name` argument as shown below: +> ```bash +> litgpt download \ +> --repo_id NousResearch/Hermes-2-Pro-Mistral-7B \ +> --model_name Mistral-7B-v0.1 +> ``` +   ### 2. 
Download Model Weights From 449eb29b111324090fa7066e0b26e9166806b02e Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Mon, 1 Apr 2024 15:44:48 -0500 Subject: [PATCH 33/37] Reduce microbatch size (#1212) --- litgpt/finetune/adapter.py | 4 ++-- litgpt/finetune/adapter_v2.py | 4 ++-- litgpt/finetune/full.py | 2 +- litgpt/finetune/lora.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/litgpt/finetune/adapter.py b/litgpt/finetune/adapter.py index 88fd4ecc26..304ea6bd3a 100644 --- a/litgpt/finetune/adapter.py +++ b/litgpt/finetune/adapter.py @@ -46,8 +46,8 @@ def setup( train: TrainArgs = TrainArgs( save_interval=1000, log_interval=1, - global_batch_size=128, - micro_batch_size=4, + global_batch_size=16, + micro_batch_size=1, lr_warmup_steps=100, epochs=5, learning_rate=1e-3, diff --git a/litgpt/finetune/adapter_v2.py b/litgpt/finetune/adapter_v2.py index 97d0e51f16..d925f99ce1 100644 --- a/litgpt/finetune/adapter_v2.py +++ b/litgpt/finetune/adapter_v2.py @@ -46,8 +46,8 @@ def setup( train: TrainArgs = TrainArgs( save_interval=1000, log_interval=1, - global_batch_size=128, - micro_batch_size=4, + global_batch_size=16, + micro_batch_size=1, lr_warmup_steps=100, epochs=5, learning_rate=1e-3, diff --git a/litgpt/finetune/full.py b/litgpt/finetune/full.py index 38aa1ae466..fdcd6bff1e 100644 --- a/litgpt/finetune/full.py +++ b/litgpt/finetune/full.py @@ -44,7 +44,7 @@ def setup( train: TrainArgs = TrainArgs( save_interval=1000, log_interval=1, - global_batch_size=64, + global_batch_size=16, micro_batch_size=1, lr_warmup_steps=100, epochs=5, diff --git a/litgpt/finetune/lora.py b/litgpt/finetune/lora.py index ce8b7764bd..25ae0df839 100644 --- a/litgpt/finetune/lora.py +++ b/litgpt/finetune/lora.py @@ -56,8 +56,8 @@ def setup( train: TrainArgs = TrainArgs( save_interval=1000, log_interval=1, - global_batch_size=128, - micro_batch_size=4, + global_batch_size=16, + micro_batch_size=1, lr_warmup_steps=100, epochs=5, learning_rate=3e-4, From 3a3ae43b804b490d2813aea4c8ed28d33f1a086d Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Tue, 2 Apr 2024 13:31:24 -0500 Subject: [PATCH 34/37] Pretraining docs (#1216) --- README.md | 8 ++-- tutorials/0_to_litgpt.md | 1 + tutorials/pretrain.md | 65 +++++++++++++++++++++++++++++++++ tutorials/pretrain_tinyllama.md | 19 ++++++++++ 4 files changed, 89 insertions(+), 4 deletions(-) create mode 100644 tutorials/pretrain.md diff --git a/README.md b/README.md index 162512b914..c06e792578 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ ✅  Optimized and efficient code: Flash Attention v2, multi-GPU support via fully-sharded data parallelism, [optional CPU offloading](tutorials/oom.md#do-sharding-across-multiple-gpus), and [TPU and XLA support](extensions/xla). -✅  [Pretraining](tutorials/pretrain_tinyllama.md), [finetuning](tutorials/finetune.md), and [inference](tutorials/inference.md) in various precision settings: FP32, FP16, BF16, and FP16/FP32 mixed. +✅  [Pretraining](tutorials/pretrain.md), [finetuning](tutorials/finetune.md), and [inference](tutorials/inference.md) in various precision settings: FP32, FP16, BF16, and FP16/FP32 mixed. ✅  [Configuration files](config_hub) for great out-of-the-box performance. @@ -37,7 +37,7 @@ ✅  [Exporting](tutorials/convert_lit_models.md) to other popular model weight formats. 
-✅  Many popular datasets for [pretraining](tutorials/pretrain_tinyllama.md) and [finetuning](tutorials/prepare_dataset.md), and [support for custom datasets](tutorials/prepare_dataset.md#preparing-custom-datasets-for-instruction-finetuning). +✅  Many popular datasets for [pretraining](tutorials/pretrain.md) and [finetuning](tutorials/prepare_dataset.md), and [support for custom datasets](tutorials/prepare_dataset.md#preparing-custom-datasets-for-instruction-finetuning). ✅  Readable and easy-to-modify code to experiment with the latest research ideas. @@ -114,7 +114,7 @@ For more information, refer to the [download](tutorials/download_model_weights.m ## Finetuning and pretraining -LitGPT supports [pretraining](tutorials/pretrain_tinyllama.md) and [finetuning](tutorials/finetune.md) to optimize models on excisting or custom datasets. Below is an example showing how to finetune a model with LoRA: +LitGPT supports [pretraining](tutorials/pretrain.md) and [finetuning](tutorials/finetune.md) to optimize models on excisting or custom datasets. Below is an example showing how to finetune a model with LoRA: ```bash # 1) Download a pretrained model @@ -336,7 +336,7 @@ If you have general questions about building with LitGPT, please [join our Disco Tutorials and in-depth feature documentation can be found below: - Finetuning, incl. LoRA, QLoRA, and Adapters ([tutorials/finetune.md](tutorials/finetune.md)) -- Pretraining ([tutorials/pretrain_tinyllama.md](tutorials/pretrain_tinyllama.md)) +- Pretraining ([tutorials/pretrain.md](tutorials/pretrain.md)) - Model evaluation ([tutorials/evaluation.md](tutorials/evaluation.md)) - Supported and custom datasets ([tutorials/prepare_dataset.md](tutorials/prepare_dataset.md)) - Quantization ([tutorials/quantize.md](tutorials/quantize.md)) diff --git a/tutorials/0_to_litgpt.md b/tutorials/0_to_litgpt.md index 760c5d00e0..65d19cef10 100644 --- a/tutorials/0_to_litgpt.md +++ b/tutorials/0_to_litgpt.md @@ -125,6 +125,7 @@ litgpt pretrain --help **More information and additional resources** +- [tutorials/pretraimd](./pretrain.md): General information about pretraining in LitGPT - [tutorials/pretrain_tinyllama](./pretrain_tinyllama.md): A tutorial for finetuning a 1.1B TinyLlama model on 3 trillion tokens - [config_hub/pretrain](../config_hub/pretrain): Pre-made config files for pretraining that work well out of the box - Project templates in reproducible environments with multi-GPU and multi-node support: diff --git a/tutorials/pretrain.md b/tutorials/pretrain.md new file mode 100644 index 0000000000..4a8db678e1 --- /dev/null +++ b/tutorials/pretrain.md @@ -0,0 +1,65 @@ +# Pretrain LLMs with LitGPT + + +This document explains how to pretrain LLMs using LitGPT. + +  +## The Pretraining API + +You can pretrain models in LitGPT using the `litgpt pretrain` API starting with any of the available architectures listed by calling `litgpt pretrain` without any additional arguments: + +```bash +litgpt pretrain +``` + +Shown below is an abbreviated list: + +``` +ValueError: Please specify --model_name . Available values: +Camel-Platypus2-13B +... +Gemma-2b +... +Llama-2-7b-hf +... +Mixtral-8x7B-v0.1 +... 
+pythia-14m +``` + +For demonstration purposes, we can pretrain a small 14 million-parameter Pythia model on the small TinyStories dataset using the [debug.yaml config file](https://github.com/Lightning-AI/litgpt/blob/main/config_hub/pretrain/debug.yaml) as follows: + +```bash +litgpt pretrain \ + --model_name pythia-14m \ + --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/pretrain/debug.yaml +``` + + + + +  +## Pretrain a 1.1B TinyLlama model + +You can find an end-to-end LitGPT tutorial for pretraining a TinyLlama model using LitGPT [here](pretrain_tinyllama.md). + + +  +## Optimize LitGPT pretraining with Lightning Thunder + +[Lightning Thunder](https://github.com/Lightning-AI/lightning-thunder) is a source-to-source compiler for PyTorch, which is fully compatible with LitGPT. In experiments, Thunder resulted in a 40% speed-up compared to using regular PyTorch when finetuning a 7B Llama 2 model. + +For more information, see the [Lightning Thunder extension README](https://github.com/Lightning-AI/lightning-thunder). + + +  +## Project templates + +The following [Lightning Studio](https://lightning.ai/lightning-ai/studios) templates provide LitGPT pretraining projects in reproducible environments with multi-GPU and multi-node support: +  + +| | | +|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|

[Prepare the TinyLlama 1T token dataset](https://lightning.ai/lightning-ai/studios/prepare-the-tinyllama-1t-token-dataset) | [Pretrain LLMs - TinyLlama 1.1B](https://lightning.ai/lightning-ai/studios/pretrain-llms-tinyllama-1-1b) |
+| [Continued Pretraining with TinyLlama 1.1B
](https://lightning.ai/lightning-ai/studios/continued-pretraining-with-tinyllama-1-1b) | | +| | \ No newline at end of file diff --git a/tutorials/pretrain_tinyllama.md b/tutorials/pretrain_tinyllama.md index 245ec48ab7..f4976ee097 100644 --- a/tutorials/pretrain_tinyllama.md +++ b/tutorials/pretrain_tinyllama.md @@ -5,6 +5,7 @@ This tutorial will walk you through pretraining [TinyLlama](https://github.com/j > [!TIP] > To get started with zero setup, clone the [TinyLlama studio on Lightning AI](https://lightning.ai/lightning-ai/studios/llm-pretrain-tinyllama-1-1b). +  ## What's TinyLlama? [TinyLlama](https://github.com/jzhang38/TinyLlama/) is architecturally the same as Meta AI's LLama 2, but only has 1.1B parameters and is instead trained on multiple epochs on a mix of [SlimPajama](https://huggingface.co/datasets/cerebras/SlimPajama-627B) and [Starcoder](https://huggingface.co/datasets/bigcode/starcoderdata) datasets. @@ -26,6 +27,7 @@ Here is a quick fact sheet: (this table was sourced from the author's [README](https://github.com/jzhang38/TinyLlama/)) +  ## Download datasets You can download the data using git lfs: @@ -42,6 +44,7 @@ git clone https://huggingface.co/datasets/bigcode/starcoderdata data/starcoderda Around 1.2 TB of disk space is required to store both datasets. +  ## Prepare the datasets for training In order to start pretraining litgpt on it, you need to read, tokenize, and write the data in binary chunks. This will leverage the `litdata` optimization pipeline and streaming dataset. @@ -95,6 +98,7 @@ python litgpt/data/prepare_slimpajama.py \ If you want to run on a small slice of the datasets first, pass the flag `--fast_dev_run=true` to the commands above. In the above we are assuming that you will be using the same tokenizer as used in LlaMA/TinyLlama, but any trained [SentencePiece](https://github.com/google/sentencepiece) tokenizer with a 32000 vocabulary size will do here. +  ## Pretraining Running the pretraining script with its default settings requires at least 8 A100 GPUs. @@ -139,6 +143,7 @@ Last, logging is kept minimal in the script, but for long-running experiments we As an example, we included WandB (set `--logger_name=wandb`) to show how you can integrate any experiment tracking framework. For reference, [here are the loss curves for our reproduction](https://api.wandb.ai/links/awaelchli/y7pzdpwy). +  ## Resume training The checkpoints saved during pretraining contain all the information to resume if needed. @@ -151,6 +156,7 @@ litgpt pretrain \ ``` **Important:** Each checkpoint is a directory. Point to the directory, not the 'lit_model.pth' file inside of it. +  ## Export checkpoints After training is completed, you can convert the checkpoint to a format that can be loaded for evaluation, inference, finetuning etc. @@ -172,3 +178,16 @@ checkpoints/tiny-llama/final ``` You can then use this checkpoint folder to run [evaluation](evaluation.md), [inference](inference.md), [finetuning](finetune_lora.md) or [process the checkpoint further](convert_lit_models.md). 
+ + +  +## Project templates + +The following [Lightning Studio](https://lightning.ai/lightning-ai/studios) templates provide LitGPT pretraining projects in reproducible environments with multi-GPU and multi-node support: +  + +| | | +|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|

[Prepare the TinyLlama 1T token dataset](https://lightning.ai/lightning-ai/studios/prepare-the-tinyllama-1t-token-dataset) | [Pretrain LLMs - TinyLlama 1.1B](https://lightning.ai/lightning-ai/studios/pretrain-llms-tinyllama-1-1b) |
+| [Continued Pretraining with TinyLlama 1.1B
](https://lightning.ai/lightning-ai/studios/continued-pretraining-with-tinyllama-1-1b) | | +| | \ No newline at end of file From de1890ad52d1e7e629b928432ff0f4bacb42071e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Wed, 3 Apr 2024 15:40:35 +0200 Subject: [PATCH 35/37] Drop test xfail (#1239) --- tests/data/test_tinystories.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/data/test_tinystories.py b/tests/data/test_tinystories.py index d0318995bf..bfb009c1a9 100644 --- a/tests/data/test_tinystories.py +++ b/tests/data/test_tinystories.py @@ -16,7 +16,6 @@ def fake_chunk(path, data): optimize(fn=tokenize, inputs=[data] * len(data), output_dir=str(path), num_workers=1, chunk_bytes="200MB") -@pytest.mark.xfail(raises=IndexError, strict=True) # requires https://github.com/Lightning-AI/litdata/pull/77 @pytest.mark.parametrize( ("max_seq_len", "expected"), [ From 72c42fcc4d010efcfe418322e06531a47b9f06f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Wed, 3 Apr 2024 15:53:23 +0200 Subject: [PATCH 36/37] uv no longer requires editable installs (#1187) --- .github/workflows/cpu-tests.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cpu-tests.yml b/.github/workflows/cpu-tests.yml index 5f43634679..bb3b85c3e6 100644 --- a/.github/workflows/cpu-tests.yml +++ b/.github/workflows/cpu-tests.yml @@ -46,9 +46,7 @@ jobs: - name: Install minimal dependencies run: | - # uv pip install . is not yet supported, only `-e .` - # https://github.com/astral-sh/uv/issues/1896 - uv pip install --system -e . + uv pip install --system . uv pip list # make sure all modules are still importable with only the minimal dependencies available modules=$( @@ -61,7 +59,7 @@ jobs: - name: Install all dependencies run: | - uv pip install --system -e '.[all,test]' 'lm_eval @ git+https://github.com/EleutherAI/lm-evaluation-harness.git@115206dc89dad67b8b' + uv pip install --system '.[all,test]' 'lm_eval @ git+https://github.com/EleutherAI/lm-evaluation-harness.git@115206dc89dad67b8b' uv pip list - name: Run tests From 70218de1bf40e2c3e0844e17f23b4da8dde1232a Mon Sep 17 00:00:00 2001 From: awaelchli Date: Wed, 3 Apr 2024 16:51:44 +0200 Subject: [PATCH 37/37] Enable setting a fraction of the epoch as warmup (#1238) --- litgpt/args.py | 22 +++++++++++++++++++-- litgpt/finetune/adapter.py | 2 +- litgpt/finetune/adapter_v2.py | 2 +- litgpt/finetune/full.py | 2 +- litgpt/finetune/lora.py | 2 +- litgpt/pretrain.py | 3 ++- tests/test_args.py | 36 +++++++++++++++++++++++++++++++++++ 7 files changed, 62 insertions(+), 7 deletions(-) create mode 100644 tests/test_args.py diff --git a/litgpt/args.py b/litgpt/args.py index d6ce527d36..b227ffe3f6 100644 --- a/litgpt/args.py +++ b/litgpt/args.py @@ -1,5 +1,5 @@ # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. - +import math from dataclasses import dataclass from typing import Optional @@ -16,8 +16,10 @@ class TrainArgs: """Number of samples between optimizer steps across data-parallel ranks""" micro_batch_size: int = 4 """Number of samples per data-parallel rank""" - lr_warmup_steps: int = 100 + lr_warmup_steps: Optional[int] = 100 """Number of iterations with learning rate warmup active""" + lr_warmup_fraction: Optional[float] = None + """The fraction of an epoch to use for learning rate warmup""" epochs: Optional[int] = None """Number of epochs to train on""" # TODO: `pretrain` is the only script using `max_tokens` explicitly. replace it with epoch_size*epochs? 
@@ -38,6 +40,14 @@ class TrainArgs: max_norm: Optional[float] = None min_lr: float = 6e-5 + def __post_init__(self) -> None: + if self.lr_warmup_fraction and self.lr_warmup_steps: + raise ValueError( + "Can't provide both `--train.lr_warmup_fraction` and `--train.lr_warmup_steps`. Choose one." + ) + if self.lr_warmup_fraction and not (0 <= self.lr_warmup_fraction <= 1): + raise ValueError("`--train.lr_warmup_fraction` must be between 0 and 1.") + def gradient_accumulation_iters(self, devices: int) -> int: """Number of iterations between gradient synchronizations""" gradient_accumulation_iters = self.batch_size(devices) // self.micro_batch_size @@ -50,6 +60,14 @@ def batch_size(self, devices: int) -> int: assert batch_size > 0 return batch_size + def warmup_iters(self, devices: int, max_iters: int, train_dataloader) -> int: + """Number of iterations to warm up the learning rate.""" + if self.lr_warmup_fraction: + return min(max_iters, math.ceil(self.lr_warmup_fraction * len(train_dataloader))) + if self.lr_warmup_steps: + return min(max_iters, self.lr_warmup_steps * self.gradient_accumulation_iters(devices)) + return 0 + @dataclass class EvalArgs: diff --git a/litgpt/finetune/adapter.py b/litgpt/finetune/adapter.py index 304ea6bd3a..9326793e2b 100644 --- a/litgpt/finetune/adapter.py +++ b/litgpt/finetune/adapter.py @@ -366,7 +366,7 @@ def save_adapter_checkpoint(fabric: L.Fabric, model: torch.nn.Module, file_path: def validate_args(train: TrainArgs, eval: EvalArgs) -> None: issues = [] - unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings"])] + unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings", "lr_warmup_fraction"])] for args, names in unsupported: for name in names: if getattr(args, name) is not None: diff --git a/litgpt/finetune/adapter_v2.py b/litgpt/finetune/adapter_v2.py index d925f99ce1..3c4634e354 100644 --- a/litgpt/finetune/adapter_v2.py +++ b/litgpt/finetune/adapter_v2.py @@ -366,7 +366,7 @@ def save_adapter_v2_checkpoint(fabric: L.Fabric, model: torch.nn.Module, file_pa def validate_args(train: TrainArgs, eval: EvalArgs) -> None: issues = [] - unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings"])] + unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings", "lr_warmup_fraction"])] for args, names in unsupported: for name in names: if getattr(args, name) is not None: diff --git a/litgpt/finetune/full.py b/litgpt/finetune/full.py index fdcd6bff1e..3a2e2a7176 100644 --- a/litgpt/finetune/full.py +++ b/litgpt/finetune/full.py @@ -341,7 +341,7 @@ def get_longest_seq_length(data: List[Dict]) -> Tuple[int, int]: def validate_args(train: TrainArgs, eval: EvalArgs) -> None: issues = [] - unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings"])] + unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings", "lr_warmup_fraction"])] for args, names in unsupported: for name in names: if getattr(args, name) is not None: diff --git a/litgpt/finetune/lora.py b/litgpt/finetune/lora.py index 25ae0df839..bb60b2d180 100644 --- a/litgpt/finetune/lora.py +++ b/litgpt/finetune/lora.py @@ -397,7 +397,7 @@ def save_lora_checkpoint(fabric: L.Fabric, model: torch.nn.Module, file_path: Pa def validate_args(train: TrainArgs, eval: EvalArgs) -> None: issues = [] - unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings"])] + unsupported = [(train, ["max_tokens", "max_norm", "tie_embeddings", "lr_warmup_fraction"])] for args, names in unsupported: for name in names: if getattr(args, name) is not None: diff --git 
a/litgpt/pretrain.py b/litgpt/pretrain.py index bdd8f03a0b..eef48bdc0c 100644 --- a/litgpt/pretrain.py +++ b/litgpt/pretrain.py @@ -244,7 +244,8 @@ def fit( total_t0 = time.perf_counter() val_loss = "n/a" - warmup_iters = train.lr_warmup_steps * train.gradient_accumulation_iters(devices) + warmup_iters = train.warmup_iters(devices, max_iters, train_dataloader) + for train_data in train_iterator: if state["iter_num"] >= max_iters: break diff --git a/tests/test_args.py b/tests/test_args.py new file mode 100644 index 0000000000..0b13c83976 --- /dev/null +++ b/tests/test_args.py @@ -0,0 +1,36 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. +import pytest + +from litgpt.args import TrainArgs + + +def test_compute_warmup_iters(): + # warmup disabled + train = TrainArgs(lr_warmup_steps=0, lr_warmup_fraction=0) + assert train.warmup_iters(devices=1, max_iters=1000, train_dataloader=range(10)) == 0 + + # lr_warmup_steps and lr_warmup_fraction both are not allowed + with pytest.raises(ValueError, match="Can't provide both `--train.lr_warmup_fraction`"): + TrainArgs(lr_warmup_steps=1, lr_warmup_fraction=0.2) + + # lr_warmup_fraction invalid range + with pytest.raises(ValueError, match=" must be between 0 and 1"): + TrainArgs(lr_warmup_steps=0, lr_warmup_fraction=1.1) + + # lr_warmup_steps + train = TrainArgs(global_batch_size=1, micro_batch_size=1, lr_warmup_steps=100, lr_warmup_fraction=0) + assert train.warmup_iters(devices=1, max_iters=1000, train_dataloader=range(10)) == 100 + # lr_warmup_steps multiplied by accumulation factor + train.global_batch_size = 4 + assert train.warmup_iters(devices=1, max_iters=1000, train_dataloader=range(10)) == 400 + assert train.warmup_iters(devices=2, max_iters=1000, train_dataloader=range(10)) == 200 + # lr_warmup_steps truncated by max iters + assert train.warmup_iters(devices=1, max_iters=120, train_dataloader=range(10)) == 120 + + # lr_warmup_fraction + train = TrainArgs(global_batch_size=1, micro_batch_size=1, lr_warmup_steps=0, lr_warmup_fraction=0.3) + assert train.warmup_iters(devices=1, max_iters=1000, train_dataloader=range(100)) == 30 + # lr_warmup_fraction truncated by max iters + assert train.warmup_iters(devices=1, max_iters=20, train_dataloader=range(100)) == 20 + # lr_warmup_fraction rounds up + assert train.warmup_iters(devices=1, max_iters=1000, train_dataloader=range(5)) == 2
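A hypothetical invocation of the option added in this patch, building on the `pythia-14m`/`debug.yaml` pretraining example from earlier in the series; it assumes jsonargparse exposes the new `TrainArgs` field as `--train.lr_warmup_fraction`, mirroring the existing `--train.lr_warmup_steps`. Since `TrainArgs.__post_init__` rejects setting both warmup options, the step-based warmup is zeroed out explicitly:

```bash
# Warm up the learning rate over the first 10% of the epoch instead of a fixed number of steps.
litgpt pretrain \
  --model_name pythia-14m \
  --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/pretrain/debug.yaml \
  --train.lr_warmup_steps 0 \
  --train.lr_warmup_fraction 0.1
```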