From 375e99ead0718187f85012b4b661d178eaae8d79 Mon Sep 17 00:00:00 2001 From: rasbt Date: Thu, 21 Mar 2024 20:37:23 +0000 Subject: [PATCH 01/40] `litgpt evaluate` command --- litgpt/__main__.py | 2 + litgpt/lora.py | 2 +- litgpt/scripts/evaluate.py | 108 +++++++++++++++++++++++++++++++++++++ pyproject.toml | 2 + tests/test_cli.py | 5 +- tutorials/evaluation.md | 77 +++++++++++++++----------- 6 files changed, 162 insertions(+), 34 deletions(-) create mode 100644 litgpt/scripts/evaluate.py diff --git a/litgpt/__main__.py b/litgpt/__main__.py index d31fb9f612..1c2db41eec 100644 --- a/litgpt/__main__.py +++ b/litgpt/__main__.py @@ -23,6 +23,7 @@ ) from litgpt.scripts.download import download_from_hub as download_fn from litgpt.scripts.merge_lora import merge_lora as merge_lora_fn +from litgpt.scripts.evaluate import convert_and_evaluate as evaluate_fn if TYPE_CHECKING: from jsonargparse import ArgumentParser @@ -78,6 +79,7 @@ def main() -> None: }, }, "merge_lora": {"help": "Merges the LoRA weights with the base model.", "fn": merge_lora_fn}, + "evaluate": {"help": "Evaluate a model with the LM Evaluation Harness.", "fn": evaluate_fn}, } from jsonargparse import set_config_read_mode, set_docstring_parse_options diff --git a/litgpt/lora.py b/litgpt/lora.py index fd54d6f771..fef0ad9598 100644 --- a/litgpt/lora.py +++ b/litgpt/lora.py @@ -22,7 +22,7 @@ ┆ weights ┆ ╰─────────╯ ┆ ┆ | r | r - rank ┆ W e R^(d*d) ┆ | ◀─────▶ | - ┆ ┆ ╭─────────╮ + ┆ ┆ ╭─────────╮a └─────────────────┘ / A \ ▲ / d*r \ \ ╰───────────────╯ diff --git a/litgpt/scripts/evaluate.py b/litgpt/scripts/evaluate.py new file mode 100644 index 0000000000..0a1bf6cfa2 --- /dev/null +++ b/litgpt/scripts/evaluate.py @@ -0,0 +1,108 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. + +import json +import os +from pathlib import Path +from typing import Optional + +from lm_eval import evaluator +from lm_eval.utils import make_table +import torch + +from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint +from litgpt.utils import CLI, copy_config_files + + +def convert_and_evaluate( + checkpoint_dir: Optional[str] = None, + out_dir: Optional[str] = None, + repo_id: Optional[str] = None, + skip_conversion: bool = False, + tasks: Optional[str] = "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge", + num_fewshot: Optional[int] = None, + batch_size: int = 1, + device: Optional[str] = None, + limit: Optional[float] = None, + seed: int = 1234, + save_filepath: Optional[str] = None, +) -> None: + """Convert a LitGPT model and run the LM Evaluation Harness + + Arguments: + checkpoint_dir: Directory where the `lit_model.pth` and tokenizer files are located. + out_dir: Directory in which to save the converted checkpoints for evaluation. + repo_id: The original repo ID the model was derived from. + skip_conversion: Set to `True` to skip the model conversion, + assuming the model has already been converted and the + model.pth and .safetensor files exist. + tasks: CSV of task names to evaluate. + By default, the Open LM Leaderboard tasks are used: + "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge" + num_fewshot: Number of examples in few-shot context. + batch_size: Batch size configuration. + device: Device to use for evaluation, for example, "cuda" or "cuda:0". + limit: Limit on number of examples per task. + seed: Random seed. + save_filepath: The file where the results will be saved. + Saves to `out_dir`/results.json by default. 
+ """ + if checkpoint_dir is None: + raise ValueError("Provide a checkpoint_dir argument.") + if out_dir is None: + raise ValueError("Provide a checkpoint_dir argument.") + if repo_id is None: + raise ValueError("Provide a repo_id argument.") + + checkpoint_dir, out_dir = Path(checkpoint_dir), Path(out_dir) + + if save_filepath is None: + save_filepath = "results.json" + save_filepath = out_dir / Path(save_filepath) + else: + save_filepath = Path(save_filepath) + + out_dir.mkdir(parents=True, exist_ok=True) + + copy_config_files(source_dir=checkpoint_dir, out_dir=out_dir) + + if not skip_conversion: + convert_lit_checkpoint(checkpoint_dir=checkpoint_dir, output_dir=out_dir) + + from transformers import AutoModel + + state_dict = torch.load(out_dir/"model.pth") + model = AutoModel.from_pretrained( + repo_id, state_dict=state_dict + ) + + # Saves .safetensors files + model.save_pretrained(out_dir) + + os.environ["TOKENIZERS_PARALLELISM"] = "false" + + results = evaluator.simple_evaluate( + model="hf", + model_args=f"pretrained={out_dir}", + tasks=tasks.split(","), + num_fewshot=num_fewshot, + batch_size=batch_size, + device=device, + limit=limit, + random_seed=seed, + numpy_random_seed=seed, + torch_random_seed=seed, + ) + + print(make_table(results)) + if "groups" in results: + print(make_table(results, "groups")) + + json_result = json.dumps( + results, indent=2, ensure_ascii=False + ) + + save_filepath.open("w", encoding="utf-8").write(json_result) + + +if __name__ == "__main__": + CLI(convert_and_evaluate) diff --git a/pyproject.toml b/pyproject.toml index 907887b074..f5974d5223 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,6 +43,8 @@ all = [ "pyarrow", # litgpt.data.prepare_starcoder.py "tensorboard", # litgpt.pretrain "torchmetrics", # litgpt.pretrain + "transformers>=4.38.0", # litgpt.evaluate + "lm_eval", # litgpt.evaluate "safetensors", # download "huggingface_hub[hf_transfer]>=0.21.0" # download ] diff --git a/tests/test_cli.py b/tests/test_cli.py index ee23d3926b..85510d9ae1 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -15,12 +15,13 @@ def test_cli(): main() out = out.getvalue() assert "usage: litgpt" in out - assert "{download,chat,finetune,pretrain,generate,convert,merge_lora}" in out + assert "{download,chat,finetune,pretrain,generate,convert,merge_lora,evaluate}" in out assert ( """Available subcommands: download Download weights or tokenizer data from the Hugging Face Hub. - chat Chat with a model.""" + chat Chat with a model. + evaluate Evaluate a model with the LM Evaluation Harness.""" in out ) diff --git a/tutorials/evaluation.md b/tutorials/evaluation.md index fabf21cd14..083a38d76d 100644 --- a/tutorials/evaluation.md +++ b/tutorials/evaluation.md @@ -9,59 +9,74 @@ You can evaluate LitGPT using [EleutherAI's lm-eval](https://github.com/Eleuther You need to install the `lm-eval` framework first: ```bash -pip install 'lm_eval @ git+https://github.com/EleutherAI/lm-evaluation-harness.git@115206dc89dad67b8b' +pip install lm_eval ```   ### Evaluating LitGPT base models -Use the following command to evaluate LitGPT models on all tasks in Eleuther AI's Evaluation Harness. +Suppose you downloaded a base model that we want to evaluate. 
Here, we use the `microsoft/phi-2` model: ```bash -python eval/lm_eval_harness.py \ - --checkpoint_dir "checkpoints/meta-llama/Llama-2-7b-hf" \ - --precision "bf16-true" \ - --save_filepath "results.json" +litgpt download --repo_id microsoft/phi-2 ``` -To evaluate on LLMs on specific tasks, for example, TruthfulQA and HellaSwag, you can use the `--eval_task` flag as follows: +The download command above will save the model to the `checkoints/microsoft/phi-2` directory, which we can +specify in the following evaluation command: -```bash -python eval/lm_eval_harness.py \ - --checkpoint_dir "checkpoints/meta-llama/Llama-2-7b-hf" \ - --eval_tasks "[truthfulqa_mc,hellaswag]" \ - --precision "bf16-true" \ - --save_filepath "results.json" + +``` +litgpt evaluate \ + --checkpoint_dir checkpoints/microsoft/phi-2/ \ + --out_dir evaluate_model/ \ + --repo_id microsoft/phi-2 ``` -A list of supported tasks can be found [here](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/docs/task_table.md). +Please note that the `litgpt eval` command run an internal model conversion. +This is only necessary the first time you want to evaluate a model. To skip the conversion, +when you want to evaluate a model a second time, you can pass the `--skip_conversion true` argument: + +``` +litgpt evaluate \ + --checkpoint_dir checkpoints/microsoft/phi-2/ \ + --out_dir evaluate_model/ \ + --repo_id microsoft/phi-2 \ + --skip_conversion true +```   -### Evaluating LoRA-finetuned LLMs +> [!TIP] +> By default, `ligpt evaluate` will evaluate a model on all Open LM Leaderboard tasks, which corresponds +to the setting `--tasks "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge"`. -The above command can be used to evaluate models that are saved via a single checkpoint file. This includes downloaded checkpoints and base models finetuned via the full and adapter finetuning scripts. +> [!TIP] +> The evaluation may take a long time, and for testing purpoes, you may want to reduce the number of tasks +> or set a limit for the number of examples per task, for example, `--limit 10`. -For LoRA-finetuned models, you need to first merge the LoRA weights with the original checkpoint file as described in the [Merging LoRA Weights](finetune_lora.md#merging-lora-weights) section of the LoRA finetuning documentation. +A list of supported tasks can be found [here](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/docs/task_table.md). -  -## FAQs -* **How do I evaluate on MMLU?** - MMLU is available as with lm-eval harness but the task name is not MMLU. You can use `hendrycksTest*` as regex to evaluate on MMLU. +  + +### Evaluating LoRA-finetuned LLMs - ```shell - python eval/lm_eval_harness.py \ - --checkpoint_dir "checkpoints/meta-llama/Llama-2-7b-hf" \ - --precision "bf16-true" \ - --eval_tasks "[hendrycksTest*]" \ - --num_fewshot 5 \ - --save_filepath "results.json" - ``` +No further conversion is necessary when evaluating LoRA-finetuned models as the `finetune lora` command already prepares the necessary merged model files: -* **Is Truthful MC is not available in lm-eval?** +```bash +litgpt finetune lora \ + --checkpoint_dir checkpoints/microsoft/phi-2 \ + --out_dir lora_model +``` - It is available as `truthfulqa_mc`. 
+  + +``` +litgpt evaluate \ + --checkpoint_dir lora_model/final \ + --out_dir evaluate_model/ \ + --repo_id microsoft/phi-2 +``` From 0c53da17f25d257a9c2e055eb9b670dcf0c1e32e Mon Sep 17 00:00:00 2001 From: rasbt Date: Thu, 21 Mar 2024 20:43:40 +0000 Subject: [PATCH 02/40] update package dependench --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f5974d5223..47ae5c4f66 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,7 @@ all = [ "tensorboard", # litgpt.pretrain "torchmetrics", # litgpt.pretrain "transformers>=4.38.0", # litgpt.evaluate - "lm_eval", # litgpt.evaluate + "lm-eval", # litgpt.evaluate "safetensors", # download "huggingface_hub[hf_transfer]>=0.21.0" # download ] From 669ce22c6704145be79da3b6f008d72f1aacaa03 Mon Sep 17 00:00:00 2001 From: rasbt Date: Thu, 21 Mar 2024 20:49:50 +0000 Subject: [PATCH 03/40] add llm-eval dependency --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 47ae5c4f66..39bf7513c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,7 @@ test = [ "einops", "protobuf", "lightning-thunder; python_version >= '3.10'", + "lm-eval" ] all = [ "bitsandbytes==0.42.0", # quantization From d161d12af6b4e06a8c9ce46bcb3f5e66dea882f2 Mon Sep 17 00:00:00 2001 From: rasbt Date: Thu, 21 Mar 2024 20:52:03 +0000 Subject: [PATCH 04/40] move imports --- litgpt/scripts/evaluate.py | 7 ++++--- pyproject.toml | 1 - 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/litgpt/scripts/evaluate.py b/litgpt/scripts/evaluate.py index 0a1bf6cfa2..6d52d958ca 100644 --- a/litgpt/scripts/evaluate.py +++ b/litgpt/scripts/evaluate.py @@ -4,9 +4,6 @@ import os from pathlib import Path from typing import Optional - -from lm_eval import evaluator -from lm_eval.utils import make_table import torch from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint @@ -46,6 +43,10 @@ def convert_and_evaluate( save_filepath: The file where the results will be saved. Saves to `out_dir`/results.json by default. """ + + from lm_eval import evaluator + from lm_eval.utils import make_table + if checkpoint_dir is None: raise ValueError("Provide a checkpoint_dir argument.") if out_dir is None: diff --git a/pyproject.toml b/pyproject.toml index 39bf7513c1..47ae5c4f66 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,6 @@ test = [ "einops", "protobuf", "lightning-thunder; python_version >= '3.10'", - "lm-eval" ] all = [ "bitsandbytes==0.42.0", # quantization From e7ebfbcbb2a977a1f661b54ac70e84d6fedb93a7 Mon Sep 17 00:00:00 2001 From: rasbt Date: Thu, 21 Mar 2024 21:05:03 +0000 Subject: [PATCH 05/40] update cli test --- tests/test_cli.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_cli.py b/tests/test_cli.py index 85510d9ae1..2c994fcf96 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -20,10 +20,10 @@ def test_cli(): """Available subcommands: download Download weights or tokenizer data from the Hugging Face Hub. - chat Chat with a model. 
- evaluate Evaluate a model with the LM Evaluation Harness.""" + chat Chat with a model.""" in out ) + assert ("""evaluate Evaluate a model with the LM Evaluation Harness.""") in out out = StringIO() with pytest.raises(SystemExit), redirect_stdout(out), mock.patch("sys.argv", ["litgpt", "finetune", "-h"]): From 46605075b0296a94bb868fc2899267bce7d82b57 Mon Sep 17 00:00:00 2001 From: rasbt Date: Thu, 21 Mar 2024 21:07:37 +0000 Subject: [PATCH 06/40] cleanup --- litgpt/lora.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litgpt/lora.py b/litgpt/lora.py index fef0ad9598..fd54d6f771 100644 --- a/litgpt/lora.py +++ b/litgpt/lora.py @@ -22,7 +22,7 @@ ┆ weights ┆ ╰─────────╯ ┆ ┆ | r | r - rank ┆ W e R^(d*d) ┆ | ◀─────▶ | - ┆ ┆ ╭─────────╮a + ┆ ┆ ╭─────────╮ └─────────────────┘ / A \ ▲ / d*r \ \ ╰───────────────╯ From 018cc89ff6c546927fb36ba5918b225034c5d615 Mon Sep 17 00:00:00 2001 From: rasbt Date: Fri, 22 Mar 2024 15:48:54 +0000 Subject: [PATCH 07/40] eval unit test --- litgpt/scripts/evaluate.py | 47 ++++++++++++++++------------- tests/test_evaluate.py | 61 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 20 deletions(-) create mode 100644 tests/test_evaluate.py diff --git a/litgpt/scripts/evaluate.py b/litgpt/scripts/evaluate.py index 6d52d958ca..316629d5d8 100644 --- a/litgpt/scripts/evaluate.py +++ b/litgpt/scripts/evaluate.py @@ -10,6 +10,30 @@ from litgpt.utils import CLI, copy_config_files +def safe_safetensors(out_dir, repo_id): + from transformers import AutoModel + + state_dict = torch.load(out_dir/"model.pth") + model = AutoModel.from_pretrained( + repo_id, state_dict=state_dict + ) + model.save_pretrained(out_dir) + + +def prepare_results(results, save_filepath, print_results=True): + from lm_eval.utils import make_table + + if print_results: + print(make_table(results)) + if "groups" in results: + print(make_table(results, "groups")) + + json_result = json.dumps( + results, indent=2, ensure_ascii=False + ) + save_filepath.open("w", encoding="utf-8").write(json_result) + + def convert_and_evaluate( checkpoint_dir: Optional[str] = None, out_dir: Optional[str] = None, @@ -45,7 +69,6 @@ def convert_and_evaluate( """ from lm_eval import evaluator - from lm_eval.utils import make_table if checkpoint_dir is None: raise ValueError("Provide a checkpoint_dir argument.") @@ -68,16 +91,7 @@ def convert_and_evaluate( if not skip_conversion: convert_lit_checkpoint(checkpoint_dir=checkpoint_dir, output_dir=out_dir) - - from transformers import AutoModel - - state_dict = torch.load(out_dir/"model.pth") - model = AutoModel.from_pretrained( - repo_id, state_dict=state_dict - ) - - # Saves .safetensors files - model.save_pretrained(out_dir) + safe_safetensors(out_dir, repo_id) os.environ["TOKENIZERS_PARALLELISM"] = "false" @@ -94,15 +108,8 @@ def convert_and_evaluate( torch_random_seed=seed, ) - print(make_table(results)) - if "groups" in results: - print(make_table(results, "groups")) - - json_result = json.dumps( - results, indent=2, ensure_ascii=False - ) - - save_filepath.open("w", encoding="utf-8").write(json_result) + print("results", results) + prepare_results(results, save_filepath) if __name__ == "__main__": diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py new file mode 100644 index 0000000000..a42d2536fe --- /dev/null +++ b/tests/test_evaluate.py @@ -0,0 +1,61 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. 
+ +import sys +from pathlib import Path + +import datasets +import pytest + +from litgpt.scripts.download import download_from_hub +from litgpt.scripts.evaluate import safe_safetensors, prepare_results +from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint +from lm_eval import evaluator + +# support running without installing as a package +wd = Path(__file__).parent.parent.resolve() +sys.path.append(str(wd)) + + +@pytest.mark.xfail( + raises=(datasets.builder.DatasetGenerationError, NotImplementedError), + strict=False, + match="Loading a dataset cached in a LocalFileSystem is not supported", +) +def test_run_eval(tmp_path, float_like): + repo_id = "EleutherAI/pythia-14m" + download_from_hub(repo_id=repo_id, checkpoint_dir=tmp_path) + + checkpoint_path = Path(tmp_path) / Path(repo_id) + + convert_lit_checkpoint(checkpoint_dir=checkpoint_path, output_dir=checkpoint_path) + safe_safetensors(out_dir=checkpoint_path, repo_id=repo_id) + + eval_tasks = "coqa,hellaswag" + results = evaluator.simple_evaluate( + model="hf", + model_args=f"pretrained={checkpoint_path}", + tasks=eval_tasks.split(","), + limit=2, + ) + + save_path = checkpoint_path/"results.json" + prepare_results(results, save_path, print_results=False) + + print(checkpoint_path/"dump.txt") + assert save_path.is_file() + assert results["results"] == { + 'coqa': { + 'alias': 'coqa', + 'em,none': 0.0, + 'em_stderr,none': 0.0, + 'f1,none': 0.0, + 'f1_stderr,none': 0.0 + }, + 'hellaswag': { + 'acc,none': 0.0, + 'acc_stderr,none': 0.0, + 'acc_norm,none': 0.5, + 'acc_norm_stderr,none': 0.5, + 'alias': 'hellaswag' + } + } From 98130f90061d080092d0e4a9efe8c6784d2fea54 Mon Sep 17 00:00:00 2001 From: rasbt Date: Fri, 22 Mar 2024 15:58:44 +0000 Subject: [PATCH 08/40] run tests on cpu --- tests/test_evaluate.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py index a42d2536fe..b20f88f533 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -36,6 +36,7 @@ def test_run_eval(tmp_path, float_like): model_args=f"pretrained={checkpoint_path}", tasks=eval_tasks.split(","), limit=2, + device="cpu" ) save_path = checkpoint_path/"results.json" From a5495354c7746d056ad4f2e783a43c12ecdebd50 Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Fri, 22 Mar 2024 11:18:47 -0500 Subject: [PATCH 09/40] Add lm-eval to test dependencies --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 47ae5c4f66..0135508f4c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ test = [ "pytest-timeout", "transformers>=4.38.0", "einops", + "lm-eval", "protobuf", "lightning-thunder; python_version >= '3.10'", ] From 7ff3ff2b6d6b7052842640bb4bfd8d87d7b9ec6d Mon Sep 17 00:00:00 2001 From: rasbt Date: Fri, 22 Mar 2024 16:36:54 +0000 Subject: [PATCH 10/40] bump version --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0135508f4c..66b4b8f4a9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ test = [ "pytest-timeout", "transformers>=4.38.0", "einops", - "lm-eval", + "lm-eval>=0.42.0", "protobuf", "lightning-thunder; python_version >= '3.10'", ] @@ -45,7 +45,7 @@ all = [ "tensorboard", # litgpt.pretrain "torchmetrics", # litgpt.pretrain "transformers>=4.38.0", # litgpt.evaluate - "lm-eval", # litgpt.evaluate + "lm-eval>=0.42.0", # litgpt.evaluate "safetensors", # download "huggingface_hub[hf_transfer]>=0.21.0" # download ] From 
c47e7644d5610f8988778e2c5fdf04d02f1e7473 Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Mon, 25 Mar 2024 09:09:00 -0500 Subject: [PATCH 11/40] Update litgpt/scripts/evaluate.py Co-authored-by: awaelchli --- litgpt/scripts/evaluate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litgpt/scripts/evaluate.py b/litgpt/scripts/evaluate.py index 316629d5d8..1d123c5790 100644 --- a/litgpt/scripts/evaluate.py +++ b/litgpt/scripts/evaluate.py @@ -29,7 +29,7 @@ def prepare_results(results, save_filepath, print_results=True): print(make_table(results, "groups")) json_result = json.dumps( - results, indent=2, ensure_ascii=False + results, indent=2, ensure_ascii=False ) save_filepath.open("w", encoding="utf-8").write(json_result) From 042d2a5b447084dd96c5ce7dcaa603c680644d61 Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Mon, 25 Mar 2024 09:09:08 -0500 Subject: [PATCH 12/40] Update litgpt/scripts/evaluate.py Co-authored-by: awaelchli --- litgpt/scripts/evaluate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litgpt/scripts/evaluate.py b/litgpt/scripts/evaluate.py index 1d123c5790..6ce382a6ce 100644 --- a/litgpt/scripts/evaluate.py +++ b/litgpt/scripts/evaluate.py @@ -65,7 +65,7 @@ def convert_and_evaluate( limit: Limit on number of examples per task. seed: Random seed. save_filepath: The file where the results will be saved. - Saves to `out_dir`/results.json by default. + Saves to `out_dir/results.json` by default. """ from lm_eval import evaluator From 359dad5a90243141138e97b34aef2694b3211f0e Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Mon, 25 Mar 2024 09:09:25 -0500 Subject: [PATCH 13/40] Update litgpt/scripts/evaluate.py Co-authored-by: awaelchli --- litgpt/scripts/evaluate.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/litgpt/scripts/evaluate.py b/litgpt/scripts/evaluate.py index 6ce382a6ce..adc3881eef 100644 --- a/litgpt/scripts/evaluate.py +++ b/litgpt/scripts/evaluate.py @@ -79,11 +79,7 @@ def convert_and_evaluate( checkpoint_dir, out_dir = Path(checkpoint_dir), Path(out_dir) - if save_filepath is None: - save_filepath = "results.json" - save_filepath = out_dir / Path(save_filepath) - else: - save_filepath = Path(save_filepath) + save_filepath = out_dir / Path("results.json") if save_filepath is None else Path(save_filepath) out_dir.mkdir(parents=True, exist_ok=True) From f7147c4faacd43891820e5afd68dfef0d84ed6a9 Mon Sep 17 00:00:00 2001 From: rasbt Date: Mon, 25 Mar 2024 16:04:38 +0000 Subject: [PATCH 14/40] make args required --- litgpt/scripts/evaluate.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/litgpt/scripts/evaluate.py b/litgpt/scripts/evaluate.py index adc3881eef..b52ad57bb8 100644 --- a/litgpt/scripts/evaluate.py +++ b/litgpt/scripts/evaluate.py @@ -35,9 +35,9 @@ def prepare_results(results, save_filepath, print_results=True): def convert_and_evaluate( - checkpoint_dir: Optional[str] = None, - out_dir: Optional[str] = None, - repo_id: Optional[str] = None, + checkpoint_dir: str, + out_dir: str, + repo_id: str, skip_conversion: bool = False, tasks: Optional[str] = "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge", num_fewshot: Optional[int] = None, @@ -70,13 +70,6 @@ def convert_and_evaluate( from lm_eval import evaluator - if checkpoint_dir is None: - raise ValueError("Provide a checkpoint_dir argument.") - if out_dir is None: - raise ValueError("Provide a checkpoint_dir argument.") - if repo_id is None: - raise ValueError("Provide a repo_id 
argument.") - checkpoint_dir, out_dir = Path(checkpoint_dir), Path(out_dir) save_filepath = out_dir / Path("results.json") if save_filepath is None else Path(save_filepath) From 07862851c1aee05cb2bc618e559415a1e450aa85 Mon Sep 17 00:00:00 2001 From: rasbt Date: Mon, 25 Mar 2024 16:20:16 +0000 Subject: [PATCH 15/40] automatically infer repo_id --- litgpt/scripts/evaluate.py | 8 ++++++-- tutorials/evaluation.md | 5 +---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/litgpt/scripts/evaluate.py b/litgpt/scripts/evaluate.py index b52ad57bb8..4c00d5ccc1 100644 --- a/litgpt/scripts/evaluate.py +++ b/litgpt/scripts/evaluate.py @@ -4,6 +4,7 @@ import os from pathlib import Path from typing import Optional +import yaml import torch from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint @@ -37,7 +38,6 @@ def prepare_results(results, save_filepath, print_results=True): def convert_and_evaluate( checkpoint_dir: str, out_dir: str, - repo_id: str, skip_conversion: bool = False, tasks: Optional[str] = "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge", num_fewshot: Optional[int] = None, @@ -52,7 +52,6 @@ def convert_and_evaluate( Arguments: checkpoint_dir: Directory where the `lit_model.pth` and tokenizer files are located. out_dir: Directory in which to save the converted checkpoints for evaluation. - repo_id: The original repo ID the model was derived from. skip_conversion: Set to `True` to skip the model conversion, assuming the model has already been converted and the model.pth and .safetensor files exist. @@ -73,6 +72,11 @@ def convert_and_evaluate( checkpoint_dir, out_dir = Path(checkpoint_dir), Path(out_dir) save_filepath = out_dir / Path("results.json") if save_filepath is None else Path(save_filepath) + config_filepath = checkpoint_dir/"model_config.yaml" + + with open(config_filepath) as f: + config_dict = yaml.safe_load(f) + repo_id = f"{config_dict['hf_config']['org']}/{config_dict['hf_config']['name']}" out_dir.mkdir(parents=True, exist_ok=True) diff --git a/tutorials/evaluation.md b/tutorials/evaluation.md index 083a38d76d..b270c50d10 100644 --- a/tutorials/evaluation.md +++ b/tutorials/evaluation.md @@ -29,8 +29,7 @@ specify in the following evaluation command: ``` litgpt evaluate \ --checkpoint_dir checkpoints/microsoft/phi-2/ \ - --out_dir evaluate_model/ \ - --repo_id microsoft/phi-2 + --out_dir evaluate_model/ ``` Please note that the `litgpt eval` command run an internal model conversion. @@ -41,7 +40,6 @@ when you want to evaluate a model a second time, you can pass the `--skip_conver litgpt evaluate \ --checkpoint_dir checkpoints/microsoft/phi-2/ \ --out_dir evaluate_model/ \ - --repo_id microsoft/phi-2 \ --skip_conversion true ``` @@ -78,5 +76,4 @@ litgpt finetune lora \ litgpt evaluate \ --checkpoint_dir lora_model/final \ --out_dir evaluate_model/ \ - --repo_id microsoft/phi-2 ``` From b54095d9da0c14fb11224499c2a1b40652008698 Mon Sep 17 00:00:00 2001 From: rasbt Date: Mon, 25 Mar 2024 16:27:34 +0000 Subject: [PATCH 16/40] check out_dir defaults --- eval/lm_eval_harness.py | 189 ------------------------------------- litgpt/scripts/evaluate.py | 109 --------------------- 2 files changed, 298 deletions(-) delete mode 100644 eval/lm_eval_harness.py delete mode 100644 litgpt/scripts/evaluate.py diff --git a/eval/lm_eval_harness.py b/eval/lm_eval_harness.py deleted file mode 100644 index 610ca3778a..0000000000 --- a/eval/lm_eval_harness.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright Lightning AI. 
Licensed under the Apache License 2.0, see LICENSE file. - -import json -import sys -from pathlib import Path -from typing import Dict, List, Literal, Optional - -import lightning as L -import torch -from lightning.fabric.plugins import BitsandbytesPrecision -from lm_eval import base, evaluator, tasks -from lm_eval.base import BaseLM - -from litgpt import GPT, Config, Tokenizer -from litgpt.generate.base import generate -from litgpt.utils import CLI, check_valid_checkpoint_dir, get_default_supported_precision, load_checkpoint - - -class EvalHarnessBase(BaseLM): - # Credits: - # https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py - def __init__(self, fabric: L.Fabric, model: GPT, tokenizer: Tokenizer, batch_size: int): - super().__init__() - self.fabric = fabric - self.model = model - self.tokenizer = tokenizer - self.batch_size_per_gpu = batch_size - with fabric.init_tensor(): - model.set_kv_cache(batch_size=batch_size) - - @classmethod - def create_from_arg_string(cls, arg_string, additional_config=None): - kwargs = {el.split("=")[0]: el.split("=")[1] for el in arg_string.split(",")} - return cls(**kwargs, **additional_config) - - @property - def eot_token_id(self): - # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* - return self.tokenizer.eos_id - - @property - def max_length(self): - return self.model.max_seq_length - - @property - def vocab_size(self): - return self.tokenizer.vocab_size - - @property - def max_gen_toks(self): - return 256 - - @property - def batch_size(self): - return self.batch_size_per_gpu * self.fabric.world_size - - @property - def device(self): - return self.fabric.device - - def tok_encode(self, string: str) -> List[int]: - return self.tokenizer.encode(string, bos=False, eos=False).tolist() - - def tok_decode(self, tokens: List[int]) -> str: - t = torch.tensor(tokens) - return self.tokenizer.decode(t) - - @torch.inference_mode() - def _model_call(self, inps): - return self.model(inps) - - @torch.inference_mode() - def _model_generate(self, context, max_length, eos_token_id) -> torch.Tensor: - # this only supports batch size 1 - assert context.shape[0] == 1 - out = generate(self.model, context[0], max_length, eos_id=eos_token_id) - for block in self.model.transformer.h: - block.attn.kv_cache.reset_parameters() - return out.unsqueeze(0) - - @torch.inference_mode() - def run_eval( - self, eval_tasks: List[str], num_fewshot: int, limit: Optional[int], bootstrap_iters: int, no_cache: bool - ) -> Dict: - # Returns a list containing all values of the task registry that - # match at least one of the patterns - import fnmatch - - def pattern_match(patterns, source_list): - task_names = set() - for pattern in patterns: - for matching in fnmatch.filter(source_list, pattern): - task_names.add(matching) - return list(task_names) - - eval_tasks = pattern_match(eval_tasks, tasks.ALL_TASKS) - print(f"Found tasks: {eval_tasks}") - - # **HACK INCOMING**: - # first get task dict on local main rank - # the tasks are downloaded *as they are initialized*, and the downloads don't like multithreading. - # so we download them once on the local main rank, wait, and then initialize them on all other ranks, which *should* load from the cache. 
- if self.fabric.local_rank == 0: - tasks.get_task_dict(eval_tasks) - # torch barrier - self.fabric.barrier() - tasks.get_task_dict(eval_tasks) - - lm = self - if not no_cache: - lm = base.CachingLM(lm, "lm_cache/litgpt.db") - - results = evaluator.evaluate( - lm=lm, - task_dict=tasks.get_task_dict(eval_tasks), - num_fewshot=num_fewshot, - limit=limit, - bootstrap_iters=bootstrap_iters, - ) - results["config"] = dict( - model=self.model.config.name, - batch_size=self.batch_size, - device=str(self.device), - num_fewshot=num_fewshot, - limit=limit, - bootstrap_iters=bootstrap_iters, - no_cache=no_cache, - ) - return results - - -@torch.inference_mode() -def run_eval_harness( - checkpoint_dir: Path, - precision: Optional[str] = None, - quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8"]] = None, - eval_tasks: List[str] = ["arc_challenge", "piqa", "hellaswag", "hendrycksTest-*"], - save_filepath: Optional[Path] = None, - num_fewshot: int = 0, - limit: Optional[int] = None, - bootstrap_iters: int = 100000, - no_cache: bool = True, -): - if precision is None: - precision = get_default_supported_precision(training=False) - - plugins = None - if quantize is not None and quantize.startswith("bnb."): - if "mixed" in precision: - raise ValueError("Quantization and mixed precision is not supported.") - dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision] - plugins = BitsandbytesPrecision(quantize[4:], dtype) - precision = None - - fabric = L.Fabric(devices=1, precision=precision, plugins=plugins) - - check_valid_checkpoint_dir(checkpoint_dir) - tokenizer = Tokenizer(checkpoint_dir) - - config = Config.from_file(checkpoint_dir / "model_config.yaml") - - checkpoint_path = checkpoint_dir / "lit_model.pth" - - print(f"Loading model {str(checkpoint_path)!r} with {config.__dict__}", file=sys.stderr) - with fabric.init_module(empty_init=True): - model = GPT(config) - - model.eval() - model = fabric.setup_module(model) - - load_checkpoint(fabric, model, checkpoint_path) - - eval_harness = EvalHarnessBase(fabric, model, tokenizer, 1) - - results = eval_harness.run_eval(eval_tasks, num_fewshot, limit, bootstrap_iters, no_cache) - if save_filepath is None: - print(results) - else: - print(f"Saving results to {str(save_filepath)!r}") - save_filepath.parent.mkdir(parents=True, exist_ok=True) - data = json.dumps(results) - with open(save_filepath, "w") as fw: - fw.write(data) - - -if __name__ == "__main__": - torch.set_float32_matmul_precision("high") - - CLI(run_eval_harness) diff --git a/litgpt/scripts/evaluate.py b/litgpt/scripts/evaluate.py deleted file mode 100644 index 4c00d5ccc1..0000000000 --- a/litgpt/scripts/evaluate.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. 
- -import json -import os -from pathlib import Path -from typing import Optional -import yaml -import torch - -from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint -from litgpt.utils import CLI, copy_config_files - - -def safe_safetensors(out_dir, repo_id): - from transformers import AutoModel - - state_dict = torch.load(out_dir/"model.pth") - model = AutoModel.from_pretrained( - repo_id, state_dict=state_dict - ) - model.save_pretrained(out_dir) - - -def prepare_results(results, save_filepath, print_results=True): - from lm_eval.utils import make_table - - if print_results: - print(make_table(results)) - if "groups" in results: - print(make_table(results, "groups")) - - json_result = json.dumps( - results, indent=2, ensure_ascii=False - ) - save_filepath.open("w", encoding="utf-8").write(json_result) - - -def convert_and_evaluate( - checkpoint_dir: str, - out_dir: str, - skip_conversion: bool = False, - tasks: Optional[str] = "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge", - num_fewshot: Optional[int] = None, - batch_size: int = 1, - device: Optional[str] = None, - limit: Optional[float] = None, - seed: int = 1234, - save_filepath: Optional[str] = None, -) -> None: - """Convert a LitGPT model and run the LM Evaluation Harness - - Arguments: - checkpoint_dir: Directory where the `lit_model.pth` and tokenizer files are located. - out_dir: Directory in which to save the converted checkpoints for evaluation. - skip_conversion: Set to `True` to skip the model conversion, - assuming the model has already been converted and the - model.pth and .safetensor files exist. - tasks: CSV of task names to evaluate. - By default, the Open LM Leaderboard tasks are used: - "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge" - num_fewshot: Number of examples in few-shot context. - batch_size: Batch size configuration. - device: Device to use for evaluation, for example, "cuda" or "cuda:0". - limit: Limit on number of examples per task. - seed: Random seed. - save_filepath: The file where the results will be saved. - Saves to `out_dir/results.json` by default. 
- """ - - from lm_eval import evaluator - - checkpoint_dir, out_dir = Path(checkpoint_dir), Path(out_dir) - - save_filepath = out_dir / Path("results.json") if save_filepath is None else Path(save_filepath) - config_filepath = checkpoint_dir/"model_config.yaml" - - with open(config_filepath) as f: - config_dict = yaml.safe_load(f) - repo_id = f"{config_dict['hf_config']['org']}/{config_dict['hf_config']['name']}" - - out_dir.mkdir(parents=True, exist_ok=True) - - copy_config_files(source_dir=checkpoint_dir, out_dir=out_dir) - - if not skip_conversion: - convert_lit_checkpoint(checkpoint_dir=checkpoint_dir, output_dir=out_dir) - safe_safetensors(out_dir, repo_id) - - os.environ["TOKENIZERS_PARALLELISM"] = "false" - - results = evaluator.simple_evaluate( - model="hf", - model_args=f"pretrained={out_dir}", - tasks=tasks.split(","), - num_fewshot=num_fewshot, - batch_size=batch_size, - device=device, - limit=limit, - random_seed=seed, - numpy_random_seed=seed, - torch_random_seed=seed, - ) - - print("results", results) - prepare_results(results, save_filepath) - - -if __name__ == "__main__": - CLI(convert_and_evaluate) From 4c77a6a4d9ef30d7519aed668be04690654b9926 Mon Sep 17 00:00:00 2001 From: rasbt Date: Mon, 25 Mar 2024 16:32:52 +0000 Subject: [PATCH 17/40] move evaluate.py --- litgpt/eval/evaluate.py | 114 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 litgpt/eval/evaluate.py diff --git a/litgpt/eval/evaluate.py b/litgpt/eval/evaluate.py new file mode 100644 index 0000000000..5f831797a0 --- /dev/null +++ b/litgpt/eval/evaluate.py @@ -0,0 +1,114 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. + +import json +import os +from pathlib import Path +from typing import Optional +import yaml +import torch + +from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint +from litgpt.utils import CLI, copy_config_files + + +def safe_safetensors(out_dir, repo_id): + from transformers import AutoModel + + state_dict = torch.load(out_dir/"model.pth") + model = AutoModel.from_pretrained( + repo_id, state_dict=state_dict + ) + model.save_pretrained(out_dir) + + +def prepare_results(results, save_filepath, print_results=True): + from lm_eval.utils import make_table + + if print_results: + print(make_table(results)) + if "groups" in results: + print(make_table(results, "groups")) + + json_result = json.dumps( + results, indent=2, ensure_ascii=False + ) + save_filepath.open("w", encoding="utf-8").write(json_result) + + +def convert_and_evaluate( + checkpoint_dir: str, + out_dir: Optional[str] = None, + skip_conversion: bool = False, + tasks: Optional[str] = "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge", + num_fewshot: Optional[int] = None, + batch_size: int = 1, + device: Optional[str] = None, + limit: Optional[float] = None, + seed: int = 1234, + save_filepath: Optional[str] = None, +) -> None: + """Convert a LitGPT model and run the LM Evaluation Harness + + Arguments: + checkpoint_dir: Directory where the `lit_model.pth` and tokenizer files are located. + out_dir: Directory in which to save the converted checkpoints for evaluation. + Saves to `checkpoint_dir`/evaluate by default. + skip_conversion: Set to `True` to skip the model conversion, + assuming the model has already been converted and the + model.pth and .safetensor files exist. + tasks: CSV of task names to evaluate. 
+ By default, the Open LM Leaderboard tasks are used: + "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge" + num_fewshot: Number of examples in few-shot context. + batch_size: Batch size configuration. + device: Device to use for evaluation, for example, "cuda" or "cuda:0". + limit: Limit on number of examples per task. + seed: Random seed. + save_filepath: The file where the results will be saved. + Saves to `out_dir/results.json` by default. + """ + + from lm_eval import evaluator + + checkpoint_dir = Path(checkpoint_dir) + + save_filepath = out_dir / Path("results.json") if save_filepath is None else Path(save_filepath) + config_filepath = checkpoint_dir/"model_config.yaml" + + with open(config_filepath) as f: + config_dict = yaml.safe_load(f) + repo_id = f"{config_dict['hf_config']['org']}/{config_dict['hf_config']['name']}" + + if out_dir is None: + out_dir = checkpoint_dir / "evaluate" + else: + out_dir = Path(out_dir) + out_dir.mkdir(parents=True, exist_ok=True) + + copy_config_files(source_dir=checkpoint_dir, out_dir=out_dir) + + if not skip_conversion: + convert_lit_checkpoint(checkpoint_dir=checkpoint_dir, output_dir=out_dir) + safe_safetensors(out_dir, repo_id) + + os.environ["TOKENIZERS_PARALLELISM"] = "false" + + results = evaluator.simple_evaluate( + model="hf", + model_args=f"pretrained={out_dir}", + tasks=tasks.split(","), + num_fewshot=num_fewshot, + batch_size=batch_size, + device=device, + limit=limit, + random_seed=seed, + numpy_random_seed=seed, + torch_random_seed=seed, + ) + + print("results", results) + prepare_results(results, save_filepath) + + +if __name__ == "__main__": + CLI(convert_and_evaluate) From 96d8229fad0155a3b07809b0baeea7d3b6937582 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 26 Mar 2024 17:55:00 +0100 Subject: [PATCH 18/40] Deps --- .github/azure-gpu-test.yml | 2 +- .github/workflows/cpu-tests.yml | 2 +- pyproject.toml | 5 ++--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/azure-gpu-test.yml b/.github/azure-gpu-test.yml index b436c67ad7..8a9706717a 100644 --- a/.github/azure-gpu-test.yml +++ b/.github/azure-gpu-test.yml @@ -41,7 +41,7 @@ jobs: displayName: "Image info & NVIDIA" - script: | - pip install '.[all,test]' 'lm_eval @ git+https://github.com/EleutherAI/lm-evaluation-harness.git@115206dc89dad67b8b' + pip install '.[all,test]' displayName: 'Install dependencies' - script: | diff --git a/.github/workflows/cpu-tests.yml b/.github/workflows/cpu-tests.yml index ce7016c672..1b4e7687c6 100644 --- a/.github/workflows/cpu-tests.yml +++ b/.github/workflows/cpu-tests.yml @@ -61,7 +61,7 @@ jobs: - name: Install all dependencies run: | - uv pip install --system -e '.[all,test]' 'lm_eval @ git+https://github.com/EleutherAI/lm-evaluation-harness.git@115206dc89dad67b8b' + uv pip install --system -e '.[all,test]' uv pip list - name: Run tests diff --git a/pyproject.toml b/pyproject.toml index 9aec513f1a..15e9212c15 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,9 +26,8 @@ test = [ "pytest", "pytest-rerunfailures", "pytest-timeout", - "transformers>=4.38.0", + "transformers>=4.38.0", # numerical comparisons "einops", - "lm-eval>=0.42.0", "protobuf", "lightning-thunder; python_version >= '3.10'", ] @@ -36,7 +35,6 @@ all = [ "bitsandbytes==0.42.0", # quantization "sentencepiece", # llama-based models "tokenizers", # pythia, falcon, redpajama - "datasets", # eval "requests", # litgpt.data "litdata", # litgpt.data "zstandard", # litgpt.data.prepare_slimpajama.py @@ -44,6 +42,7 @@ all = [ 
"pyarrow", # litgpt.data.prepare_starcoder.py "tensorboard", # litgpt.pretrain "torchmetrics", # litgpt.pretrain + "datasets", # litgpt.evaluate "transformers>=4.38.0", # litgpt.evaluate "lm-eval>=0.42.0", # litgpt.evaluate "safetensors", # download From 9d9ef7c2d1694e0b1c497cd2d25eebe8ea3d59ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Tue, 26 Mar 2024 17:55:41 +0100 Subject: [PATCH 19/40] Extra file --- tests/test_lm_eval_harness.py | 95 ----------------------------------- 1 file changed, 95 deletions(-) delete mode 100644 tests/test_lm_eval_harness.py diff --git a/tests/test_lm_eval_harness.py b/tests/test_lm_eval_harness.py deleted file mode 100644 index 8310119c7b..0000000000 --- a/tests/test_lm_eval_harness.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. - -import subprocess -import sys -from pathlib import Path -from unittest.mock import ANY, Mock - -import datasets -import pytest -import yaml -from lightning import Fabric - -from litgpt.model import GPT -from litgpt.scripts.download import download_from_hub -from litgpt.tokenizer import Tokenizer - -# support running without installing as a package -wd = Path(__file__).parent.parent.resolve() -sys.path.append(str(wd)) - -import eval.lm_eval_harness as module -from eval.lm_eval_harness import EvalHarnessBase - - -@pytest.mark.xfail( - raises=(datasets.builder.DatasetGenerationError, NotImplementedError), - strict=False, - match="Loading a dataset cached in a LocalFileSystem is not supported", -) -def test_run_eval(tmp_path, float_like): - fabric = Fabric(devices=1) - with fabric.init_module(): - model = GPT.from_name("pythia-14m") - download_from_hub(repo_id="EleutherAI/pythia-14m", tokenizer_only=True, checkpoint_dir=tmp_path) - tokenizer = Tokenizer(tmp_path / "EleutherAI/pythia-14m") - - eval_harness = EvalHarnessBase(fabric, model, tokenizer, 1) - results = eval_harness.run_eval( - eval_tasks=["truthfulqa_mc", "hellaswag", "coqa"], limit=2, bootstrap_iters=2, num_fewshot=0, no_cache=True - ) - assert results == { - "config": { - "batch_size": 1, - "bootstrap_iters": 2, - "device": ANY, - "limit": 2, - "model": "pythia-14m", - "no_cache": True, - "num_fewshot": 0, - }, - "results": { - "hellaswag": { - "acc": float_like, - "acc_norm": float_like, - "acc_norm_stderr": float_like, - "acc_stderr": float_like, - }, - "coqa": {"f1": float_like, "f1_stderr": float_like, "em": float_like, "em_stderr": float_like}, - "truthfulqa_mc": {"mc1": float_like, "mc1_stderr": float_like, "mc2": float_like, "mc2_stderr": float_like}, - }, - "versions": {"hellaswag": 0, "coqa": 1, "truthfulqa_mc": 1}, - } - - -def test_eval_script(tmp_path, fake_checkpoint_dir, monkeypatch): - model_config = dict(block_size=128, n_layer=2, n_embd=8, n_head=4, padded_vocab_size=8) - with open(fake_checkpoint_dir / "model_config.yaml", "w") as fp: - yaml.dump(model_config, fp) - monkeypatch.setattr(module, "load_checkpoint", Mock()) - - tokenizer_mock = Mock() - monkeypatch.setattr(module, "Tokenizer", tokenizer_mock) - - run_eval_mock = Mock() - run_eval_mock.return_value = {"foo": "test"} - monkeypatch.setattr(module.EvalHarnessBase, "run_eval", run_eval_mock) - - output_folder = tmp_path / "output" - assert not output_folder.exists() - - module.run_eval_harness( - checkpoint_dir=fake_checkpoint_dir, precision="32-true", save_filepath=(output_folder / "results.json") - ) - - run_eval_mock.assert_called_once_with( - ["arc_challenge", "piqa", "hellaswag", 
"hendrycksTest-*"], 0, None, 100000, True - ) - assert (output_folder / "results.json").read_text() == '{"foo": "test"}' - - -def test_cli(): - cli_path = Path(__file__).parent.parent / "eval" / "lm_eval_harness.py" - output = subprocess.check_output([sys.executable, cli_path, "-h"]) - output = str(output.decode()) - assert "run_eval_harness" in output From 5abec5ae31f5063ec195fdc23cf88665e4dcac07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrian=20W=C3=A4lchli?= Date: Wed, 27 Mar 2024 10:10:46 -0400 Subject: [PATCH 20/40] fix import --- litgpt/__main__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litgpt/__main__.py b/litgpt/__main__.py index 1c2db41eec..59d53ac904 100644 --- a/litgpt/__main__.py +++ b/litgpt/__main__.py @@ -23,7 +23,7 @@ ) from litgpt.scripts.download import download_from_hub as download_fn from litgpt.scripts.merge_lora import merge_lora as merge_lora_fn -from litgpt.scripts.evaluate import convert_and_evaluate as evaluate_fn +from litgpt.eval.evaluate import convert_and_evaluate as evaluate_fn if TYPE_CHECKING: from jsonargparse import ArgumentParser From 966ff3e95369e198bb6db1f003bb7e3f24720e48 Mon Sep 17 00:00:00 2001 From: rasbt Date: Wed, 27 Mar 2024 16:38:47 +0000 Subject: [PATCH 21/40] fix evaluate reference --- tutorials/evaluation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/evaluation.md b/tutorials/evaluation.md index b270c50d10..f46773e3ca 100644 --- a/tutorials/evaluation.md +++ b/tutorials/evaluation.md @@ -32,7 +32,7 @@ litgpt evaluate \ --out_dir evaluate_model/ ``` -Please note that the `litgpt eval` command run an internal model conversion. +Please note that the `litgpt evaluate` command run an internal model conversion. This is only necessary the first time you want to evaluate a model. To skip the conversion, when you want to evaluate a model a second time, you can pass the `--skip_conversion true` argument: From 9b2ae7d86a26a19e83d17ab50cb832301e76d759 Mon Sep 17 00:00:00 2001 From: rasbt Date: Wed, 27 Mar 2024 16:41:46 +0000 Subject: [PATCH 22/40] fix doc formatting --- tutorials/evaluation.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tutorials/evaluation.md b/tutorials/evaluation.md index f46773e3ca..4fb50a284e 100644 --- a/tutorials/evaluation.md +++ b/tutorials/evaluation.md @@ -47,7 +47,8 @@ litgpt evaluate \ > [!TIP] > By default, `ligpt evaluate` will evaluate a model on all Open LM Leaderboard tasks, which corresponds -to the setting `--tasks "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge"`. +> to the setting `--tasks +> "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge"`. 
> [!TIP] > The evaluation may take a long time, and for testing purpoes, you may want to reduce the number of tasks @@ -72,7 +73,7 @@ litgpt finetune lora \   -``` +```bash litgpt evaluate \ --checkpoint_dir lora_model/final \ --out_dir evaluate_model/ \ From bb4ea306fd5dbc46f39c39907fb73adc151a5cbc Mon Sep 17 00:00:00 2001 From: rasbt Date: Wed, 27 Mar 2024 17:57:28 +0000 Subject: [PATCH 23/40] prototype --- litgpt/eval/evaluate.py | 25 +++++++++++++----------- pyproject.toml | 2 +- tests/test_evaluate.py | 42 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 56 insertions(+), 13 deletions(-) diff --git a/litgpt/eval/evaluate.py b/litgpt/eval/evaluate.py index 5f831797a0..284a1f2b0c 100644 --- a/litgpt/eval/evaluate.py +++ b/litgpt/eval/evaluate.py @@ -11,14 +11,18 @@ from litgpt.utils import CLI, copy_config_files -def safe_safetensors(out_dir, repo_id): - from transformers import AutoModel +def get_hf_model(out_dir, repo_id): + from transformers import AutoModel, AutoTokenizer + from lm_eval.models.huggingface import HFLM state_dict = torch.load(out_dir/"model.pth") model = AutoModel.from_pretrained( repo_id, state_dict=state_dict ) - model.save_pretrained(out_dir) + tokenizer = AutoTokenizer.from_pretrained( + repo_id + ) + return HFLM(model, tokenizer=tokenizer) def prepare_results(results, save_filepath, print_results=True): @@ -38,7 +42,7 @@ def prepare_results(results, save_filepath, print_results=True): def convert_and_evaluate( checkpoint_dir: str, out_dir: Optional[str] = None, - skip_conversion: bool = False, + force_conversion: bool = False, tasks: Optional[str] = "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge", num_fewshot: Optional[int] = None, batch_size: int = 1, @@ -53,9 +57,8 @@ def convert_and_evaluate( checkpoint_dir: Directory where the `lit_model.pth` and tokenizer files are located. out_dir: Directory in which to save the converted checkpoints for evaluation. Saves to `checkpoint_dir`/evaluate by default. - skip_conversion: Set to `True` to skip the model conversion, - assuming the model has already been converted and the - model.pth and .safetensor files exist. + force_conversion: Set to `True` to reconvert the model and override + an existing model.pth from a previous evaluation call. tasks: CSV of task names to evaluate. 
By default, the Open LM Leaderboard tasks are used: "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge" @@ -87,15 +90,15 @@ def convert_and_evaluate( copy_config_files(source_dir=checkpoint_dir, out_dir=out_dir) - if not skip_conversion: + model_path = out_dir / "model.pth" + if not model_path.exists() or force_conversion: convert_lit_checkpoint(checkpoint_dir=checkpoint_dir, output_dir=out_dir) - safe_safetensors(out_dir, repo_id) + hf_model = get_hf_model(out_dir, repo_id) os.environ["TOKENIZERS_PARALLELISM"] = "false" results = evaluator.simple_evaluate( - model="hf", - model_args=f"pretrained={out_dir}", + model=hf_model, tasks=tasks.split(","), num_fewshot=num_fewshot, batch_size=batch_size, diff --git a/pyproject.toml b/pyproject.toml index 15e9212c15..ea9eac7dc1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,7 @@ all = [ "torchmetrics", # litgpt.pretrain "datasets", # litgpt.evaluate "transformers>=4.38.0", # litgpt.evaluate - "lm-eval>=0.42.0", # litgpt.evaluate + "lm-eval>=0.4.2", # litgpt.evaluate "safetensors", # download "huggingface_hub[hf_transfer]>=0.21.0" # download ] diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py index b20f88f533..a4d95d1e09 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -1,13 +1,25 @@ # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. import sys +import os +from dataclasses import asdict from pathlib import Path +from unittest import mock +import litgpt.eval.evaluate as module +from contextlib import redirect_stdout +from io import StringIO +import subprocess import datasets import pytest +import yaml +import torch + + +from litgpt import GPT, Config from litgpt.scripts.download import download_from_hub -from litgpt.scripts.evaluate import safe_safetensors, prepare_results +from litgpt.eval.evaluate import safe_safetensors, prepare_results from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint from lm_eval import evaluator @@ -21,6 +33,33 @@ strict=False, match="Loading a dataset cached in a LocalFileSystem is not supported", ) +@mock.patch.dict(os.environ, {"LT_ACCELERATOR": "cpu"}) +def test_evaluate_script(tmp_path, fake_checkpoint_dir, monkeypatch): + ours_config = Config.from_name("pythia-14m") + ours_model = GPT(ours_config) + checkpoint_path = fake_checkpoint_dir / "lit_model.pth" + config_path = fake_checkpoint_dir / "model_config.yaml" + torch.save(ours_model.state_dict(), checkpoint_path) + with open(config_path, "w") as fp: + yaml.dump(asdict(ours_config), fp) + output_dir = tmp_path / "out_dir" + + fn_kwargs = dict( + checkpoint_dir=fake_checkpoint_dir, + out_dir=output_dir, + ) + stdout = StringIO() + with redirect_stdout(stdout), mock.patch("sys.argv", ["evaluate.py"]): + module.convert_and_evaluate(**fn_kwargs) + + +def test_cli(): + cli_path = Path(__file__).parent.parent / "eval" / "evaluate.py" + output = subprocess.check_output([sys.executable, cli_path, "-h"]) + output = str(output.decode()) + assert "evaluate" in output + +""" def test_run_eval(tmp_path, float_like): repo_id = "EleutherAI/pythia-14m" download_from_hub(repo_id=repo_id, checkpoint_dir=tmp_path) @@ -60,3 +99,4 @@ def test_run_eval(tmp_path, float_like): 'alias': 'hellaswag' } } +""" \ No newline at end of file From 8988dda121343d944a215d5b2c53de41cdfeb67c Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Thu, 28 Mar 2024 11:46:42 -0500 Subject: [PATCH 24/40] Add batch size --- tutorials/evaluation.md | 3 +++ 1 file changed, 3 insertions(+) diff --git 
a/tutorials/evaluation.md b/tutorials/evaluation.md index 4fb50a284e..63d9c189aa 100644 --- a/tutorials/evaluation.md +++ b/tutorials/evaluation.md @@ -29,6 +29,7 @@ specify in the following evaluation command: ``` litgpt evaluate \ --checkpoint_dir checkpoints/microsoft/phi-2/ \ + --batch_size 4 \ --out_dir evaluate_model/ ``` @@ -39,6 +40,7 @@ when you want to evaluate a model a second time, you can pass the `--skip_conver ``` litgpt evaluate \ --checkpoint_dir checkpoints/microsoft/phi-2/ \ + --batch_size 4 \ --out_dir evaluate_model/ \ --skip_conversion true ``` @@ -76,5 +78,6 @@ litgpt finetune lora \ ```bash litgpt evaluate \ --checkpoint_dir lora_model/final \ + --batch_size 4 \ --out_dir evaluate_model/ \ ``` From 45968da21a8c6d37ce1adde20811ac836d7add8a Mon Sep 17 00:00:00 2001 From: rasbt Date: Fri, 29 Mar 2024 20:15:41 +0000 Subject: [PATCH 25/40] revert to saving temp file and fix output print --- litgpt/eval/evaluate.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/litgpt/eval/evaluate.py b/litgpt/eval/evaluate.py index 284a1f2b0c..69acbe9223 100644 --- a/litgpt/eval/evaluate.py +++ b/litgpt/eval/evaluate.py @@ -11,18 +11,14 @@ from litgpt.utils import CLI, copy_config_files -def get_hf_model(out_dir, repo_id): - from transformers import AutoModel, AutoTokenizer - from lm_eval.models.huggingface import HFLM +def safe_safetensors(out_dir, repo_id): + from transformers import AutoModel state_dict = torch.load(out_dir/"model.pth") model = AutoModel.from_pretrained( - repo_id, state_dict=state_dict - ) - tokenizer = AutoTokenizer.from_pretrained( - repo_id - ) - return HFLM(model, tokenizer=tokenizer) + repo_id, state_dict=state_dict + ) + model.save_pretrained(out_dir) def prepare_results(results, save_filepath, print_results=True): @@ -75,6 +71,12 @@ def convert_and_evaluate( checkpoint_dir = Path(checkpoint_dir) + if out_dir is None: + out_dir = checkpoint_dir / "evaluate" + else: + out_dir = Path(out_dir) + out_dir.mkdir(parents=True, exist_ok=True) + save_filepath = out_dir / Path("results.json") if save_filepath is None else Path(save_filepath) config_filepath = checkpoint_dir/"model_config.yaml" @@ -82,23 +84,21 @@ def convert_and_evaluate( config_dict = yaml.safe_load(f) repo_id = f"{config_dict['hf_config']['org']}/{config_dict['hf_config']['name']}" - if out_dir is None: - out_dir = checkpoint_dir / "evaluate" - else: - out_dir = Path(out_dir) - out_dir.mkdir(parents=True, exist_ok=True) - copy_config_files(source_dir=checkpoint_dir, out_dir=out_dir) model_path = out_dir / "model.pth" if not model_path.exists() or force_conversion: convert_lit_checkpoint(checkpoint_dir=checkpoint_dir, output_dir=out_dir) - hf_model = get_hf_model(out_dir, repo_id) + + safetensors_path = out_dir / "model.safetensors" + if not safetensors_path.exists() or force_conversion: + safe_safetensors(out_dir, repo_id) os.environ["TOKENIZERS_PARALLELISM"] = "false" results = evaluator.simple_evaluate( - model=hf_model, + model="hf", + model_args=f"pretrained={out_dir}", tasks=tasks.split(","), num_fewshot=num_fewshot, batch_size=batch_size, @@ -108,8 +108,6 @@ def convert_and_evaluate( numpy_random_seed=seed, torch_random_seed=seed, ) - - print("results", results) prepare_results(results, save_filepath) From b3b693ef6fed9ef38c9e8b08e504e12a1bec03c4 Mon Sep 17 00:00:00 2001 From: rasbt Date: Fri, 29 Mar 2024 20:34:40 +0000 Subject: [PATCH 26/40] run test on cpu --- tests/test_evaluate.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/tests/test_evaluate.py b/tests/test_evaluate.py index a4d95d1e09..1c4d55cf08 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -47,6 +47,7 @@ def test_evaluate_script(tmp_path, fake_checkpoint_dir, monkeypatch): fn_kwargs = dict( checkpoint_dir=fake_checkpoint_dir, out_dir=output_dir, + device="cpu" ) stdout = StringIO() with redirect_stdout(stdout), mock.patch("sys.argv", ["evaluate.py"]): From 17d4aa20c1d5fdff258ad8456e858f226609e43f Mon Sep 17 00:00:00 2001 From: rasbt Date: Fri, 29 Mar 2024 22:53:46 +0000 Subject: [PATCH 27/40] update tests and docs --- litgpt/eval/evaluate.py | 6 ++--- tutorials/0_to_litgpt.md | 15 +++++++++++- tutorials/evaluation.md | 24 +++++++++++++++++-- tutorials/images/0_to_litgpt/4-commands.webp | Bin 33466 -> 0 bytes tutorials/images/0_to_litgpt/commands.webp | Bin 0 -> 8298 bytes tutorials/images/0_to_litgpt/pretrain.webp | Bin 22906 -> 5420 bytes 6 files changed, 39 insertions(+), 6 deletions(-) delete mode 100644 tutorials/images/0_to_litgpt/4-commands.webp create mode 100644 tutorials/images/0_to_litgpt/commands.webp diff --git a/litgpt/eval/evaluate.py b/litgpt/eval/evaluate.py index 69acbe9223..3f37a5b64d 100644 --- a/litgpt/eval/evaluate.py +++ b/litgpt/eval/evaluate.py @@ -39,7 +39,7 @@ def convert_and_evaluate( checkpoint_dir: str, out_dir: Optional[str] = None, force_conversion: bool = False, - tasks: Optional[str] = "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge", + tasks: Optional[str] = "hellaswag,truthfulqa_mc2,mmlu", num_fewshot: Optional[int] = None, batch_size: int = 1, device: Optional[str] = None, @@ -56,8 +56,8 @@ def convert_and_evaluate( force_conversion: Set to `True` to reconvert the model and override an existing model.pth from a previous evaluation call. tasks: CSV of task names to evaluate. - By default, the Open LM Leaderboard tasks are used: - "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge" + By default, the following tasks are used: + "hellaswag,truthfulqa_mc2,mmlu" num_fewshot: Number of examples in few-shot context. batch_size: Batch size configuration. device: Device to use for evaluation, for example, "cuda" or "cuda:0". diff --git a/tutorials/0_to_litgpt.md b/tutorials/0_to_litgpt.md index f0497b45ae..6359d0fa25 100644 --- a/tutorials/0_to_litgpt.md +++ b/tutorials/0_to_litgpt.md @@ -12,7 +12,7 @@ The topics, following the installation of LitGPT, are in chronological order, re   - +   @@ -448,6 +448,19 @@ Time for inference: 1.14 sec total, 26.26 tokens/sec, 30 tokens - [tutorials/quantize](quantize.md): Quantizing models to reduce GPU memory requirements +  +## Evaluating models + +LitGPT comes with a handy `litgpt evaluate` command to evaluate models with [Eleuther AI's Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness). For example, to evaluate the previously downloaded `microsoft/phi-2` model on several tasks available from the Evaluation Harness, you can use the following command: + +```bash +litgpt evaluate \ + --checkpoint_dir checkpoints/microsoft/phi-2 + --batch_size 16 \ + --tasks "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge" +``` + +(A list of supported tasks can be found [here](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/docs/task_table.md).)   
diff --git a/tutorials/evaluation.md b/tutorials/evaluation.md
index 63d9c189aa..4a85e4fe0f 100644
--- a/tutorials/evaluation.md
+++ b/tutorials/evaluation.md
@@ -33,6 +33,26 @@ litgpt evaluate \
   --out_dir evaluate_model/
 ```
 
+The resulting output is as follows:
+
+```
+...
+|---------------------------------------|-------|------|-----:|--------|-----:|---|-----:|
+...
+|truthfulqa_mc2 | 2|none | 0|acc |0.4656|± |0.0164|
+|hellaswag | 1|none | 0|acc |0.2569|± |0.0044|
+| | |none | 0|acc_norm|0.2632|± |0.0044|
+
+| Groups |Version|Filter|n-shot|Metric|Value | |Stderr|
+|------------------|-------|------|-----:|------|-----:|---|-----:|
+|mmlu |N/A |none | 0|acc |0.2434|± |0.0036|
+| - humanities |N/A |none | 0|acc |0.2578|± |0.0064|
+| - other |N/A |none | 0|acc |0.2401|± |0.0077|
+| - social_sciences|N/A |none | 0|acc |0.2301|± |0.0076|
+| - stem |N/A |none | 0|acc |0.2382|± |0.0076|
+```
+
+
 Please note that the `litgpt evaluate` command runs an internal model conversion. This is only necessary the first time you want to evaluate a model. To skip the conversion,
 when you want to evaluate a model a second time, you can pass the `--skip_conversion true` argument:
 
@@ -48,9 +68,9 @@ litgpt evaluate \
 
 &nbsp;
 > [!TIP]
-> By default, `ligpt evaluate` will evaluate a model on all Open LM Leaderboard tasks, which corresponds
+> By default, `litgpt evaluate` will evaluate a model on 3 tasks
 > to the setting `--tasks
-> "hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge"`.
+> "hellaswag,truthfulqa_mc2,mmlu"`.
 
 > [!TIP]
 > The evaluation may take a long time, and for testing purposes, you may want to reduce the number of tasks
diff --git a/tutorials/images/0_to_litgpt/4-commands.webp b/tutorials/images/0_to_litgpt/4-commands.webp
deleted file mode 100644
index aac24a13b31521a80bee1413bfbfac80f551c454..0000000000000000000000000000000000000000
GIT binary patch
zC)IJUZR01WVWuE_G}`(UN|2^e6CfKD5zl=;s9Z}w!54{`%; zh;Z)3JPXg)N9>6U!c2*)DNP8a&$JtRkX)^DG)PPnj3VZJnV~fWNP;F1<>XaVJ$XcLd1ppwdpB4RLKl>@jzb&GXvSc*4dxy+4fwfp8jpEgq zFa)B%v>;x-os;DUNs-TG*<_Yp$1SVo(E)Eqqv`+<{}o%tIuf*)xW?rD%1+3Q)>ZCg zCM5z+IzmnwS^5Zb6f&FFr9^MB6!J4xq&<*iB}-2q zay;;et+dwWmSAl7w2^?JJqIODrcHiwr$>f_)ivx;%5WWv(4z2 z>t6wwp*g9f&NdxrNm(s|gg?;|mypS&12WF3y&BLKs3x=o_VZJXnX;IfC6!y>+#wZB z!%^@gmp3=l{Ms7*QW?$;)h1zr#1ua@;PxMamRESlbjaHB7p2a$VlDQ=)Fs$fIVbG^I-M^?1>k` zM=JiGv!*3=fHpeJjSR~dinnf=c|bl$aww(ly~-Kws-HBLI@|NB3NgZ{+Se#_XlSw@ zxJv~%BOkzZGh&n&8;1aaO1=ari4y3DRp4Wl*!Psbt+*KdTny+~hD^|tq?nq-I*`HK z8_-s@0sLRb7^4^D;D_5`j!(oszi>u z04?L50yDt1{38!28qbKHywC^j#|-~aJ|f+NftoM@mo``}=6MNRB=&NeP0JLO z&Vb$X2gsOALX+bwlU2fjT+Y_dza#D0j^{sgm60>0*fQZ0)YRMmNprH1I6<`dA8UWiAMlAH1*FR zo19>0S=%c$YpZaDU+-Iq6UYwqzqrL>nuh{wLPRhQ#eQY*xzJI4=g5sHy8moqr7^}7 z4{?+SNFpC4i*)Fy?)pa;n*Zcvt?4S2d{qD|iAQTDzPY7kpW-x>%M zZHP}mD{3Bxvl4uSBQjXht?CU z&b@DF9zTn0Hgpzt2rx!oLLWOWLv;Mt`eojI=%wL>JZ(v+)6I4MQZuYAkh-YbC-_37 z$ZCtL2Q3!tXMGNU-chx8mDOa3K>t-Pm}o}2yqDn=z+S(z!peOiYNT`TY-6KYQJ4+E z3l;F#O_2p!&+;1KmVxZvBmh;Z_p@pJV`0v4gj(4m$ed(tj|XSr07DV$G~op6p%nl) zNmqkj(j=~!)X7;dzj4)cYf)`{eXbk4_df>q0ZZ)8Apu5zpEVtHP}!X6A^skqC+OL! zBwu&K95RSUvf|62R%jLsAk|(%kDORu+h#~N{)ojh!tS!5HBJK#C$re4o8_fyn5%-L z7d($NrzjcHTnSU6!k@RQ0h;t<(_hg!{&qnv5w3s$0000000J^QtCLH_1_;h>rv!rT z#KE}eIOTpy(6;)m5;oSGaKH?sQm+GhDKZ=4BpH zW8Xp_%Dpg(<9l&Kqt*<{CYtUy26h14U#gPdIb#skr?P+=7=(^R2eL%N!lvE0|kSEjX9dx61}(t}-8rY)jQo3(GwP|JQMdg|&< zLiS(pt_Oji27wl(iU>yhcnFzp003W%tdi^iDA!Kr8k^3!{cSZnuDcb~H}VCvFb4ZX zbhXaOw}D&HyAr1;oGnF=&qt2WLMJ6m*vKfpxwp(1A)O<*r~n;q?+yFaHfw1KNmrj!LjB3EX9GVW;t8zW@#V0M zpj|hr?=0!ZeCnI629Hmfg)r=o6uDew`m1d`YASWe`v7MUsZH2 z;U&LwHmkC2R~$-+q~MdIr1Xzwv_IPUD;q4L7n}@K11jI9P7L_5)6Z6p9*n9c2n*Xg zvt@G=VyT#Q`Q0Tw&Ra-&5}lCmBBDAFLz*M zLMZlh_&#&zUJk>5?E?8$EEX76r4#&GX`P+B`e)Pf@Nk%9$3`$0sZW=suJ>>u3rFXi z-k#&*5usVi9e^Ru1b<3}I%W-V;h1INkHi=_cV)hz$C#(T(SUre@0jln+^ePEP{?*rD%H3_0D;)otEJdl z8mec3DJq^3n^Ca_PKf~_gFpsP*Et^!TADX9E+p+YnvdF-n|;Yc{f=d58&=zq7H*xp zo=L0sUhLywr5>`oHd?cO=Dhf(Ijy^e`q9^GUN7`)^22iz3Nyrl9*pKKZRG5!oAA0W Z_f1eM{OWM!?$HW*CIA2c00000003{uicJ6j From 296101de7fa87d658036b6200a67717f19bc05aa Mon Sep 17 00:00:00 2001 From: rasbt Date: Fri, 29 Mar 2024 23:05:09 +0000 Subject: [PATCH 28/40] update --- tests/test_evaluate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py index 1c4d55cf08..b2d03ef148 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -56,7 +56,7 @@ def test_evaluate_script(tmp_path, fake_checkpoint_dir, monkeypatch): def test_cli(): cli_path = Path(__file__).parent.parent / "eval" / "evaluate.py" - output = subprocess.check_output([sys.executable, cli_path, "-h"]) + output = subprocess.check_output([sys.executable, cli_path, f"--checkpoint_dir {fake_checkpoint_dir}", "-h"]) output = str(output.decode()) assert "evaluate" in output From bacd1d65709bc673785d4261c255b0a0729384d1 Mon Sep 17 00:00:00 2001 From: rasbt Date: Sat, 30 Mar 2024 19:28:54 +0000 Subject: [PATCH 29/40] fix test --- tests/test_evaluate.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py index b2d03ef148..61bdbb5f65 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -54,6 +54,7 @@ def test_evaluate_script(tmp_path, fake_checkpoint_dir, monkeypatch): module.convert_and_evaluate(**fn_kwargs) +@pytest.mark.parametrize("mode", ["file", "entrypoint"]) def test_cli(): cli_path = Path(__file__).parent.parent / "eval" / "evaluate.py" output = 
subprocess.check_output([sys.executable, cli_path, f"--checkpoint_dir {fake_checkpoint_dir}", "-h"]) From afaee750468d757186967c34c3695d936e837da3 Mon Sep 17 00:00:00 2001 From: rasbt Date: Sat, 30 Mar 2024 19:34:59 +0000 Subject: [PATCH 30/40] fix test --- tests/test_evaluate.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py index 61bdbb5f65..9b80c8cd6a 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -54,10 +54,10 @@ def test_evaluate_script(tmp_path, fake_checkpoint_dir, monkeypatch): module.convert_and_evaluate(**fn_kwargs) -@pytest.mark.parametrize("mode", ["file", "entrypoint"]) +@pytest.mark.parametrize("checkpoint_dir", ["fake_checkpoint_dir"]) def test_cli(): cli_path = Path(__file__).parent.parent / "eval" / "evaluate.py" - output = subprocess.check_output([sys.executable, cli_path, f"--checkpoint_dir {fake_checkpoint_dir}", "-h"]) + output = subprocess.check_output([sys.executable, cli_path]) output = str(output.decode()) assert "evaluate" in output From 687a38251cb42fb00c6e1cee8c67bb7b5bca2b4f Mon Sep 17 00:00:00 2001 From: rasbt Date: Sat, 30 Mar 2024 19:37:13 +0000 Subject: [PATCH 31/40] fix test --- tests/test_evaluate.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py index 9b80c8cd6a..73df745c5f 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -55,9 +55,9 @@ def test_evaluate_script(tmp_path, fake_checkpoint_dir, monkeypatch): @pytest.mark.parametrize("checkpoint_dir", ["fake_checkpoint_dir"]) -def test_cli(): +def test_cli(checkpoint_dir): cli_path = Path(__file__).parent.parent / "eval" / "evaluate.py" - output = subprocess.check_output([sys.executable, cli_path]) + output = subprocess.check_output([sys.executable, cli_path, f"--checkpoint_dir {checkpoint_dir}"]) output = str(output.decode()) assert "evaluate" in output From 5faa2939eae15cf2c14aa6b2512d7d24a7821138 Mon Sep 17 00:00:00 2001 From: rasbt Date: Sat, 30 Mar 2024 20:18:41 +0000 Subject: [PATCH 32/40] fix tests --- tests/test_evaluate.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py index 73df745c5f..0cf22166f3 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -27,7 +27,7 @@ wd = Path(__file__).parent.parent.resolve() sys.path.append(str(wd)) - +""" @pytest.mark.xfail( raises=(datasets.builder.DatasetGenerationError, NotImplementedError), strict=False, @@ -50,17 +50,19 @@ def test_evaluate_script(tmp_path, fake_checkpoint_dir, monkeypatch): device="cpu" ) stdout = StringIO() - with redirect_stdout(stdout), mock.patch("sys.argv", ["evaluate.py"]): - module.convert_and_evaluate(**fn_kwargs) + #with redirect_stdout(stdout), mock.patch("sys.argv", ["evaluate.py", "--checkpoint_dir", "fake_checkpoint_dir"]): + # module.convert_and_evaluate(**fn_kwargs) +""" -@pytest.mark.parametrize("checkpoint_dir", ["fake_checkpoint_dir"]) -def test_cli(checkpoint_dir): - cli_path = Path(__file__).parent.parent / "eval" / "evaluate.py" - output = subprocess.check_output([sys.executable, cli_path, f"--checkpoint_dir {checkpoint_dir}"]) +def test_cli(fake_checkpoint_dir): + cli_path = Path(__file__).parent.parent / "litgpt" / "eval" / "evaluate.py" + output = subprocess.check_output([sys.executable, cli_path, "-h"]) output = str(output.decode()) assert "evaluate" in output + + """ def test_run_eval(tmp_path, float_like): repo_id = "EleutherAI/pythia-14m" 
From 1c3686c4bc6e7694979bf9912ae6f693c131c668 Mon Sep 17 00:00:00 2001 From: rasbt Date: Sat, 30 Mar 2024 20:49:16 +0000 Subject: [PATCH 33/40] extend tests --- tests/test_evaluate.py | 79 +++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 48 deletions(-) diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py index 0cf22166f3..46c65d83ef 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -27,6 +27,34 @@ wd = Path(__file__).parent.parent.resolve() sys.path.append(str(wd)) + +@pytest.mark.xfail( + raises=(datasets.builder.DatasetGenerationError, NotImplementedError), + strict=False, + match="Loading a dataset cached in a LocalFileSystem is not supported", +) +@mock.patch.dict(os.environ, {"LT_ACCELERATOR": "cpu"}) +def test_evaluate_script(tmp_path, monkeypatch): + ours_config = Config.from_name("pythia-14m") + download_from_hub(repo_id="EleutherAI/pythia-14m", tokenizer_only=True, checkpoint_dir=tmp_path) + ours_model = GPT(ours_config) + checkpoint_path = tmp_path / "lit_model.pth" + config_path = tmp_path / "model_config.yaml" + torch.save(ours_model.state_dict(), checkpoint_path) + with open(config_path, "w") as fp: + yaml.dump(asdict(ours_config), fp) + torch.save({"model": ours_model.state_dict()}, tmp_path) + + fn_kwargs = dict( + checkpoint_dir=tmp_path, + out_dir=tmp_path / "out_dir", + device="cpu", + tasks="hellaswag" + ) + stdout = StringIO() + with redirect_stdout(stdout), mock.patch("sys.argv", ["eval" / "evaluate.py"]): + module.convert_and_evaluate(**fn_kwargs) + """ @pytest.mark.xfail( raises=(datasets.builder.DatasetGenerationError, NotImplementedError), @@ -42,7 +70,7 @@ def test_evaluate_script(tmp_path, fake_checkpoint_dir, monkeypatch): torch.save(ours_model.state_dict(), checkpoint_path) with open(config_path, "w") as fp: yaml.dump(asdict(ours_config), fp) - output_dir = tmp_path / "out_dir" + output_dir = fake_checkpoint_dir / "out_dir" fn_kwargs = dict( checkpoint_dir=fake_checkpoint_dir, @@ -50,57 +78,12 @@ def test_evaluate_script(tmp_path, fake_checkpoint_dir, monkeypatch): device="cpu" ) stdout = StringIO() - #with redirect_stdout(stdout), mock.patch("sys.argv", ["evaluate.py", "--checkpoint_dir", "fake_checkpoint_dir"]): - # module.convert_and_evaluate(**fn_kwargs) + with redirect_stdout(stdout), mock.patch("sys.argv", ["evaluate.py"]): + module.convert_and_evaluate(**fn_kwargs) """ - def test_cli(fake_checkpoint_dir): cli_path = Path(__file__).parent.parent / "litgpt" / "eval" / "evaluate.py" output = subprocess.check_output([sys.executable, cli_path, "-h"]) output = str(output.decode()) assert "evaluate" in output - - - -""" -def test_run_eval(tmp_path, float_like): - repo_id = "EleutherAI/pythia-14m" - download_from_hub(repo_id=repo_id, checkpoint_dir=tmp_path) - - checkpoint_path = Path(tmp_path) / Path(repo_id) - - convert_lit_checkpoint(checkpoint_dir=checkpoint_path, output_dir=checkpoint_path) - safe_safetensors(out_dir=checkpoint_path, repo_id=repo_id) - - eval_tasks = "coqa,hellaswag" - results = evaluator.simple_evaluate( - model="hf", - model_args=f"pretrained={checkpoint_path}", - tasks=eval_tasks.split(","), - limit=2, - device="cpu" - ) - - save_path = checkpoint_path/"results.json" - prepare_results(results, save_path, print_results=False) - - print(checkpoint_path/"dump.txt") - assert save_path.is_file() - assert results["results"] == { - 'coqa': { - 'alias': 'coqa', - 'em,none': 0.0, - 'em_stderr,none': 0.0, - 'f1,none': 0.0, - 'f1_stderr,none': 0.0 - }, - 'hellaswag': { - 'acc,none': 0.0, - 
'acc_stderr,none': 0.0, - 'acc_norm,none': 0.5, - 'acc_norm_stderr,none': 0.5, - 'alias': 'hellaswag' - } - } -""" \ No newline at end of file From a8816301fbb639a60ddc0463f24fb4d85c875957 Mon Sep 17 00:00:00 2001 From: rasbt Date: Sat, 30 Mar 2024 21:29:57 +0000 Subject: [PATCH 34/40] finally fixed --- litgpt/eval/evaluate.py | 2 +- tests/test_evaluate.py | 40 ++++++++++------------------------------ 2 files changed, 11 insertions(+), 31 deletions(-) diff --git a/litgpt/eval/evaluate.py b/litgpt/eval/evaluate.py index 3f37a5b64d..74566f77cb 100644 --- a/litgpt/eval/evaluate.py +++ b/litgpt/eval/evaluate.py @@ -63,7 +63,7 @@ def convert_and_evaluate( device: Device to use for evaluation, for example, "cuda" or "cuda:0". limit: Limit on number of examples per task. seed: Random seed. - save_filepath: The file where the results will be saved. + save_filepath: The file where the results will be saved. Saves to `out_dir/results.json` by default. """ diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py index 46c65d83ef..516d9ec7bf 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -8,6 +8,7 @@ import litgpt.eval.evaluate as module from contextlib import redirect_stdout from io import StringIO +import shutil import subprocess import datasets @@ -37,50 +38,29 @@ def test_evaluate_script(tmp_path, monkeypatch): ours_config = Config.from_name("pythia-14m") download_from_hub(repo_id="EleutherAI/pythia-14m", tokenizer_only=True, checkpoint_dir=tmp_path) + shutil.move(tmp_path / "EleutherAI" / "pythia-14m" / "tokenizer.json", tmp_path) + shutil.move(tmp_path / "EleutherAI" / "pythia-14m" / "tokenizer_config.json", tmp_path) ours_model = GPT(ours_config) checkpoint_path = tmp_path / "lit_model.pth" - config_path = tmp_path / "model_config.yaml" torch.save(ours_model.state_dict(), checkpoint_path) + config_path = tmp_path / "model_config.yaml" with open(config_path, "w") as fp: yaml.dump(asdict(ours_config), fp) - torch.save({"model": ours_model.state_dict()}, tmp_path) fn_kwargs = dict( checkpoint_dir=tmp_path, out_dir=tmp_path / "out_dir", device="cpu", - tasks="hellaswag" + limit=5, + tasks="mathqa" ) stdout = StringIO() - with redirect_stdout(stdout), mock.patch("sys.argv", ["eval" / "evaluate.py"]): + with redirect_stdout(stdout), mock.patch("sys.argv", [Path("eval") / "evaluate.py"]): module.convert_and_evaluate(**fn_kwargs) + stdout_out = stdout.getvalue() + assert "mathqa" in stdout_out + assert "Metric" in stdout_out -""" -@pytest.mark.xfail( - raises=(datasets.builder.DatasetGenerationError, NotImplementedError), - strict=False, - match="Loading a dataset cached in a LocalFileSystem is not supported", -) -@mock.patch.dict(os.environ, {"LT_ACCELERATOR": "cpu"}) -def test_evaluate_script(tmp_path, fake_checkpoint_dir, monkeypatch): - ours_config = Config.from_name("pythia-14m") - ours_model = GPT(ours_config) - checkpoint_path = fake_checkpoint_dir / "lit_model.pth" - config_path = fake_checkpoint_dir / "model_config.yaml" - torch.save(ours_model.state_dict(), checkpoint_path) - with open(config_path, "w") as fp: - yaml.dump(asdict(ours_config), fp) - output_dir = fake_checkpoint_dir / "out_dir" - - fn_kwargs = dict( - checkpoint_dir=fake_checkpoint_dir, - out_dir=output_dir, - device="cpu" - ) - stdout = StringIO() - with redirect_stdout(stdout), mock.patch("sys.argv", ["evaluate.py"]): - module.convert_and_evaluate(**fn_kwargs) -""" def test_cli(fake_checkpoint_dir): cli_path = Path(__file__).parent.parent / "litgpt" / "eval" / "evaluate.py" From 
012ad9b22c7d328f0bbaa026649de715c2e8eda6 Mon Sep 17 00:00:00 2001
From: rasbt
Date: Tue, 2 Apr 2024 16:20:09 -0500
Subject: [PATCH 35/40] add new pretrain image

---
 tutorials/images/0_to_litgpt/pretrain.webp | Bin 5420 -> 5548 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/tutorials/images/0_to_litgpt/pretrain.webp b/tutorials/images/0_to_litgpt/pretrain.webp
index aeef2eb1902b94e53169fff429d54c5a04dbf435..266c1800e8e0851134db22e652137c0d61700250 100644
GIT binary patch
literal 5548
[base85-encoded binary data for pretrain.webp omitted]
Date: Wed, 3 Apr 2024 15:05:40 +0200
Subject: [PATCH 36/40] Parametrize CLI test

---
 tests/test_evaluate.py | 36 ++++++++++++++++--------------------
 1 file changed, 16 insertions(+), 20 deletions(-)

diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py
index 516d9ec7bf..fc59676b85 100644
--- a/tests/test_evaluate.py
+++ b/tests/test_evaluate.py
@@ -1,32 +1,23 @@
 # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
-import sys import os +import shutil +import subprocess +import sys +from contextlib import redirect_stdout from dataclasses import asdict +from io import StringIO from pathlib import Path from unittest import mock -import litgpt.eval.evaluate as module -from contextlib import redirect_stdout -from io import StringIO -import shutil -import subprocess import datasets import pytest -import yaml import torch +import yaml - +import litgpt.eval.evaluate as module from litgpt import GPT, Config - from litgpt.scripts.download import download_from_hub -from litgpt.eval.evaluate import safe_safetensors, prepare_results -from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint -from lm_eval import evaluator - -# support running without installing as a package -wd = Path(__file__).parent.parent.resolve() -sys.path.append(str(wd)) @pytest.mark.xfail( @@ -62,8 +53,13 @@ def test_evaluate_script(tmp_path, monkeypatch): assert "Metric" in stdout_out -def test_cli(fake_checkpoint_dir): - cli_path = Path(__file__).parent.parent / "litgpt" / "eval" / "evaluate.py" - output = subprocess.check_output([sys.executable, cli_path, "-h"]) +@pytest.mark.parametrize("mode", ["file", "entrypoint"]) +def test_cli(mode): + if mode == "file": + cli_path = Path(__file__).parent.parent / "litgpt/eval/evaluate.py" + args = [sys.executable, cli_path, "-h"] + else: + args = ["litgpt", "evaluate", "-h"] + output = subprocess.check_output(args) output = str(output.decode()) - assert "evaluate" in output + assert "run the LM Evaluation Harness" in output From b53b6887883631a980924b0ee9688bf1bae8d1bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Wed, 3 Apr 2024 15:19:17 +0200 Subject: [PATCH 37/40] Minor fixes --- tests/test_evaluate.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/test_evaluate.py b/tests/test_evaluate.py index fc59676b85..94b90b4551 100644 --- a/tests/test_evaluate.py +++ b/tests/test_evaluate.py @@ -29,8 +29,8 @@ def test_evaluate_script(tmp_path, monkeypatch): ours_config = Config.from_name("pythia-14m") download_from_hub(repo_id="EleutherAI/pythia-14m", tokenizer_only=True, checkpoint_dir=tmp_path) - shutil.move(tmp_path / "EleutherAI" / "pythia-14m" / "tokenizer.json", tmp_path) - shutil.move(tmp_path / "EleutherAI" / "pythia-14m" / "tokenizer_config.json", tmp_path) + shutil.move(str(tmp_path / "EleutherAI" / "pythia-14m" / "tokenizer.json"), str(tmp_path)) + shutil.move(str(tmp_path / "EleutherAI" / "pythia-14m" / "tokenizer_config.json"), str(tmp_path)) ours_model = GPT(ours_config) checkpoint_path = tmp_path / "lit_model.pth" torch.save(ours_model.state_dict(), checkpoint_path) @@ -46,11 +46,11 @@ def test_evaluate_script(tmp_path, monkeypatch): tasks="mathqa" ) stdout = StringIO() - with redirect_stdout(stdout), mock.patch("sys.argv", [Path("eval") / "evaluate.py"]): + with redirect_stdout(stdout), mock.patch("sys.argv", ["eval/evaluate.py"]): module.convert_and_evaluate(**fn_kwargs) - stdout_out = stdout.getvalue() - assert "mathqa" in stdout_out - assert "Metric" in stdout_out + stdout = stdout.getvalue() + assert "mathqa" in stdout + assert "Metric" in stdout @pytest.mark.parametrize("mode", ["file", "entrypoint"]) From 887ff614761116eddb20c9f2b6c60a843a9f260f Mon Sep 17 00:00:00 2001 From: Sebastian Raschka Date: Wed, 3 Apr 2024 14:34:01 -0500 Subject: [PATCH 38/40] Update evaluation.md --- tutorials/evaluation.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tutorials/evaluation.md 
b/tutorials/evaluation.md
index 4a85e4fe0f..1285a473a8 100644
--- a/tutorials/evaluation.md
+++ b/tutorials/evaluation.md
@@ -54,15 +54,18 @@ The resulting output is as follows:
 
 Please note that the `litgpt evaluate` command runs an internal model conversion.
-This is only necessary the first time you want to evaluate a model. To skip the conversion,
-when you want to evaluate a model a second time, you can pass the `--skip_conversion true` argument:
+This is only necessary the first time you want to evaluate a model, and it will skip the
+conversion steps if you run `litgpt evaluate` on the same checkpoint directory again.
+
+In some cases, for example, if you modified the model in the `checkpoint_dir` since the first `litgpt evaluate`
+call, you need to use the `--force_conversion` flag to update the files used by `litgpt evaluate` accordingly:
 
 ```
 litgpt evaluate \
   --checkpoint_dir checkpoints/microsoft/phi-2/ \
   --batch_size 4 \
   --out_dir evaluate_model/ \
-  --skip_conversion true
+  --force_conversion true
 ```
 
 &nbsp;

From 5a944d26aa109070f1dc8b4d25a7e0fb2f58682b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20Mochol=C3=AD?=
Date: Thu, 4 Apr 2024 18:27:25 +0200
Subject: [PATCH 39/40] Apply suggestions from code review

---
 litgpt/eval/evaluate.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/litgpt/eval/evaluate.py b/litgpt/eval/evaluate.py
index 74566f77cb..1b929897d5 100644
--- a/litgpt/eval/evaluate.py
+++ b/litgpt/eval/evaluate.py
@@ -11,7 +11,7 @@ from litgpt.utils import CLI, copy_config_files
 
 
-def safe_safetensors(out_dir, repo_id):
+def save_safetensors(out_dir, repo_id):
     from transformers import AutoModel
 
     state_dict = torch.load(out_dir/"model.pth")
@@ -92,7 +92,7 @@ def convert_and_evaluate(
 
     safetensors_path = out_dir / "model.safetensors"
     if not safetensors_path.exists() or force_conversion:
-        safe_safetensors(out_dir, repo_id)
+        save_safetensors(out_dir, repo_id)
 
     os.environ["TOKENIZERS_PARALLELISM"] = "false"
 

From efb6ca474f394dab456b419483a50e370e8cbd35 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20Mochol=C3=AD?=
Date: Thu, 4 Apr 2024 18:28:31 +0200
Subject: [PATCH 40/40] Update tutorials/evaluation.md

---
 tutorials/evaluation.md | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tutorials/evaluation.md b/tutorials/evaluation.md
index 1285a473a8..81cd4f0fd9 100644
--- a/tutorials/evaluation.md
+++ b/tutorials/evaluation.md
@@ -72,8 +72,7 @@ litgpt evaluate \
 
 > [!TIP]
 > By default, `litgpt evaluate` will evaluate a model on 3 tasks
->  to the setting `--tasks
-> "hellaswag,truthfulqa_mc2,mmlu"`.
+> according to the setting `--tasks "hellaswag,truthfulqa_mc2,mmlu"`.
 
 > [!TIP]
 > The evaluation may take a long time, and for testing purposes, you may want to reduce the number of tasks